Package org.apache.hadoop.hive.metastore.api

Examples of org.apache.hadoop.hive.metastore.api.Partition
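
The Partition Thrift object ties a list of partition values to a StorageDescriptor and to the names of its owning database and table. As a minimal sketch of constructing one (the database, table, and value strings below are illustrative, and the table's storage descriptor is assumed to be available as table.getSd()):

  Partition part = new Partition();
  part.setDbName("default");                        // owning database
  part.setTableName("mytable");                     // owning table
  part.setValues(Arrays.asList("2018-01-01"));      // one value per partition key
  part.setParameters(new HashMap<String, String>());
  // The SD is usually a copy of the table's SD with a per-partition location.
  part.setSd(new StorageDescriptor(table.getSd()));
  part.getSd().setLocation(table.getSd().getLocation() + "/ds=2018-01-01");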


  /**
   * Builds a Partition for a test table: a storage descriptor, partition
   * values, and partition columns.
   */
  public void testPartition() throws HiveException, URISyntaxException {
    StorageDescriptor sd = new StorageDescriptor();
    sd.setLocation("partlocation");
   
    Partition tp = new Partition();
    tp.setTableName(TABLENAME);
    tp.setSd(sd);
   
    List<String> values = new ArrayList<String>();
    values.add(PARTITION_VALUE);
    tp.setValues(values);
   
    List<FieldSchema> partCols = new ArrayList<FieldSchema>();
    partCols.add(new FieldSchema(PARTITION_COL, "string", ""));
   
    Table tbl = new Table(TABLENAME);


      // after the add, listPartitions should return exactly one partition
      parts = client.listPartitions(dbName, tableName, (short) -1);
      assertEquals(1, parts.size());
      Partition insertedPart = parts.get(0);
      assertEquals(tbl.getSd().getLocation() + Path.SEPARATOR + partitionLocation,
          insertedPart.getSd().getLocation());

      client.dropPartition(dbName, tableName, insertedPart.getValues());

      // add without location specified

      AddPartitionDesc addPartition = new AddPartitionDesc(dbName, tableName, part1, null);
      Task<DDLWork> task = TaskFactory.get(new DDLWork(addPartition), hiveConf);

        if (LOG.isDebugEnabled()) {
          for (String part : part_vals) {
            LOG.debug(part);
          }
        }
        Partition part = new Partition();
        boolean success = false;
        try {
          getMS().openTransaction();
          part.setDbName(dbName);
          part.setTableName(tableName);
          part.setValues(part_vals);

          Table tbl = getMS().getTable(part.getDbName(), part.getTableName());
          if (tbl == null) {
            throw new InvalidObjectException(
                "Unable to add partition because the table or database does not exist");
          }

          part.setSd(tbl.getSd());
          Path partLocation = new Path(tbl.getSd().getLocation(), Warehouse.makePartName(tbl.getPartitionKeys(), part_vals));
          part.getSd().setLocation(partLocation.toString());

          Partition old_part = this.get_partition(part.getDbName(), part.getTableName(),
              part.getValues());
          if (old_part != null) {
            throw new AlreadyExistsException("Partition already exists: " + part);
          }
         
          success = getMS().addPartition(part);
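
Warehouse.makePartName, used above to derive the partition location, builds the conventional key=value path suffix from the table's partition keys and the supplied values. A small sketch of the expected behavior (the keys and values are illustrative):

  List<FieldSchema> keys = Arrays.asList(
      new FieldSchema("ds", "string", ""),
      new FieldSchema("hr", "int", ""));
  List<String> vals = Arrays.asList("2018-01-01", "12");
  // Expected to yield "ds=2018-01-01/hr=12", which is then resolved
  // against the table's location, as in the snippet above.
  String partName = Warehouse.makePartName(keys, vals);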

        this.incrementCounter("add_partition");
        logStartFunction("add_partition", part.getDbName(), part.getTableName());
        boolean success = false;
        try {
          getMS().openTransaction();
          Partition old_part = this.get_partition(part.getDbName(), part.getTableName(), part.getValues());
          if (old_part != null) {
            throw new AlreadyExistsException("Partition already exists: " + part);
          }
          Table tbl = getMS().getTable(part.getDbName(), part.getTableName());
          if (tbl == null) {

        boolean success = false;
        Path partPath = null;
        Table tbl = null;
        try {
          getMS().openTransaction();
          Partition part = this.get_partition(db_name, tbl_name, part_vals);
          if (part == null) {
            throw new NoSuchObjectException("Partition doesn't exist: " + part_vals);
          }
          if (part.getSd() == null || part.getSd().getLocation() == null) {
            throw new MetaException("Partition metadata is corrupted");
          }
          if (!getMS().dropPartition(db_name, tbl_name, part_vals)) {
            throw new MetaException("Unable to drop partition");
          }
          success = getMS().commitTransaction();
          partPath = new Path(part.getSd().getLocation());
          tbl = get_table(db_name, tbl_name);
        } finally {
          if (!success) {
            getMS().rollbackTransaction();
          } else if (deleteData && (partPath != null)) {

    tbl.getPartitionKeys().add(new FieldSchema("ds", Constants.STRING_TYPE_NAME, ""));
    tbl.getPartitionKeys().add(new FieldSchema("hr", Constants.INT_TYPE_NAME, ""));
 
    client.createTable(tbl);
 
    Partition part = new Partition();
    part.setDbName(dbName);
    part.setTableName(tblName);
    part.setValues(vals);
    part.setParameters(new HashMap<String, String>());
    part.setSd(tbl.getSd());
    part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
    part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");
 
    Partition retp = client.add_partition(part);
    assertNotNull("Unable to create partition " + part, retp);
 
    Partition part2 = client.getPartition(dbName, tblName, part.getValues());
    assertTrue("Partitions are not same",part.equals(part2));
 
    FileSystem fs = FileSystem.get(this.hiveConf);
    Path partPath = new Path(part2.getSd().getLocation());
   
    assertTrue(fs.exists(partPath));
    boolean ret = client.dropPartition(dbName, tblName, part.getValues(), true);
    assertTrue(ret);
    assertFalse(fs.exists(partPath));

          throw new MetaException("Unexpected null for one of the IDs, SD " + sdId + ", column "
              + colId + ", serde " + serdeId + " for a " + (isView ? "" : "non-") + " view");
        }
      }

      Partition part = new Partition();
      orderedResult.add(part);
      // Set the collection fields; some code might not check presence before accessing them.
      part.setParameters(new HashMap<String, String>());
      part.setValues(new ArrayList<String>());
      part.setDbName(dbName);
      part.setTableName(tblName);
      if (fields[4] != null) part.setCreateTime((Integer)fields[4]);
      if (fields[5] != null) part.setLastAccessTime((Integer)fields[5]);
      partitions.put(partitionId, part);

      if (sdId == null) continue; // Probably a view.
      assert colId != null && serdeId != null;

      // We assume each partition has a unique SD.
      StorageDescriptor sd = new StorageDescriptor();
      StorageDescriptor oldSd = sds.put(sdId, sd);
      if (oldSd != null) {
        throw new MetaException("Partitions reuse SDs; we don't expect that");
      }
      // Set the collection fields; some code might not check presence before accessing them.
      sd.setSortCols(new ArrayList<Order>());
      sd.setBucketCols(new ArrayList<String>());
      sd.setParameters(new HashMap<String, String>());
      sd.setSkewedInfo(new SkewedInfo(new ArrayList<String>(),
          new ArrayList<List<String>>(), new HashMap<List<String>, String>()));
      sd.setInputFormat((String)fields[6]);
      Boolean tmpBoolean = extractSqlBoolean(fields[7]);
      if (tmpBoolean != null) sd.setCompressed(tmpBoolean);
      tmpBoolean = extractSqlBoolean(fields[8]);
      if (tmpBoolean != null) sd.setStoredAsSubDirectories(tmpBoolean);
      sd.setLocation((String)fields[9]);
      if (fields[10] != null) sd.setNumBuckets((Integer)fields[10]);
      sd.setOutputFormat((String)fields[11]);
      sdSb.append(sdId).append(",");
      part.setSd(sd);

      List<FieldSchema> cols = colss.get(colId);
      // We expect that colId will be the same for all (or many) SDs.
      if (cols == null) {
        cols = new ArrayList<FieldSchema>();
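
The extractSqlBoolean helper referenced above is not shown here; presumably it normalizes the different representations JDBC drivers return for boolean columns. A sketch under that assumption:

  private static Boolean extractSqlBoolean(Object value) throws MetaException {
    if (value == null) return null;
    if (value instanceof Boolean) return (Boolean) value;
    if (value instanceof String) {
      // Some databases store booleans as "Y"/"N" strings.
      if ("Y".equalsIgnoreCase((String) value)) return Boolean.TRUE;
      if ("N".equalsIgnoreCase((String) value)) return Boolean.FALSE;
    }
    throw new MetaException("Cannot extract boolean from column value " + value);
  }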

  private Partition constructPartition(JobContext context, OutputJobInfo jobInfo,
    String partLocnRoot, Map<String, String> partKVs,
    HCatSchema outputSchema, Map<String, String> params,
    Table table, FileSystem fs,
    String grpName, FsPermission perms) throws IOException {

    Partition partition = new Partition();
    partition.setDbName(table.getDbName());
    partition.setTableName(table.getTableName());
    partition.setSd(new StorageDescriptor(table.getTTable().getSd()));

    List<FieldSchema> fields = new ArrayList<FieldSchema>();
    for (HCatFieldSchema fieldSchema : outputSchema.getFields()) {
      fields.add(HCatSchemaUtils.getFieldSchema(fieldSchema));
    }

    partition.getSd().setCols(fields);

    partition.setValues(FileOutputFormatContainer.getPartitionValueList(table, partKVs));

    partition.setParameters(params);

    // Sets permissions and group name on partition dirs and files.

    Path partPath;
    if (Boolean.valueOf((String)table.getProperty("EXTERNAL"))
         && jobInfo.getLocation() != null && jobInfo.getLocation().length() > 0) {
      // Honor an external table that specifies its own location.
      partPath = new Path(jobInfo.getLocation());
    } else {
      partPath = new Path(partLocnRoot);
      int i = 0;
      for (FieldSchema partKey : table.getPartitionKeys()) {
        if (i++ != 0) {
          applyGroupAndPerms(fs, partPath, perms, grpName, false);
        }
        partPath = constructPartialPartPath(partPath, partKey.getName().toLowerCase(), partKVs);
      }
    }

    // Apply the group and permissions to the leaf partition and files.
    // This is not needed for HDFS, where permissions are handled by setting the UMask.
    if (!ShimLoader.getHadoopShims().getHCatShim().isFileInHDFS(fs, partPath)) {
      applyGroupAndPerms(fs, partPath, perms, grpName, true);
    }

    // Set the location in the StorageDescriptor
    if (dynamicPartitioningUsed) {
      String dynamicPartitionDestination = getFinalDynamicPartitionDestination(table, partKVs);
      if (harProcessor.isEnabled()) {
        harProcessor.exec(context, partition, partPath);
        partition.getSd().setLocation(
          harProcessor.getProcessedLocation(new Path(dynamicPartitionDestination)));
      } else {
        partition.getSd().setLocation(dynamicPartitionDestination);
      }
    } else {
      partition.getSd().setLocation(partPath.toString());
    }
    return partition;
  }
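
The constructPartialPartPath helper called above is likewise not shown; under the assumption that it appends one escaped key=value segment per partition key, a sketch:

  private static Path constructPartialPartPath(Path partialPath, String partKey,
      Map<String, String> partKVs) {
    // Appends a single "key=value" path segment, e.g. "ds=2018-01-01".
    StringBuilder sb = new StringBuilder(FileUtils.escapePathName(partKey));
    sb.append("=");
    sb.append(FileUtils.escapePathName(partKVs.get(partKey)));
    return new Path(partialPath, sb.toString());
  }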

    // Subscribers can be notified of a newly added partition in a
    // particular table by listening on a topic named "dbName.tableName"
    // with the message selector string "HCAT_EVENT = HCAT_ADD_PARTITION".
    if (partitionEvent.getStatus()) {

      Partition partition = partitionEvent.getPartition();
      String topicName = getTopicName(partition, partitionEvent);
      if (topicName != null && !topicName.equals("")) {
        send(messageFactory.buildAddPartitionMessage(partitionEvent.getTable(), partition), topicName);
      } else {
        LOG.info("Topic name not found in metastore. Suppressing HCatalog notification for "
          + partition.getDbName()
          + "."
          + partition.getTableName()
          + " To enable notifications for this table, please do alter table set properties ("
          + HCatConstants.HCAT_MSGBUS_TOPIC_NAME
          + "=<dbname>.<tablename>) or whatever you want topic name to be.");
      }
    }
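
On the subscriber side, a minimal JMS consumer for these notifications might look like the following sketch (the broker URL, database, and table names are illustrative, and any JMS provider can stand in for ActiveMQ; note that JMS message selectors quote string literals):

  ConnectionFactory factory = new ActiveMQConnectionFactory("tcp://localhost:61616");
  Connection conn = factory.createConnection();
  conn.start();
  Session session = conn.createSession(false, Session.AUTO_ACKNOWLEDGE);
  Topic topic = session.createTopic("mydb.mytable");
  MessageConsumer consumer =
      session.createConsumer(topic, "HCAT_EVENT = 'HCAT_ADD_PARTITION'");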

  /**
   * Sends a drop-partition notification. The StorageDescriptor's collection
   * fields are re-initialized below to work around DataNucleus lazy-loading
   * issues; the workaround can be removed once
   * HIVE-2084 "Upgrade datanucleus from 2.0.3 to 3.0.1" is resolved.
   */
  @Override
  public void onDropPartition(DropPartitionEvent partitionEvent) throws MetaException {
    if (partitionEvent.getStatus()) {
      Partition partition = partitionEvent.getPartition();
      StorageDescriptor sd = partition.getSd();
      sd.setBucketCols(new ArrayList<String>());
      sd.setSortCols(new ArrayList<Order>());
      sd.setParameters(new HashMap<String, String>());
      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
      sd.getSkewedInfo().setSkewedColNames(new ArrayList<String>());
      String topicName = getTopicName(partition, partitionEvent);
      if (topicName != null && !topicName.equals("")) {
        send(messageFactory.buildDropPartitionMessage(partitionEvent.getTable(), partition), topicName);
      } else {
        LOG.info("Topic name not found in metastore. Suppressing HCatalog notification for "
          + partition.getDbName()
          + "."
          + partition.getTableName()
          + " To enable notifications for this table, please do alter table set properties ("
          + HCatConstants.HCAT_MSGBUS_TOPIC_NAME
          + "=<dbname>.<tablename>) or whatever you want topic name to be.");
      }
    }
