Package org.apache.hadoop.hive.metastore.api

Examples of org.apache.hadoop.hive.metastore.api.Partition
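For orientation, here is a minimal sketch of the construct-and-register flow that the examples below exercise. The open HiveMetaStoreClient ('client'), the database and table names, and the partition values are assumptions for illustration, not code taken from any of the excerpts.

      // Minimal sketch (assumed: an open HiveMetaStoreClient 'client' and an existing
      // table dbName.tblName partitioned by ds and hr).
      Table tbl = client.getTable(dbName, tblName);

      Partition part = new Partition();
      part.setDbName(dbName);
      part.setTableName(tblName);
      part.setValues(Arrays.asList("2008-07-01", "14"));     // one value per partition key
      part.setParameters(new HashMap<String, String>());
      part.setSd(new StorageDescriptor(tbl.getSd()));         // copy, so the table's SD is not mutated
      part.getSd().setLocation(tbl.getSd().getLocation() + "/ds=2008-07-01/hr=14");

      client.add_partition(part);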


      try {
        client = HowlOutputFormat.createHiveClient(tableInfo.getServerUri(), conf);

        StorerInfo storer = InitializeInput.extractStorerInfo(table.getSd(),table.getParameters());

        Partition partition = new Partition();
        partition.setDbName(tableInfo.getDatabaseName());
        partition.setTableName(tableInfo.getTableName());
        partition.setSd(new StorageDescriptor(tblSD));
        partition.getSd().setLocation(jobInfo.getLocation());

        updateTableSchema(client, table, jobInfo.getOutputSchema());

        List<FieldSchema> fields = new ArrayList<FieldSchema>();
        for(HowlFieldSchema fieldSchema : jobInfo.getOutputSchema().getFields()) {
          fields.add(HowlSchemaUtils.getFieldSchema(fieldSchema));
        }

        partition.getSd().setCols(fields);

        Map<String,String> partKVs = tableInfo.getPartitionValues();
        // Get partition value list
        partition.setValues(getPartitionValueList(table,partKVs));

        Map<String, String> params = new HashMap<String, String>();
        params.put(HowlConstants.HOWL_ISD_CLASS, storer.getInputSDClass());
        params.put(HowlConstants.HOWL_OSD_CLASS, storer.getOutputSDClass());

        // Copy table-level howl.* keys to the partition
        for(Map.Entry<Object, Object> entry : storer.getProperties().entrySet()) {
          params.put(entry.getKey().toString(), entry.getValue().toString());
        }

        partition.setParameters(params);

        // Sets permissions and group name on partition dirs.
        FileStatus tblStat = fs.getFileStatus(tblPath);
        String grpName = tblStat.getGroup();
        FsPermission perms = tblStat.getPermission();
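
The excerpt above stops after reading the table directory's group and permissions. One plausible continuation, shown purely as an assumption since the original code is cut off here, is to apply those settings to the partition location and then register the Partition with the metastore client:

        // Hypothetical continuation: apply the table directory's permissions (and, with
        // sufficient privileges, its group) to the partition location written by the job,
        // then register the Partition with the metastore.
        Path partPath = new Path(partition.getSd().getLocation());
        fs.setPermission(partPath, perms);
        // fs.setOwner(partPath, null, grpName);   // changing the group may require elevated privileges
        client.add_partition(partition);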


        // on the location being present in the 'tbl' object - so get the table
        // from the metastore
        tbl = client.getTable(dbName, tblName);
      }

      Partition part = new Partition();
      part.setDbName(dbName);
      part.setTableName(tblName);
      part.setValues(vals);
      part.setParameters(new HashMap<String, String>());
      part.setSd(tbl.getSd());
      part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
      part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");

      Partition part2 = new Partition();
      part2.setDbName(dbName);
      part2.setTableName(tblName);
      part2.setValues(vals2);
      part2.setParameters(new HashMap<String, String>());
      part2.setSd(tbl.getSd());
      part2.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
      part2.getSd().setLocation(tbl.getSd().getLocation() + "/part2");

      Partition part3 = new Partition();
      part3.setDbName(dbName);
      part3.setTableName(tblName);
      part3.setValues(vals3);
      part3.setParameters(new HashMap<String, String>());
      part3.setSd(tbl.getSd());
      part3.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
      part3.getSd().setLocation(tbl.getSd().getLocation() + "/part3");

      Partition part4 = new Partition();
      part4.setDbName(dbName);
      part4.setTableName(tblName);
      part4.setValues(vals4);
      part4.setParameters(new HashMap<String, String>());
      part4.setSd(tbl.getSd());
      part4.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
      part4.getSd().setLocation(tbl.getSd().getLocation() + "/part4");

      // check if the partition exists (it shouldn't)
      boolean exceptionThrown = false;
      try {
        Partition p = client.getPartition(dbName, tblName, vals);
      } catch(Exception e) {
        assertEquals("partition should not have existed",
            NoSuchObjectException.class, e.getClass());
        exceptionThrown = true;
      }
      assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown);
      Partition retp = client.add_partition(part);
      assertNotNull("Unable to create partition " + part, retp);
      Partition retp2 = client.add_partition(part2);
      assertNotNull("Unable to create partition " + part2, retp2);
      Partition retp3 = client.add_partition(part3);
      assertNotNull("Unable to create partition " + part3, retp3);
      Partition retp4 = client.add_partition(part4);
      assertNotNull("Unable to create partition " + part4, retp4);

      Partition part_get = client.getPartition(dbName, tblName, part.getValues());
      if(isThriftClient) {
        // Since we are using the Thrift client, 'part' (and likewise part2 and part3)
        // will not have the create time and last DDL time set, because add_partition()
        // does not update the local object. Set them so the equals() check below
        // doesn't fail.
        adjust(client, part, dbName, tblName);
        adjust(client, part2, dbName, tblName);
        adjust(client, part3, dbName, tblName);
      }
      assertTrue("Partitions are not same", part.equals(part_get));

      String partName = "ds=2008-07-01 14%3A13%3A12/hr=14";
      String part2Name = "ds=2008-07-01 14%3A13%3A12/hr=15";
      String part3Name = "ds=2008-07-02 14%3A13%3A12/hr=15";

      part_get = client.getPartition(dbName, tblName, partName);
      assertTrue("Partitions are not the same", part.equals(part_get));

      // Test partition listing with a partial spec - ds is specified but hr is not
      List<String> partialVals = new ArrayList<String>();
      partialVals.add(vals.get(0));
      Set<Partition> parts = new HashSet<Partition>();
      parts.add(part);
      parts.add(part2);

      List<Partition> partial = client.listPartitions(dbName, tblName, partialVals,
          (short) -1);
      assertTrue("Should have returned 2 partitions", partial.size() == 2);
      assertTrue("Not all parts returned", partial.containsAll(parts));

      Set<String> partNames = new HashSet<String>();
      partNames.add(partName);
      partNames.add(part2Name);
      List<String> partialNames = client.listPartitionNames(dbName, tblName, partialVals,
          (short) -1);
      assertTrue("Should have returned 2 partition names", partialNames.size() == 2);
      assertTrue("Not all part names returned", partialNames.containsAll(partNames));

      // Test partition listing with a partial spec - hr is specified but ds is not
      parts.clear();
      parts.add(part2);
      parts.add(part3);

      partialVals.clear();
      partialVals.add("");
      partialVals.add(vals2.get(1));

      partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
      assertEquals("Should have returned 2 partitions", 2, partial.size());
      assertTrue("Not all parts returned", partial.containsAll(parts));

      partNames.clear();
      partNames.add(part2Name);
      partNames.add(part3Name);
      partialNames = client.listPartitionNames(dbName, tblName, partialVals,
          (short) -1);
      assertEquals("Should have returned 2 partition names", 2, partialNames.size());
      assertTrue("Not all part names returned", partialNames.containsAll(partNames));

      // Verify that a partition name with a bad key name ("hrs" instead of "hr") does not resolve to a partition
      exceptionThrown = false;
      try {
        String badPartName = "ds=2008-07-01 14%3A13%3A12/hrs=14";
        client.getPartition(dbName, tblName, badPartName);
      } catch(NoSuchObjectException e) {
        exceptionThrown = true;
      }
      assertTrue("Bad partition spec should have thrown an exception", exceptionThrown);

      Path partPath = new Path(part2.getSd().getLocation());
      FileSystem fs = FileSystem.get(partPath.toUri(), hiveConf);


      assertTrue(fs.exists(partPath));
      client.dropPartition(dbName, tblName, part.getValues(), true);
      assertFalse(fs.exists(partPath));

      // Test append_partition_by_name
      client.appendPartition(dbName, tblName, partName);
      Partition part5 = client.getPartition(dbName, tblName, part.getValues());
      assertTrue("Append partition by name failed", part5.getValues().equals(vals));;
      Path part5Path = new Path(part5.getSd().getLocation());
      assertTrue(fs.exists(part5Path));

      // Test drop_partition_by_name
      assertTrue("Drop partition by name failed",
          client.dropPartition(dbName, tblName, partName, true));

        // on the location being present in the 'tbl' object - so get the table
        // from the metastore
        tbl = client.getTable(dbName, tblName);
      }

      Partition part = new Partition();
      part.setDbName(dbName);
      part.setTableName(tblName);
      part.setValues(vals);
      part.setParameters(new HashMap<String, String>());
      part.setSd(tbl.getSd());
      part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
      part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");

      client.add_partition(part);

      Partition part2 = client.getPartition(dbName, tblName, part.getValues());

      part2.getParameters().put("retention", "10");
      part2.getSd().setNumBuckets(12);
      part2.getSd().getSerdeInfo().getParameters().put("abc", "1");
      client.alter_partition(dbName, tblName, part2);

      Partition part3 = client.getPartition(dbName, tblName, part.getValues());
      assertEquals("couldn't alter partition", part3.getParameters().get(
          "retention"), "10");
      assertEquals("couldn't alter partition", part3.getSd().getSerdeInfo()
          .getParameters().get("abc"), "1");
      assertEquals("couldn't alter partition", part3.getSd().getNumBuckets(),
          12);

      client.dropTable(dbName, tblName);

      client.dropDatabase(dbName);

  }

  private static void adjust(HiveMetaStoreClient client, Partition part,
      String dbName, String tblName)
  throws NoSuchObjectException, MetaException, TException {
    Partition part_get = client.getPartition(dbName, tblName, part.getValues());
    part.setCreateTime(part_get.getCreateTime());
    part.putToParameters(org.apache.hadoop.hive.metastore.api.Constants.DDL_TIME, Long.toString(part_get.getCreateTime()));
  }

  private void add_partition(HiveMetaStoreClient client, Table table,
      List<String> vals, String location) throws InvalidObjectException,
        AlreadyExistsException, MetaException, TException {

    Partition part = new Partition();
    part.setDbName(table.getDbName());
    part.setTableName(table.getTableName());
    part.setValues(vals);
    part.setParameters(new HashMap<String, String>());
    part.setSd(table.getSd());
    part.getSd().setSerdeInfo(table.getSd().getSerdeInfo());
    part.getSd().setLocation(table.getSd().getLocation() + location);

    client.add_partition(part);
  }
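
A hypothetical call site for this helper (the open client and the partitioned Table 'tbl' are assumptions):

    // Hypothetical usage: adds one partition under the table's location.
    add_partition(client, tbl, Arrays.asList("2008-07-01", "14"), "/part1");

Note that the helper shares the table's StorageDescriptor rather than copying it, so setLocation also rewrites the in-memory table object; a defensive copy such as new StorageDescriptor(table.getSd()) avoids that, as in the sketch at the top of the page.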

    return success;
  }
 
  public Partition getPartition(String dbName, String tableName, List<String> part_vals) throws MetaException {
    this.openTransaction();
    Partition part = convertToPart(this.getMPartition(dbName, tableName, part_vals));
    this.commitTransaction();
    return part;
  }

 
  private Partition convertToPart(MPartition mpart) throws MetaException {
    if(mpart == null) {
      return null;
    }
    return new Partition(
        mpart.getValues(),
        mpart.getTable().getDatabase().getName(),
        mpart.getTable().getTableName(),
        mpart.getCreateTime(),
        mpart.getLastAccessTime(),

      return null;
    }
    try {
      // Need to get the table for the partition key names.
      // This is inefficient because the caller of this function already has the table.
      Partition part = getPartitionObject(dbName, tableName, partVals);
      if(!wh.isDir(new Path(part.getSd().getLocation()))) {
        // The partition directory doesn't exist in HDFS, so return nothing.
        return null;
      }
      return part;
    } catch (NoSuchObjectException e) {

  public Partition appendPartition(String dbName, String tableName, List<String> partVals)
    throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
    if(partVals.size() == 0) {
      return null;
    }
    Partition part = null;
    try {
      // Need to get the table for the partition key names.
      // This is inefficient because the caller of this function already has the table.
      part = getPartitionObject(dbName, tableName, partVals);
      wh.mkdirs(new Path(part.getSd().getLocation())); // this will throw an exception if the dir couldn't be created
      return part;
    } catch (NoSuchObjectException e) {
      LOG.error(StringUtils.stringifyException(e));
      throw new InvalidObjectException("table or database doesn't exist");
    }

        throw new MetaException("Invalid partition spec: " + partVals);
      }
      pm.put(partKeys.get(i).getName(), partVals.get(i));
    }
    Path partPath = wh.getPartitionPath(dbName, tableName, pm);
    Partition tPartition = new Partition();
    tPartition.setValues(partVals);
    tPartition.setSd(tbl.getSd()); // TODO: get a copy
    tPartition.setParameters(new HashMap<String, String>());
    tPartition.getSd().setLocation(partPath.toString());
    return tPartition;
  }
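
The TODO above can be addressed with the Thrift-generated copy constructor, used the same way as new StorageDescriptor(tblSD) in the first example, so that setting the partition location does not mutate the table's own descriptor. A minimal sketch:

    // Copy the table's storage descriptor before overriding the location,
    // leaving tbl.getSd() untouched.
    StorageDescriptor partSd = new StorageDescriptor(tbl.getSd());
    partSd.setLocation(partPath.toString());
    tPartition.setSd(partSd);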
