Package org.apache.hadoop.hive.metastore.api

Examples of org.apache.hadoop.hive.metastore.api.Partition


    assertNull(tblParams.get(HowlConstants.HOWL_ISD_CLASS));
    assertNull(tblParams.get(HowlConstants.HOWL_OSD_CLASS));

    List<String> partVals = new ArrayList<String>(1);
    partVals.add("2010-10-10");
    Partition part = msc.getPartition(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, partVals);

    assertEquals(RCFileInputFormat.class.getName(), part.getSd().getInputFormat());
    assertEquals(RCFileOutputFormat.class.getName(), part.getSd().getOutputFormat());

    Map<String,String> partParams = part.getParameters();
    assertEquals(RCFileInputDriver.class.getName(), partParams.get(HowlConstants.HOWL_ISD_CLASS));
    assertEquals(RCFileOutputDriver.class.getName(), partParams.get(HowlConstants.HOWL_OSD_CLASS));

    howlDriver.run("drop table junit_sem_analysis");
  }
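The test above drops the table-level driver entries and then verifies that the partition read back through the metastore client carries the RCFile input/output formats in its storage descriptor and the driver classes in its parameter map. A minimal standalone sketch of that lookup, assuming a reachable metastore and an existing single-key partitioned table (the table name mytable and the key value 2010-10-10 are illustrative):

import java.util.Arrays;
import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.api.Partition;

public class PartitionLookupSketch {
  public static void main(String[] args) throws Exception {
    HiveMetaStoreClient msc = new HiveMetaStoreClient(new HiveConf());

    // Fetch the partition whose single key value is "2010-10-10" (illustrative).
    Partition part = msc.getPartition(MetaStoreUtils.DEFAULT_DATABASE_NAME,
        "mytable", Arrays.asList("2010-10-10"));

    // The storage descriptor describes the physical layout of this partition.
    System.out.println("location:     " + part.getSd().getLocation());
    System.out.println("input format: " + part.getSd().getInputFormat());

    // Free-form string metadata travels in the parameters map.
    for (Map.Entry<String, String> e : part.getParameters().entrySet()) {
      System.out.println(e.getKey() + " = " + e.getValue());
    }
    msc.close();
  }
}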


    assertNull(resp.getErrorMessage());

    resp = howlDriver.run("alter table junit_pigstorage_delim partition (b='2010-10-10') set fileformat inputformat '" + RCFileInputFormat.class.getName()
        +"' outputformat '"+RCFileOutputFormat.class.getName()+"' inputdriver '"+MyPigStorageDriver.class.getName()+"' outputdriver 'non-existent'");

    Partition part = msc.getPartition(MetaStoreUtils.DEFAULT_DATABASE_NAME, "junit_pigstorage_delim", "b=2010-10-10");
    Map<String,String> partParms = part.getParameters();
    partParms.put(PigStorageInputDriver.delim, "control-A");

    msc.alter_partition(MetaStoreUtils.DEFAULT_DATABASE_NAME, "junit_pigstorage_delim", part);

    PigServer server = new PigServer(ExecType.LOCAL, howlConf.getAllProperties());
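The fragment above follows the usual read-modify-write pattern for partition metadata: fetch the Partition by its name string, edit its parameter map in place, and hand the whole object back to alter_partition. A sketch of that pattern on its own; the database, table, and parameter names are placeholders:

import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;

public class AlterPartitionSketch {
  /** Adds (or overwrites) one parameter on an existing partition. */
  static void setPartitionParam(HiveMetaStoreClient msc, String db, String table,
      String partName, String key, String value) throws Exception {
    // Look the partition up by its name string, e.g. "b=2010-10-10".
    Partition part = msc.getPartition(db, table, partName);

    // getParameters() returns the live map, so it can be edited in place.
    part.getParameters().put(key, value);

    // alter_partition persists the whole modified Partition object.
    msc.alter_partition(db, table, part);
  }
}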

  }

  public Partition getPartition(String dbName, String tableName,
      List<String> part_vals) throws NoSuchObjectException, MetaException {
    openTransaction();
    Partition part = convertToPart(getMPartition(dbName, tableName, part_vals));
    commitTransaction();
    if (part == null) {
      throw new NoSuchObjectException("partition values="
          + part_vals.toString());
    }
    return part;
  }

  private Partition convertToPart(MPartition mpart) throws MetaException {
    if (mpart == null) {
      return null;
    }
    return new Partition(mpart.getValues(), mpart.getTable().getDatabase()
        .getName(), mpart.getTable().getTableName(), mpart.getCreateTime(),
        mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd()),
        mpart.getParameters());
  }
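convertToPart maps the JDO-backed MPartition onto the Thrift-generated Partition via its all-arguments constructor. The same object can also be assembled field by field with the generated setters; a sketch with illustrative values:

import java.util.Arrays;
import java.util.HashMap;

import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

public class BuildPartitionSketch {
  /** Builds an in-memory Partition for a single-key table (values are illustrative). */
  static Partition newPartition(StorageDescriptor sd) {
    Partition part = new Partition();
    part.setDbName("default");
    part.setTableName("mytable");
    part.setValues(Arrays.asList("2010-10-10"));        // one value per partition key
    part.setCreateTime((int) (System.currentTimeMillis() / 1000));
    part.setLastAccessTime(0);
    part.setSd(sd);                                      // location, formats, columns
    part.setParameters(new HashMap<String, String>());   // free-form string metadata
    return part;
  }
}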

      List<MPartition> mparts = listMPartitions(dbName, tblName, maxParts);
      List<Partition> parts = new ArrayList<Partition>(mparts == null ? 0 : mparts.size());
      if (mparts != null && !mparts.isEmpty()) {
        for (MPartition mpart : mparts) {
          MTable mtbl = mpart.getTable();
          Partition part = convertToPart(mpart);
          parts.add(part);

          if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
            String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl
                .getPartitionKeys()), part.getValues());
            PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(dbName,
                tblName, partName, userName, groupNames);
            part.setPrivileges(partAuth);
          }
        }
      }
      success = commitTransaction();
      return parts;
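This listing path converts each MPartition of the table and, when the table enables partition-level privileges, attaches a PrincipalPrivilegeSet to every result. On the client side the equivalent bulk read goes through listPartitions; a small sketch (the (short) -1 cap, meaning "all partitions", and the dump format are just for illustration):

import java.util.List;

import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;

public class ListPartitionsSketch {
  /** Prints the key values and data location of every partition of one table. */
  static void dumpPartitions(HiveMetaStoreClient msc, String db, String table) throws Exception {
    List<Partition> parts = msc.listPartitions(db, table, (short) -1);
    for (Partition p : parts) {
      System.out.println(p.getValues() + " -> " + p.getSd().getLocation());
    }
  }
}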

      if (mpart == null) {
        commitTransaction();
        throw new NoSuchObjectException("partition values="
            + partVals.toString());
      }
      MTable mtbl = mpart.getTable();
      Partition part = convertToPart(mpart);
      if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
        String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl
            .getPartitionKeys()), partVals);
        PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(dbName,
            tblName, partName, user_name, group_names);
        part.setPrivileges(partAuth);
      }

      success = commitTransaction();
      return part;
    } finally {

  public void publishTest(Job job) throws Exception {
    HowlOutputCommitter committer = new HowlOutputCommitter(null);
    committer.cleanupJob(job);

    Partition part = client.getPartition(dbName, tblName, Arrays.asList("p1"));
    assertNotNull(part);

    StorerInfo storer = InitializeInput.extractStorerInfo(part.getSd(),part.getParameters());
    assertEquals(storer.getInputSDClass(), "testInputClass");
    assertEquals(storer.getProperties().get("howl.testarg"), "testArgValue");
    assertTrue(part.getSd().getLocation().indexOf("p1") != -1);
  }

    Table table2 = client.getTable("default", "junit_parted_noisd");
    assertFalse(table2.getParameters().containsKey(HowlConstants.HOWL_ISD_CLASS));
    assertTrue(table2.getSd().getInputFormat().equals(HowlConstants.HIVE_RCFILE_IF_CLASS));
   
    // assert that there is one partition present, and it had howl instrumentation inserted when it was created.
    Partition ptn = client.getPartition("default", "junit_parted_noisd", Arrays.asList("42"));

    assertNotNull(ptn);
    assertTrue(ptn.getParameters().containsKey(HowlConstants.HOWL_ISD_CLASS));
    assertTrue(ptn.getSd().getInputFormat().equals(HowlConstants.HIVE_RCFILE_IF_CLASS));
    driver.run("drop table junit_unparted_noisd");
  }

    private Partition append_partition_common(RawStore ms, String dbName, String tableName,
        List<String> part_vals) throws InvalidObjectException,
        AlreadyExistsException, MetaException {

      Partition part = new Partition();
      boolean success = false, madeDir = false;
      Path partLocation = null;
      try {
        ms.openTransaction();
        part.setDbName(dbName);
        part.setTableName(tableName);
        part.setValues(part_vals);

        Table tbl = ms.getTable(part.getDbName(), part.getTableName());
        if (tbl == null) {
          throw new InvalidObjectException(
              "Unable to add partition because table or database do not exist");
        }

        part.setSd(tbl.getSd());
        partLocation = new Path(tbl.getSd().getLocation(), Warehouse
            .makePartName(tbl.getPartitionKeys(), part_vals));
        part.getSd().setLocation(partLocation.toString());

        Partition old_part = null;
        try {
          old_part = get_partition(part.getDbName(), part
            .getTableName(), part.getValues());
        } catch (NoSuchObjectException e) {
          // this means there is no existing partition
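append_partition_common builds the new Partition on the server side: it copies the table's storage descriptor, derives the partition directory from the partition keys and values with Warehouse.makePartName, and checks whether the partition already exists before creating anything. A client-side sketch of the same construction through HiveMetaStoreClient.add_partition, assuming the data directory is created elsewhere (all names are placeholders):

import java.util.HashMap;
import java.util.List;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;

public class AddPartitionSketch {
  /** Registers a new partition whose data lives under the table's own location. */
  static Partition addPartition(HiveMetaStoreClient msc, String db, String tableName,
      List<String> partVals) throws Exception {
    Table tbl = msc.getTable(db, tableName);

    Partition part = new Partition();
    part.setDbName(db);
    part.setTableName(tableName);
    part.setValues(partVals);
    part.setParameters(new HashMap<String, String>());

    // Copy the table's storage descriptor and point it at the partition directory.
    StorageDescriptor sd = new StorageDescriptor(tbl.getSd());
    sd.setLocation(new Path(tbl.getSd().getLocation(),
        Warehouse.makePartName(tbl.getPartitionKeys(), partVals)).toString());
    part.setSd(sd);

    msc.add_partition(part);
    return part;
  }
}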

        for (String part : part_vals) {
          LOG.debug(part);
        }
      }

      Partition ret = null;
      try {
        ret = executeWithRetry(new Command<Partition>() {
          @Override
          Partition run(RawStore ms) throws Exception {
            return append_partition_common(ms, dbName, tableName, part_vals);
