Package: org.apache.hive.hcatalog.data.schema

Examples of org.apache.hive.hcatalog.data.schema.HCatFieldSchema


    boolean isExceptionCaught = false;
    // Table creation with a long table name causes ConnectionFailureException
    final String tableName = "Temptable" + new BigInteger(200, new Random()).toString(2);

    ArrayList<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
    cols.add(new HCatFieldSchema("id", Type.INT, "id columns"));
    cols.add(new HCatFieldSchema("value", Type.STRING, "id columns"));
    try {
      HCatCreateTableDesc tableDesc = HCatCreateTableDesc
        .create(null, tableName, cols).fileFormat("rcfile").build();
      client.createTable(tableDesc);
    } catch (Exception exp) {
View Full Code Here


    HCatClient client = HCatClient.create(new Configuration(hcatConf));
    String tableName = "Temptable";
    boolean isExceptionCaught = false;
    client.dropTable(null, tableName, true);
    ArrayList<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
    cols.add(new HCatFieldSchema("id", Type.INT, "id columns"));
    cols.add(new HCatFieldSchema("value", Type.STRING, "id columns"));
    try {
      HCatCreateTableDesc tableDesc = HCatCreateTableDesc
        .create(null, tableName, cols).fileFormat("rcfile").build();
      client.createTable(tableDesc);
      // The DB foo is non-existent.
View Full Code Here

      final String tableName = "testUpdateTableSchema_TableName";

      client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);

      client.createDatabase(HCatCreateDBDesc.create(dbName).build());
      List<HCatFieldSchema> oldSchema = Arrays.asList(new HCatFieldSchema("foo", Type.INT, ""),
          new HCatFieldSchema("bar", Type.STRING, ""));
      client.createTable(HCatCreateTableDesc.create(dbName, tableName, oldSchema).build());

      List<HCatFieldSchema> newSchema = Arrays.asList(new HCatFieldSchema("completely", Type.DOUBLE, ""),
          new HCatFieldSchema("new", Type.FLOAT, ""),
          new HCatFieldSchema("fields", Type.STRING, ""));

      client.updateTableSchema(dbName, tableName, newSchema);

      assertArrayEquals(newSchema.toArray(), client.getTable(dbName, tableName).getCols().toArray());
View Full Code Here

            exception instanceof ObjectNotFoundException);
      }

      String partitionColumn = "part";

      List<HCatFieldSchema> columns = Arrays.asList(new HCatFieldSchema("col", Type.STRING, ""));
      ArrayList<HCatFieldSchema> partitionColumns = new ArrayList<HCatFieldSchema>(
          Arrays.asList(new HCatFieldSchema(partitionColumn, Type.STRING, "")));
      HCatTable table = new HCatTable(dbName, tableName).cols(columns).partCols(partitionColumns);
      client.createTable(HCatCreateTableDesc.create(table, false).build());

      Map<String, String> partitionSpec = new HashMap<String, String>();
      partitionSpec.put(partitionColumn, "foobar");
View Full Code Here

      client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
      client.createDatabase(HCatCreateDBDesc.create(dbName).build());
      String messageBusTopicName = "MY.topic.name";
      Map<String, String> tableProperties = new HashMap<String, String>(1);
      tableProperties.put(HCatConstants.HCAT_MSGBUS_TOPIC_NAME, messageBusTopicName);
      client.createTable(HCatCreateTableDesc.create(dbName, tableName, Arrays.asList(new HCatFieldSchema("foo", Type.STRING, ""))).tblProps(tableProperties).build());

      assertEquals("MessageBus topic-name doesn't match!", messageBusTopicName, client.getMessageBusTopicName(dbName, tableName));
      client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
      client.close();
    }
View Full Code Here

      final String tableName = "myTable";

      client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);

      client.createDatabase(HCatCreateDBDesc.create(dbName).build());
      List<HCatFieldSchema> columnSchema = Arrays.asList(new HCatFieldSchema("foo", Type.INT, ""),
          new HCatFieldSchema("bar", Type.STRING, ""));

      List<HCatFieldSchema> partitionSchema = Arrays.asList(new HCatFieldSchema("dt", Type.STRING, ""),
          new HCatFieldSchema("grid", Type.STRING, ""));

      client.createTable(HCatCreateTableDesc.create(dbName, tableName, columnSchema).partCols(partitionSchema).build());

      HCatTable table = client.getTable(dbName, tableName);
      List<HCatFieldSchema> partitionColumns = table.getPartCols();
View Full Code Here

      final String tableName = "myTable";

      client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);

      client.createDatabase(HCatCreateDBDesc.create(dbName).build());
      List<HCatFieldSchema> columnSchema = Arrays.asList(new HCatFieldSchema("foo", Type.INT, ""),
          new HCatFieldSchema("bar", Type.STRING, ""));

      List<HCatFieldSchema> partitionSchema = Arrays.asList(new HCatFieldSchema("dt", Type.STRING, ""),
          new HCatFieldSchema("grid", Type.STRING, ""));

      HCatTable table = new HCatTable(dbName, tableName).cols(columnSchema).partCols(partitionSchema);
      client.createTable(HCatCreateTableDesc.create(table, false).build());

      // Verify that the table was created successfully.
View Full Code Here

      final String tableName = "myTable";

      client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);

      client.createDatabase(HCatCreateDBDesc.create(dbName).build());
      List<HCatFieldSchema> columnSchema = Arrays.asList(new HCatFieldSchema("foo", Type.INT, ""),
          new HCatFieldSchema("bar", Type.STRING, ""));

      List<HCatFieldSchema> partitionSchema = Arrays.asList(new HCatFieldSchema("dt", Type.STRING, ""),
          new HCatFieldSchema("grid", Type.STRING, ""));

      HCatTable table = new HCatTable(dbName, tableName).cols(columnSchema).partCols(partitionSchema);
      client.createTable(HCatCreateTableDesc.create(table, false).build());

      // Verify that the table was created successfully.
View Full Code Here

      final String tableName = "myTable";

      sourceMetaStore.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);

      sourceMetaStore.createDatabase(HCatCreateDBDesc.create(dbName).build());
      List<HCatFieldSchema> columnSchema = Arrays.asList(new HCatFieldSchema("foo", Type.INT, ""),
          new HCatFieldSchema("bar", Type.STRING, ""));

      List<HCatFieldSchema> partitionSchema = Arrays.asList(new HCatFieldSchema("dt", Type.STRING, ""),
          new HCatFieldSchema("grid", Type.STRING, ""));

      HCatTable sourceTable = new HCatTable(dbName, tableName).cols(columnSchema).partCols(partitionSchema);
      sourceMetaStore.createTable(HCatCreateTableDesc.create(sourceTable).build());

      // Verify that the sourceTable was created successfully.
      sourceTable = sourceMetaStore.getTable(dbName, tableName);
      assertNotNull("Table couldn't be queried for. ", sourceTable);

      // Serialize Table definition. Deserialize using the target HCatClient instance.
      String tableStringRep = sourceMetaStore.serializeTable(sourceTable);
      HCatClient targetMetaStore = HCatClient.create(new Configuration(replicationTargetHCatConf));
      targetMetaStore.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
      targetMetaStore.createDatabase(HCatCreateDBDesc.create(dbName).build());

      HCatTable targetTable = targetMetaStore.deserializeTable(tableStringRep);

      assertEquals("Table after deserialization should have been identical to sourceTable.",
          sourceTable.diff(targetTable), HCatTable.NO_DIFF);

      // Create table on Target.
      targetMetaStore.createTable(HCatCreateTableDesc.create(targetTable).build());
      // Verify that the created table is identical to sourceTable.
      targetTable = targetMetaStore.getTable(dbName, tableName);
      assertEquals("Table after deserialization should have been identical to sourceTable.",
          sourceTable.diff(targetTable), HCatTable.NO_DIFF);

      // Modify sourceTable.
      List<HCatFieldSchema> newColumnSchema = new ArrayList<HCatFieldSchema>(columnSchema);
      newColumnSchema.add(new HCatFieldSchema("goo_new", Type.DOUBLE, ""));
      Map<String, String> tableParams = new HashMap<String, String>(1);
      tableParams.put("orc.compress", "ZLIB");
      sourceTable.cols(newColumnSchema) // Add a column.
                 .fileFormat("orcfile")     // Change SerDe, File I/O formats.
                 .tblProps(tableParams)
View Full Code Here

      sourceMetaStore.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);

      sourceMetaStore.createDatabase(HCatCreateDBDesc.create(dbName).build());
      List<HCatFieldSchema> columnSchema = new ArrayList<HCatFieldSchema>(
          Arrays.asList(new HCatFieldSchema("foo", Type.INT, ""),
                        new HCatFieldSchema("bar", Type.STRING, "")));

      List<HCatFieldSchema> partitionSchema = Arrays.asList(new HCatFieldSchema("dt", Type.STRING, ""),
                                                            new HCatFieldSchema("grid", Type.STRING, ""));

      HCatTable sourceTable = new HCatTable(dbName, tableName).cols(columnSchema)
                                                              .partCols(partitionSchema)
                                                              .comment("Source table.");

      sourceMetaStore.createTable(HCatCreateTableDesc.create(sourceTable).build());

      // Verify that the sourceTable was created successfully.
      sourceTable = sourceMetaStore.getTable(dbName, tableName);
      assertNotNull("Table couldn't be queried for. ", sourceTable);

      // Partitions added now should inherit table-schema, properties, etc.
      Map<String, String> partitionSpec_1 = new HashMap<String, String>();
      partitionSpec_1.put("grid", "AB");
      partitionSpec_1.put("dt", "2011_12_31");
      HCatPartition sourcePartition_1 = new HCatPartition(sourceTable, partitionSpec_1, "");

      sourceMetaStore.addPartition(HCatAddPartitionDesc.create(sourcePartition_1).build());
      assertEquals("Unexpected number of partitions. ",
                   sourceMetaStore.getPartitions(dbName, tableName).size(), 1);
      // Verify that partition_1 was added correctly, and properties were inherited from the HCatTable.
      HCatPartition addedPartition_1 = sourceMetaStore.getPartition(dbName, tableName, partitionSpec_1);
      assertEquals("Column schema doesn't match.", addedPartition_1.getColumns(), sourceTable.getCols());
      assertEquals("InputFormat doesn't match.", addedPartition_1.getInputFormat(), sourceTable.getInputFileFormat());
      assertEquals("OutputFormat doesn't match.", addedPartition_1.getOutputFormat(), sourceTable.getOutputFileFormat());
      assertEquals("SerDe doesn't match.", addedPartition_1.getSerDe(), sourceTable.getSerdeLib());
      assertEquals("SerDe params don't match.", addedPartition_1.getSerdeParams(), sourceTable.getSerdeParams());

      // Replicate table definition.

      HCatClient targetMetaStore = HCatClient.create(new Configuration(replicationTargetHCatConf));
      targetMetaStore.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);

      targetMetaStore.createDatabase(HCatCreateDBDesc.create(dbName).build());
      // Make a copy of the source-table, as would be done across class-loaders.
      HCatTable targetTable = targetMetaStore.deserializeTable(sourceMetaStore.serializeTable(sourceTable));
      targetMetaStore.createTable(HCatCreateTableDesc.create(targetTable).build());
      targetTable = targetMetaStore.getTable(dbName, tableName);

      assertEquals("Created table doesn't match the source.",
                  targetTable.diff(sourceTable), HCatTable.NO_DIFF);

      // Modify Table schema at the source.
      List<HCatFieldSchema> newColumnSchema = new ArrayList<HCatFieldSchema>(columnSchema);
      newColumnSchema.add(new HCatFieldSchema("goo_new", Type.DOUBLE, ""));
      Map<String, String> tableParams = new HashMap<String, String>(1);
      tableParams.put("orc.compress", "ZLIB");
      sourceTable.cols(newColumnSchema) // Add a column.
          .fileFormat("orcfile")     // Change SerDe, File I/O formats.
          .tblProps(tableParams)
View Full Code Here

TOP

Related Classes of org.apache.hive.hcatalog.data.schema.HCatFieldSchema

Copyright © 2018 www.massapicom. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by ORACLE Inc. Contact coftware@gmail.com.