Package org.apache.hive.hcatalog.data.schema

Examples of org.apache.hive.hcatalog.data.schema.HCatSchema
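The excerpts below come from different source files, so each one is a fragment of a larger method. As a quick orientation, here is a minimal, self-contained sketch (not taken from any of the excerpts; the class and field names are illustrative) of the two ways an HCatSchema is typically built: by appending HCatFieldSchema objects, or by parsing a schema string with HCatSchemaUtils.

import java.util.ArrayList;

import org.apache.hive.hcatalog.common.HCatException;
import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
import org.apache.hive.hcatalog.data.schema.HCatSchema;
import org.apache.hive.hcatalog.data.schema.HCatSchemaUtils;

public class HCatSchemaSketch {
  public static void main(String[] args) throws HCatException {
    // Build a schema field by field.
    HCatSchema schema = new HCatSchema(new ArrayList<HCatFieldSchema>());
    schema.append(new HCatFieldSchema("id", HCatFieldSchema.Type.INT, "row id"));
    schema.append(new HCatFieldSchema("name", HCatFieldSchema.Type.STRING, "display name"));

    // Or parse the same shape from a column string, as several tests below do.
    HCatSchema parsed = HCatSchemaUtils.getHCatSchema("id:int,name:string");

    // Fields can be looked up by name or by position.
    System.out.println(schema.getPosition("name")); // 1
    System.out.println(parsed.get("id").getType()); // INT
  }
}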


        outputJobInfo.setPartitionValues(valueMap);
      }

      // Work around an HBase failure on a single node; see BUG-4383
      conf.set("dfs.client.read.shortcircuit", "false");
      HCatSchema tableSchema = HCatUtil.extractSchema(table);
      StorerInfo storerInfo =
        InternalUtil.extractStorerInfo(table.getTTable().getSd(), table.getParameters());

      List<String> partitionCols = new ArrayList<String>();
      for (FieldSchema schema : table.getPartitionKeys()) {


        HCatContext.INSTANCE.getConf().get().getBoolean(
          HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION,
          HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION_DEFAULT));
    }

    HCatSchema tableSchema = inpy.getTableSchema(ijob.getConfiguration());
    System.err.println("Copying from [" + in + "] to [" + out + "] with schema: " + tableSchema.toString());
    oupy.setSchema(ojob, tableSchema);
    oupy.checkOutputSpecs(ojob);
    OutputCommitter oc = oupy.getOutputCommitter(createTaskAttemptContext(ojob.getConfiguration()));
    oc.setupJob(ojob);

    // We also need to update the output schema with these deletions.

    // Note that output storage handlers never see partition columns in the data
    // or the schema.

    HCatSchema schemaWithoutParts = new HCatSchema(schema.getFields());
    for (String partKey : partMap.keySet()) {
      Integer idx;
      if ((idx = schema.getPosition(partKey)) != null) {
        posOfPartCols.add(idx);
        schemaWithoutParts.remove(schema.get(partKey));
      }
    }

    // Also, if dynamic partitioning is being used, we want to set the appropriate
    // list of columns to be dynamically specified. These are partition keys too,
    // so they also need to be removed from the output schema and the partition columns.

    if (jobInfo.isDynamicPartitioningUsed()) {
      for (String partKey : jobInfo.getDynamicPartitioningKeys()) {
        Integer idx;
        if ((idx = schema.getPosition(partKey)) != null) {
          posOfPartCols.add(idx);
          posOfDynPartCols.add(idx);
          schemaWithoutParts.remove(schema.get(partKey));
        }
      }
    }

    HCatUtil.validatePartitionSchema(


  // Test that new columns get added to the table schema
  private void tableSchemaTest() throws Exception {

    HCatSchema tableSchema = getTableSchema();

    assertEquals(4, tableSchema.getFields().size());

    //Update partition schema to have 3 fields
    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c3", serdeConstants.STRING_TYPE_NAME, "")));

    writeRecords = new ArrayList<HCatRecord>();

    for (int i = 0; i < 20; i++) {
      List<Object> objList = new ArrayList<Object>();

      objList.add(i);
      objList.add("strvalue" + i);
      objList.add("str2value" + i);

      writeRecords.add(new DefaultHCatRecord(objList));
    }

    Map<String, String> partitionMap = new HashMap<String, String>();
    partitionMap.put("part1", "p1value5");
    partitionMap.put("part0", "p0value5");

    runMRCreate(partitionMap, partitionColumns, writeRecords, 10, true);

    tableSchema = getTableSchema();

    // Assert that c3 has been added to the table schema
    assertEquals(5, tableSchema.getFields().size());
    assertEquals("c1", tableSchema.getFields().get(0).getName());
    assertEquals("c2", tableSchema.getFields().get(1).getName());
    assertEquals("c3", tableSchema.getFields().get(2).getName());
    assertEquals("part1", tableSchema.getFields().get(3).getName());
    assertEquals("part0", tableSchema.getFields().get(4).getName());

    //Test that changing column data type fails
    partitionMap.clear();
    partitionMap.put("part1", "p1value6");
    partitionMap.put("part0", "p0value6");

  }

  // Check behavior when changing the order of columns
  private void columnOrderChangeTest() throws Exception {

    HCatSchema tableSchema = getTableSchema();

    assertEquals(5, tableSchema.getFields().size());

    partitionColumns = new ArrayList<HCatFieldSchema>();
    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", serdeConstants.INT_TYPE_NAME, "")));
    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c3", serdeConstants.STRING_TYPE_NAME, "")));
    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", serdeConstants.STRING_TYPE_NAME, "")));

  /**
   * @throws HCatException
   */
  public void testGetSetByType1() throws HCatException {
    HCatRecord inpRec = getHCatRecords()[0];
    HCatRecord newRec = new DefaultHCatRecord(inpRec.size());
    HCatSchema hsch =
        HCatSchemaUtils.getHCatSchema(
            "a:tinyint,b:smallint,c:int,d:bigint,e:float,f:double,g:boolean,h:string,i:binary,j:string");


    newRec.setByte("a", hsch, inpRec.getByte("a", hsch));

   */
  public void testGetSetByType2() throws HCatException {
    HCatRecord inpRec = getGetSet2InpRec();

    HCatRecord newRec = new DefaultHCatRecord(inpRec.size());
    HCatSchema hsch =
        HCatSchemaUtils.getHCatSchema("a:binary,b:map<string,string>,c:array<int>,d:struct<i:int>");


    newRec.setByteArray("a", hsch, inpRec.getByteArray("a", hsch));
    newRec.setMap("b", hsch, inpRec.getMap("b", hsch));

    }
  }

  private HCatSchema getProjectionSchema() throws HCatException {

    HCatSchema schema = new HCatSchema(new ArrayList<HCatFieldSchema>());
    schema.append(new HCatFieldSchema("key", HCatFieldSchema.Type.STRING,
      ""));
    schema.append(new HCatFieldSchema("testqualifier1",
      HCatFieldSchema.Type.STRING, ""));
    return schema;
  }
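In the tests that build a projection like this, the schema is then typically registered with the input format so that only the projected columns are materialized. A minimal sketch of that step, not part of the excerpt above (ProjectionSketch and applyProjection are illustrative names, and it assumes HCatInputFormat.setInput has already been called on the job):

import java.io.IOException;

import org.apache.hadoop.mapreduce.Job;
import org.apache.hive.hcatalog.data.schema.HCatSchema;
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat;

public class ProjectionSketch {
  // Restrict the read to the projected columns; the HCatRecords handed to the
  // mapper will then contain only "key" and "testqualifier1".
  static void applyProjection(Job job, HCatSchema projection) throws IOException {
    HCatInputFormat.setOutputSchema(job, projection);
  }
}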

    String[] cNames, boolean dontCreate, boolean isQuery) throws Exception {
    CreateMode mode = CreateMode.CREATE;
    if (dontCreate) {
      mode = CreateMode.NO_CREATION;
    }
    HCatSchema tblSchema =
      utils.createHCatTable(mode, totalRecords, table, cols);
    utils.createSqlTable(getConnection(), false, totalRecords, table, cols);
    addlArgsArray.add("-m");
    addlArgsArray.add("1");
    addlArgsArray.add("--hcatalog-table");

  }

  public HCatSchema createHCatTable(CreateMode mode, int count,
    String table, ColumnGenerator... extraCols)
    throws Exception {
    HCatSchema hCatTblSchema = generateHCatTableSchema(extraCols);
    HCatSchema hCatPartSchema = generateHCatPartitionSchema(extraCols);
    HCatSchema hCatFullSchema = new HCatSchema(hCatTblSchema.getFields());
    for (HCatFieldSchema hfs : hCatPartSchema.getFields()) {
      hCatFullSchema.append(hfs);
    }
    if (mode != CreateMode.NO_CREATION) {

      createHCatTableUsingSchema(null, table,
        hCatTblSchema.getFields(), hCatPartSchema.getFields());
      if (mode == CreateMode.CREATE_AND_LOAD) {
        HCatSchema hCatLoadSchema = new HCatSchema(hCatTblSchema.getFields());
        HCatSchema dynPartSchema =
          generateHCatDynamicPartitionSchema(extraCols);
        for (HCatFieldSchema hfs : dynPartSchema.getFields()) {
          hCatLoadSchema.append(hfs);
        }
        loadHCatTable(hCatLoadSchema, table, count, extraCols);
      }
    }
