Package org.apache.hive.hcatalog.data.schema

Examples of org.apache.hive.hcatalog.data.schema.HCatSchema
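HCatSchema wraps an ordered list of HCatFieldSchema objects and offers lookups by name or position (get, getPosition, getFieldNames) as well as mutation through append and remove. Before the excerpts from the HCatalog sources below, here is a minimal self-contained sketch of that API; the column names and comments are illustrative only, not taken from any of the snippets:

  // Minimal sketch (illustrative names): build a two-column schema and query it.
  private static HCatSchema buildExampleSchema() throws HCatException {
    HCatSchema schema = new HCatSchema(new ArrayList<HCatFieldSchema>());
    schema.append(new HCatFieldSchema("id", HCatFieldSchema.Type.INT, "row id"));
    schema.append(new HCatFieldSchema("name", HCatFieldSchema.Type.STRING, "display name"));
    Integer pos = schema.getPosition("name");       // 1; would be null if the field were absent
    HCatFieldSchema nameField = schema.get("name");  // lookup by name
    return schema;
  }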


  /**
   * gets values for fields requested by output schema which will not be in the data
   */
  private static Map<String, String> getColValsNotInDataColumns(HCatSchema outputSchema,
                                  PartInfo partInfo) {
    HCatSchema dataSchema = partInfo.getPartitionSchema();
    Map<String, String> vals = new HashMap<String, String>();
    for (String fieldName : outputSchema.getFieldNames()) {
      if (dataSchema.getPosition(fieldName) == null) {
        // this output field is not present in the data schema, so we check
        // whether it is a partition column whose value comes from the partition spec

        if (partInfo.getPartitionValues().containsKey(fieldName)) {
          vals.put(fieldName, partInfo.getPartitionValues().get(fieldName));
        }
        // ... (rest of the original method elided in this excerpt)
      }
    }
    return vals;
  }
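The check above treats a null result from getPosition as the signal that an output field is not backed by the data schema, in which case its value is served from the partition spec. A small hypothetical helper (not part of HCatalog) that captures just that test:

  // Illustrative helper: true when the requested field is absent from the data
  // schema and must therefore come from the partition values, as in the method above.
  private static boolean servedFromPartitionSpec(HCatSchema dataSchema, String fieldName) {
    return dataSchema.getPosition(fieldName) == null;
  }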


   * @return the combined table schema (data columns followed by partition columns)
   * @throws IOException if the table schema information is not available
   *                     for the current context
   */
  public static HCatSchema getTableSchema(Configuration conf)
    throws IOException {
    InputJobInfo inputJobInfo = getJobInfo(conf);
    HCatSchema allCols = new HCatSchema(new LinkedList<HCatFieldSchema>());
    for (HCatFieldSchema field :
      inputJobInfo.getTableInfo().getDataColumns().getFields()) {
      allCols.append(field);
    }
    for (HCatFieldSchema field :
      inputJobInfo.getTableInfo().getPartitionColumns().getFields()) {
      allCols.append(field);
    }
    return allCols;
  }
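getTableSchema above returns the table's data columns followed by its partition columns in a single HCatSchema. As a hedged usage sketch (the helper name is invented, and it assumes the input table has already been configured on conf), the combined schema can be walked with getFields:

  // Hypothetical helper: print every column (data and partition) exposed by the
  // combined schema built by getTableSchema above.
  private static void dumpTableSchema(Configuration conf) throws IOException {
    HCatSchema tableSchema = getTableSchema(conf); // data columns first, then partition columns
    for (HCatFieldSchema field : tableSchema.getFields()) {
      System.out.println(field.getName() + " : " + field.getTypeString());
    }
  }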

    // We also need to update the output Schema with these deletions.

    // Note that, output storage handlers never sees partition columns in data
    // or schema.

    HCatSchema schemaWithoutParts = new HCatSchema(schema.getFields());
    for (String partKey : partMap.keySet()) {
      Integer idx;
      if ((idx = schema.getPosition(partKey)) != null) {
        posOfPartCols.add(idx);
        schemaWithoutParts.remove(schema.get(partKey));
      }
    }

    // Also, if dynamic partitioning is being used, we want to
    // set appropriate list of columns for the columns to be dynamically specified.
    // These would be partition keys too, so would also need to be removed from
    // output schema and partcols

    if (jobInfo.isDynamicPartitioningUsed()) {
      for (String partKey : jobInfo.getDynamicPartitioningKeys()) {
        Integer idx;
        if ((idx = schema.getPosition(partKey)) != null) {
          posOfPartCols.add(idx);
          posOfDynPartCols.add(idx);
          schemaWithoutParts.remove(schema.get(partKey));
        }
      }
    }

    HCatUtil.validatePartitionSchema(
    // ... (call arguments and the rest of the method elided in this excerpt)
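The loop above records the positions of partition keys and removes them from a copy of the output schema, since storage handlers never see partition columns in the data. The same getPosition/remove pattern, isolated into a small hypothetical helper (not part of HCatalog):

  // Illustrative helper: return a copy of the schema with one partition key removed.
  private static HCatSchema dropPartitionKey(HCatSchema schema, String partKey)
      throws HCatException {
    HCatSchema withoutPart =
        new HCatSchema(new ArrayList<HCatFieldSchema>(schema.getFields()));
    if (schema.getPosition(partKey) != null) {
      withoutPart.remove(schema.get(partKey)); // remove takes the field, not the position
    }
    return withoutPart;
  }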

      context.write(NullWritable.get(), record);
    }
  }

  private HCatSchema getSchema() throws HCatException {
    HCatSchema schema = new HCatSchema(new ArrayList<HCatFieldSchema>());
    schema.append(new HCatFieldSchema("a0", HCatFieldSchema.Type.INT,
        ""));
    schema.append(new HCatFieldSchema("a1",
        HCatFieldSchema.Type.STRING, ""));
    schema.append(new HCatFieldSchema("a2",
        HCatFieldSchema.Type.STRING, ""));
    return schema;
  }

    job.setMapOutputKeyClass(BytesWritable.class);
    job.setMapOutputValueClass(DefaultHCatRecord.class);

    job.setNumReduceTasks(0);

    HCatOutputFormat.setSchema(job, new HCatSchema(columns));

    boolean success = job.waitForCompletion(true);
    Assert.assertFalse(success);
  }

  public static HCatSchema extractSchema(Table table) throws HCatException {
    return new HCatSchema(HCatUtil.getHCatFieldSchemaList(table.getCols()));
  }

  public static HCatSchema extractSchema(Partition partition) throws HCatException {
    return new HCatSchema(HCatUtil.getHCatFieldSchemaList(partition.getCols()));
  }
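Both extractSchema overloads convert a metastore column list into an HCatSchema via HCatUtil.getHCatFieldSchemaList. A hedged usage fragment, assuming table and partition objects are already in scope:

  // Hypothetical caller: the table-level and partition-level schemas can then
  // be inspected or compared field by field.
  HCatSchema tableCols = extractSchema(table);
  HCatSchema partCols = extractSchema(partition);
  boolean sameWidth = (tableCols.size() == partCols.size());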

    throws NoSuchObjectException, TException, MetaException {
    return new Table(client.getTable(dbName, tableName));
  }

  public static HCatSchema getTableSchemaWithPtnCols(Table table) throws IOException {
    HCatSchema tableSchema = new HCatSchema(HCatUtil.getHCatFieldSchemaList(table.getCols()));

    if (table.getPartitionKeys().size() != 0) {

      // add partition keys to table schema
      // NOTE : this assumes that we do not ever have ptn keys as columns
      // inside the table schema as well!
      for (FieldSchema fs : table.getPartitionKeys()) {
        tableSchema.append(HCatSchemaUtils.getHCatFieldSchema(fs));
      }
    }
    return tableSchema;
  }
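The NOTE in getTableSchemaWithPtnCols assumes a partition key never also appears among the table's data columns. A purely illustrative defensive variant (not part of HCatUtil) could guard against that by checking getPosition before appending:

  // Illustrative variant: append only partition keys that are not already data columns.
  public static HCatSchema getTableSchemaWithPtnColsDefensive(Table table) throws IOException {
    HCatSchema tableSchema = new HCatSchema(HCatUtil.getHCatFieldSchemaList(table.getCols()));
    for (FieldSchema fs : table.getPartitionKeys()) {
      HCatFieldSchema partField = HCatSchemaUtils.getHCatFieldSchema(fs);
      if (tableSchema.getPosition(partField.getName()) == null) {
        tableSchema.append(partField);
      }
    }
    return tableSchema;
  }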

  /**
   * Returns the partition columns of a table as an HCatSchema.
   * @param table the instance to extract partition columns from
   * @return HCatSchema instance which contains the partition columns
   * @throws IOException
   */
  public static HCatSchema getPartitionColumns(Table table) throws IOException {
    HCatSchema cols = new HCatSchema(new LinkedList<HCatFieldSchema>());
    if (table.getPartitionKeys().size() != 0) {
      for (FieldSchema fs : table.getPartitionKeys()) {
        cols.append(HCatSchemaUtils.getHCatFieldSchema(fs));
      }
    }
    return cols;
  }
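Since getPartitionColumns returns only the partition keys, a caller can use it to test whether a name refers to a partition column. A hedged fragment (the column name "ds" is invented):

  // Hypothetical check: is "ds" a partition column of this table?
  HCatSchema partitionCols = getPartitionColumns(table);
  boolean isPartitionCol = (partitionCols.getPosition("ds") != null);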

      partitionVals = new HashMap<String, String>(1);
      partitionVals.put(s[0], val);
    }
    HCatOutputFormat.setOutput(job, OutputJobInfo.create(dbName,
      outputTableName, partitionVals));
    HCatSchema s = HCatInputFormat.getTableSchema(job);
    // Build the schema for this table, which is slightly different from the
    // schema for the input table
    List<HCatFieldSchema> fss = new ArrayList<HCatFieldSchema>(3);
    fss.add(s.get(0));
    fss.add(s.get(1));
    fss.add(s.get(3));
    HCatOutputFormat.setSchema(job, new HCatSchema(fss));
    job.setOutputFormatClass(HCatOutputFormat.class);
    return (job.waitForCompletion(true) ? 0 : 1);
  }
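The driver above projects the output schema by position (s.get(0), s.get(1), s.get(3)). A by-name projection is often less brittle; this hedged alternative (the column names are hypothetical) would sit in place of the three fss.add calls inside the method:

    // Hypothetical by-name projection replacing the positional s.get(0/1/3) calls above.
    List<HCatFieldSchema> projected = new ArrayList<HCatFieldSchema>();
    for (String name : new String[] {"user", "page", "ds"}) {
      projected.add(s.get(name)); // throws HCatException if the column is missing
    }
    HCatOutputFormat.setSchema(job, new HCatSchema(projected));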
