Package com.facebook.giraph.hive.impl.input

Examples of com.facebook.giraph.hive.impl.input.InputInfo
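InputInfo bundles what the input format needs to read a Hive table: the table's HiveTableSchema, the ids of the requested columns, and the list of InputPartitions to scan. The fragments below show both halves of its lifecycle: it is built from the metastore when an input profile is initialized and written to the Hadoop Configuration, then read back in getSplits to drive split creation.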


    // Maps each requested column name to its index in the table schema
    // (Guava Function; its opening is truncated in the original fragment
    // and is reconstructed from the transform() call below).
    Function<String, Integer> columnNameToId = new Function<String, Integer>() {
      @Override
      public Integer apply(String input) {
        return tableSchema.positionOf(input);
      }
    };
    List<Integer> columnIds = transform(inputDesc.getColumns(), columnNameToId);

    InputInfo inputInfo = new InputInfo(tableSchema, columnIds);

    if (table.getPartitionKeysSize() == 0) {
      // table without partitions
      inputInfo.addPartition(new InputPartition(table));
    } else {
      // table with partitions, find matches to user filter.
      List<Partition> partitions = null;
      try {
        partitions = client.get_partitions_by_filter(dbName, tableName,
            inputDesc.getPartitionFilter(), (short) -1);
      } catch (NoSuchObjectException e) {
        throw new TException(e.getMessage());
      } catch (MetaException e) {
        throw new TException(e);
      }
      for (Partition partition : partitions) {
        inputInfo.addPartition(new InputPartition(table, partition));
      }
    }

    InputConf inputConf = new InputConf(conf, profileId);
    inputConf.writeNumSplitsToConf(inputDesc.getNumSplits());
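
The fragment above stops just before the InputInfo itself is persisted. A minimal sketch of the round trip through the Hadoop Configuration, assuming a writeInputInfoToConf counterpart to the readInputInfoFromConf call used in getSplits below (the writer's name is an assumption, not shown in these fragments):

    // Continues from the snippet above: conf, profileId, inputDesc and
    // inputInfo are the variables built there.
    InputConf inputConf = new InputConf(conf, profileId);
    inputConf.writeNumSplitsToConf(inputDesc.getNumSplits());
    // Assumed counterpart of readInputInfoFromConf(); the actual writer
    // method is not shown in these fragments.
    inputConf.writeInputInfoToConf(inputInfo);

    // Later, on the split-computation side:
    InputInfo roundTripped = new InputConf(conf, profileId).readInputInfoFromConf();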


    InputConf inputConf = new InputConf(conf, myProfileId);

    LOG.info("getSplits for profile " + inputConf.getProfileId());

    JobConf jobConf = new JobConf(conf);
    InputInfo inputInfo = inputConf.readInputInfoFromConf();

    int partitionNum = 0;
    List<InputSplit> splits = Lists.newArrayList();
    Iterable<InputPartition> partitions = inputInfo.getPartitions();

    for (InputPartition inputPartition : partitions) {
      org.apache.hadoop.mapred.InputFormat baseInputFormat =
          inputPartition.makeInputFormat(conf);
      HadoopUtils.setInputDir(jobConf, inputPartition.getLocation());

      int splitsRequested = inputConf.readNumSplitsFromConf();
      org.apache.hadoop.mapred.InputSplit[] baseSplits =
          baseInputFormat.getSplits(jobConf, splitsRequested);
      LOG.info("Requested " + splitsRequested + " from partition (" +
          partitionNum + " out of " + Iterables.size(partitions) +
          ") values: " +
          inputPartition.getInputSplitData().getPartitionValues() +
          ", got " + baseSplits.length + " splits");

      for (org.apache.hadoop.mapred.InputSplit baseSplit : baseSplits) {
        InputSplit split = new HiveApiInputSplit(baseInputFormat, baseSplit,
            inputInfo.getTableSchema(), inputInfo.getColumnIds(),
            inputPartition.getInputSplitData(), conf);
        splits.add(split);
      }

      partitionNum++;
    }

    return splits;
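
Two details of the loop above are easy to miss: the single JobConf is reused across partitions, with HadoopUtils.setInputDir overwriting the input path each iteration so the base InputFormat only sees the current partition's location; and splitsRequested is only a hint under the mapred InputFormat contract, which is why the log line reports both the requested and the actual split counts.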

    HiveTableSchemas.putForName(conf, dbName, tableName, tableSchema);
    HiveTableSchemas.putForProfile(conf, profileId, tableSchema);

    List<Integer> columnIds = transform(inputDesc.getColumns(), schemaLookupFunc(tableSchema));

    InputInfo inputInfo = new InputInfo(tableSchema, columnIds);

    if (table.getPartitionKeysSize() == 0) {
      // table without partitions
      inputInfo.addPartition(InputPartition.newFromHiveTable(table));
    } else {
      // table with partitions, find matches to user filter.
      List<Partition> partitions = null;
      try {
        partitions = client.get_partitions_by_filter(dbName, tableName,
            inputDesc.getPartitionFilter(), (short) -1);
      } catch (NoSuchObjectException e) {
        throw new TException(e.getMessage());
      } catch (MetaException e) {
        throw new TException(e);
      }
      for (Partition partition : partitions) {
        inputInfo.addPartition(InputPartition.newFromHivePartition(partition));
      }
    }

    InputConf inputConf = new InputConf(conf, profileId);
    inputConf.writeNumSplitsToConf(inputDesc.getNumSplits());
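
This later variant additionally registers the schema under both the table name and the profile id via HiveTableSchemas before building the InputInfo. A minimal sketch of reading it back, assuming getter counterparts to the putForName and putForProfile calls above (the getForName/getForProfile names are assumptions, not confirmed API):

    // Hypothetical lookups mirroring the put calls above; the getter names
    // are assumptions, not confirmed API.
    HiveTableSchema byName = HiveTableSchemas.getForName(conf, dbName, tableName);
    HiveTableSchema byProfile = HiveTableSchemas.getForProfile(conf, profileId);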
