Examples of JobContext
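
The snippets below are pulled from tests and frameworks (Howl/HCatalog, Hive, Pig, Helix) that need a JobContext outside of a live MapReduce job. Most of them construct one directly with the pre-Hadoop-2 concrete constructors. For orientation, here is a minimal sketch of obtaining the same objects against Hadoop 2, where JobContext and TaskAttemptContext are interfaces; this sketch is illustrative and not taken from the examples:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.JobContext;
    import org.apache.hadoop.mapreduce.JobID;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.TaskAttemptID;
    import org.apache.hadoop.mapreduce.task.JobContextImpl;
    import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

    Configuration conf = new Configuration();

    // A Job is itself a JobContext, so this is the usual way to get one.
    JobContext fromJob = Job.getInstance(conf);

    // Tests that need bare contexts can use the concrete *Impl classes directly.
    JobContext jc = new JobContextImpl(conf, new JobID());
    TaskAttemptContext tac = new TaskAttemptContextImpl(conf, new TaskAttemptID());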


Examples of org.apache.hadoop.mapreduce.JobContext

    writer.append(bytes2);
    writer.close();
    BytesRefArrayWritable[] bytesArr = new BytesRefArrayWritable[]{bytes,bytes2};

    RCFileInputDriver sd = new RCFileInputDriver();
    JobContext jc = new JobContext(conf, new JobID());
    sd.setInputPath(jc, file.toString());
    InputFormat<?,?> iF = sd.getInputFormat(null);
    InputSplit split = iF.getSplits(jc).get(0);
    sd.setOriginalSchema(jc, buildHiveSchema());
    sd.setOutputSchema(jc, buildPrunedSchema());

    sd.initialize(jc, getProps());
    // copy the column-projection ids the driver recorded on the JobContext into the
    // conf that backs the TaskAttemptContext below
    conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR,
        jc.getConfiguration().get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR));
    TaskAttemptContext tac = new TaskAttemptContext(conf, new TaskAttemptID());
    RecordReader<?,?> rr = iF.createRecordReader(split,tac);
    rr.initialize(split, tac);
    HowlRecord[] tuples = getPrunedRecords();
    for(int j=0; j < 2; j++){
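The loop the snippet cuts off is the read-back verification. The exact assertions are not shown, but once a new-API RecordReader has been initialized it is normally drained like this (a sketch using only the standard org.apache.hadoop.mapreduce.RecordReader methods; the comparison against tuples is assumed):

    int readCount = 0;
    while (rr.nextKeyValue()) {
      Object value = rr.getCurrentValue();
      // compare value (or its converted HowlRecord form) with tuples[readCount]
      readCount++;
    }
    rr.close();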

Examples of org.apache.hadoop.mapreduce.JobContext

    writer.append(bytes2);
    writer.close();
    BytesRefArrayWritable[] bytesArr = new BytesRefArrayWritable[]{bytes,bytes2};

    RCFileInputDriver sd = new RCFileInputDriver();
    JobContext jc = new JobContext(conf, new JobID());
    sd.setInputPath(jc, file.toString());
    InputFormat<?,?> iF = sd.getInputFormat(null);
    InputSplit split = iF.getSplits(jc).get(0);
    sd.setOriginalSchema(jc, buildHiveSchema());
    sd.setOutputSchema(jc, buildReorderedSchema());

    sd.initialize(jc, getProps());
    Map<String,String> map = new HashMap<String,String>(1);
    map.put("part1", "first-part");
    sd.setPartitionValues(jc, map);
    // copy the column-projection ids the driver recorded on the JobContext into the
    // conf that backs the TaskAttemptContext below
    conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR,
        jc.getConfiguration().get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR));
    TaskAttemptContext tac = new TaskAttemptContext(conf, new TaskAttemptID());
    RecordReader<?,?> rr = iF.createRecordReader(split,tac);
    rr.initialize(split, tac);
    HowlRecord[] tuples = getReorderedCols();
    for(int j=0; j < 2; j++){

Examples of org.apache.hadoop.mapreduce.JobContext

      writer.close();

      RCFileMapReduceInputFormat<LongWritable, BytesRefArrayWritable> inputFormat = new RCFileMapReduceInputFormat<LongWritable, BytesRefArrayWritable>();
      Configuration jobConf = new Configuration(cloneConf);
      jobConf.set("mapred.input.dir", testDir.toString());
      JobContext context = new Job(jobConf);
      context.getConfiguration().setLong("mapred.max.split.size", maxSplitSize);
      List<InputSplit> splits = inputFormat.getSplits(context);
      assertEquals("splits length should be " + splitNumber, splitNumber, splits.size());
      int readCount = 0;
      for (int i = 0; i < splits.size(); i++) {
        TaskAttemptContext tac = new TaskAttemptContext(jobConf, new TaskAttemptID());
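mapred.input.dir and mapred.max.split.size above are the Hadoop 1.x property names. The same setup can be written with the typed helpers on the new-API FileInputFormat (org.apache.hadoop.mapreduce.lib.input.FileInputFormat); a sketch reusing cloneConf, testDir (assumed to be a Path), maxSplitSize and inputFormat from the snippet:

      Job job = Job.getInstance(cloneConf);
      FileInputFormat.setInputPaths(job, testDir);
      FileInputFormat.setMaxInputSplitSize(job, maxSplitSize);
      List<InputSplit> splits = inputFormat.getSplits(job);   // a Job is a JobContext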

Examples of org.apache.hadoop.mapreduce.JobContext

   
    static JobContext setUpContext(JobContext context,
            POStore store) throws IOException {
        // make a copy of the context so that the actions after this call
        // do not end up updating the same context
        JobContext contextCopy = new JobContext(
                context.getConfiguration(), context.getJobID());
       
        // call setLocation() on the storeFunc so that any side effects it has,
        // such as setting map.output.dir on the Configuration in the context,
        // are in place before the OutputCommitter needs them
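The tail of setUpContext is cut off here. Given the PigOutputFormat.setLocation(JobContext, POStore) call that appears in the checkOutputSpecs snippet below, it plausibly finishes by applying the store's location to the copy and returning it (a sketch, not the verbatim source):

        PigOutputFormat.setLocation(contextCopy, store);
        return contextCopy;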

Examples of org.apache.hadoop.mapreduce.JobContext

   
    @Override
    public void cleanupJob(JobContext context) throws IOException {
        // call clean up on all map and reduce committers
        for (Pair<OutputCommitter, POStore> mapCommitter : mapOutputCommitters) {           
            JobContext updatedContext = setUpContext(context,
                    mapCommitter.second);
            storeCleanup(mapCommitter.second, updatedContext.getConfiguration());
            mapCommitter.first.cleanupJob(updatedContext);
        }
        for (Pair<OutputCommitter, POStore> reduceCommitter :
            reduceOutputCommitters) {           
            JobContext updatedContext = setUpContext(context,
                    reduceCommitter.second);
            storeCleanup(reduceCommitter.second, updatedContext.getConfiguration());
            reduceCommitter.first.cleanupJob(updatedContext);
        }
      
    }
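OutputCommitter.cleanupJob(JobContext) is deprecated in current Hadoop in favor of commitJob(JobContext) and abortJob(JobContext, State). The commit path would use the same per-store fan-out; a sketch reusing the committer lists above, not Pig's actual code:

    @Override
    public void commitJob(JobContext context) throws IOException {
        // commit every map-side and reduce-side store with its own context copy
        for (Pair<OutputCommitter, POStore> mapCommitter : mapOutputCommitters) {
            JobContext updatedContext = setUpContext(context, mapCommitter.second);
            mapCommitter.first.commitJob(updatedContext);
        }
        for (Pair<OutputCommitter, POStore> reduceCommitter : reduceOutputCommitters) {
            JobContext updatedContext = setUpContext(context, reduceCommitter.second);
            reduceCommitter.first.commitJob(updatedContext);
        }
    }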

Examples of org.apache.hadoop.mapreduce.JobContext

   
    @Override
    public void setupJob(JobContext context) throws IOException {
        // call set up on all map and reduce committers
        for (Pair<OutputCommitter, POStore> mapCommitter : mapOutputCommitters) {
            JobContext updatedContext = setUpContext(context,
                    mapCommitter.second);
            mapCommitter.first.setupJob(updatedContext);
        }
        for (Pair<OutputCommitter, POStore> reduceCommitter :
            reduceOutputCommitters) {
            JobContext updatedContext = setUpContext(context,
                    reduceCommitter.second);
            reduceCommitter.first.setupJob(updatedContext);
        }
    }

Examples of org.apache.hadoop.mapreduce.JobContext

    private void checkOutputSpecsHelper(List<POStore> stores, JobContext
            jobcontext) throws IOException, InterruptedException {
        for (POStore store : stores) {
            // make a copy of the original JobContext so that
            // each OutputFormat get a different copy
            JobContext jobContextCopy = new JobContext(
                    jobcontext.getConfiguration(), jobcontext.getJobID());
           
            // set output location
            PigOutputFormat.setLocation(jobContextCopy, store);
           
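What usually follows the setLocation call: fetch the store's StoreFunc, ask it for its OutputFormat, and let that format validate the output specs against the per-store copy of the context (a sketch; getStoreFunc() and getOutputFormat() are the standard POStore/StoreFuncInterface hooks rather than lines quoted from this file):

            StoreFuncInterface storeFunc = store.getStoreFunc();
            OutputFormat<?, ?> outputFormat = storeFunc.getOutputFormat();
            outputFormat.checkOutputSpecs(jobContextCopy);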

Examples of org.apache.hadoop.mapreduce.JobContext

     * @param job
     * @param st
     * @throws IOException
     */
    private void storeSchema(Job job, POStore st) throws IOException {
        JobContext jc = new JobContext(job.getJobConf(),
                new org.apache.hadoop.mapreduce.JobID());
        JobContext updatedJc = PigOutputCommitter.setUpContext(jc, st);
        PigOutputCommitter.storeCleanup(st, updatedJc.getConfiguration());
    }

Examples of org.apache.hadoop.mapreduce.JobContext

      getStorageFormatOfKey(columnsMapping.get(iKey).mappingSpec,
          jobConf.get(HBaseSerDe.HBASE_TABLE_DEFAULT_STORAGE_TYPE, "string")));

    setScan(scan);
    Job job = new Job(jobConf);
    JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);
    Path [] tablePaths = FileInputFormat.getInputPaths(jobContext);

    List<org.apache.hadoop.mapreduce.InputSplit> splits =
      super.getSplits(jobContext);
    InputSplit [] results = new InputSplit[splits.size()];
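The ShimLoader.getHadoopShims().newJobContext(job) call above is Hive's shim hook for building a JobContext portably: on Hadoop 1 JobContext is a concrete class, while on Hadoop 2 it is an interface backed by JobContextImpl, so construction is delegated to the version-specific shim instead of a direct new.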

Examples of org.apache.helix.task.JobContext

    // Wait for job completion
    TestUtil.pollForWorkflowState(_manager, jobResource, TaskState.COMPLETED);

    // Ensure all partitions are completed individually
    JobContext ctx = TaskUtil.getJobContext(_manager, TaskUtil.getNamespacedJobName(jobResource));
    for (int i = 0; i < NUM_PARTITIONS; i++) {
      Assert.assertEquals(ctx.getPartitionState(i), TaskPartitionState.COMPLETED);
      Assert.assertEquals(ctx.getPartitionNumAttempts(i), 1);
    }
  }
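Unlike the Hadoop class in the earlier snippets, org.apache.helix.task.JobContext is Helix's record of a task-framework job's runtime state. The test above fetches it through TaskUtil.getJobContext and asserts that every partition reached COMPLETED on its first attempt.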