Package org.apache.hadoop.mapred

Examples of org.apache.hadoop.mapred.JobConf
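The snippets below are truncated excerpts illustrating common JobConf patterns: reading custom configuration keys in a JobConfigurable.configure() method, building and submitting a job from a Tool-style run() method, and constructing JobConf instances in tests.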


    private FileSystem fs;

    @Override
    public void configure(JobConf job) {
        JobConf conf = job;
        try {
            this.cluster = new ClusterMapper().readCluster(new StringReader(conf.get("cluster.xml")));
            List<StoreDefinition> storeDefs = new StoreDefinitionsMapper().readStoreList(new StringReader(conf.get("stores.xml")));
            if(storeDefs.size() != 1)
                throw new IllegalStateException("Expected to find only a single store, but found multiple!");
            this.storeDef = storeDefs.get(0);

            this.numChunks = conf.getInt("num.chunks", -1);
            if(this.numChunks < 1)
                throw new VoldemortException("num.chunks not specified in the job conf.");

            this.saveKeys = conf.getBoolean("save.keys", false);
            this.reducerPerBucket = conf.getBoolean("reducer.per.bucket", false);
            this.conf = job;
            this.outputDir = job.get("final.output.dir");
            this.taskId = job.get("mapred.task.id");
            this.checkSumType = CheckSum.fromString(job.get("checksum.type"));
            // ... (remainder of the method elided)
        } catch(Exception e) {
            throw new RuntimeException(e);
        }
    }
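The configure() method above only reads these keys; the job driver must set them before submission. A minimal sketch of that driver side, assuming the serialized XML strings (clusterXml, storesXml) are produced elsewhere:

    JobConf conf = new JobConf();
    conf.set("cluster.xml", clusterXml);     // serialized cluster topology (assumed variable)
    conf.set("stores.xml", storesXml);       // serialized single-store definition (assumed variable)
    conf.setInt("num.chunks", 2);            // must be >= 1, or configure() throws VoldemortException
    conf.setBoolean("save.keys", true);
    conf.setBoolean("reducer.per.bucket", false);
    conf.set("final.output.dir", "/tmp/store-output");
    conf.set("checksum.type", "md5");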

    @Override
    public void configure(JobConf job) {

        JobConf conf = job;
        try {
            keyValueWriterClass = conf.get("writer.class");
            if(keyValueWriterClass != null)
                writer = (KeyValueWriter) Utils.callConstructor(keyValueWriterClass);
            else
                writer = new HadoopStoreWriterPerBucket();
            // ... (writer initialization elided)
        } catch(Exception e) {
            throw new RuntimeException(e);
        }

    }
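Because the writer is instantiated reflectively from the "writer.class" key, a custom implementation only needs a public no-argument constructor. A hedged example of plugging one in (com.example.MyKeyValueWriter is hypothetical):

    conf.set("writer.class", "com.example.MyKeyValueWriter"); // hypothetical KeyValueWriter implementation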

    public int run(String[] args) throws Exception {
        if(args.length != 3)
            Utils.croak("USAGE: GenerateData input-file output-dir value-size");
        JobConf conf = new JobConf(getConf(), GenerateData.class);
        conf.setJobName("generate-data");

        conf.setMapperClass(GenerateDataMapper.class);
        conf.setReducerClass(IdentityReducer.class);
        conf.setNumReduceTasks(0); // map-only job: the IdentityReducer is never invoked

        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(SequenceFileOutputFormat.class);
        conf.setOutputKeyClass(BytesWritable.class);
        conf.setOutputValueClass(BytesWritable.class);

        Path inputPath = new Path(args[0]);
        FileInputFormat.setInputPaths(conf, inputPath);
        Path outputPath = new Path(args[1]);
        // delete output path if it already exists
        FileSystem fs = outputPath.getFileSystem(conf);
        if(fs.exists(outputPath))
            fs.delete(outputPath, true);
        FileOutputFormat.setOutputPath(conf, outputPath);
        conf.setInt("value.size", Integer.parseInt(args[2]));

        JobClient.runJob(conf);
        return 0;
    }
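The run(String[])/getConf() pair implies GenerateData implements Hadoop's Tool interface, so a typical entry point delegates to org.apache.hadoop.util.ToolRunner; a minimal sketch under that assumption:

    public static void main(String[] args) throws Exception {
        // ToolRunner parses generic Hadoop options (-D, -conf, -fs, ...) before calling run()
        int exitCode = ToolRunner.run(new Configuration(), new GenerateData(), args);
        System.exit(exitCode);
    }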

  FileSystem fs;
  Path testFilePath;

  @Before
  public void openFileSystem () throws Exception {
    conf = new JobConf();
    fs = FileSystem.getLocal(conf);
    testFilePath = new Path(workDir, "TestInputOutputFormat." +
        testCaseName.getMethodName() + ".orc");
    fs.delete(testFilePath, false);
  }
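Note that JobConf extends Configuration, which is why it can be handed directly to FileSystem.getLocal(conf) here; fs.delete(testFilePath, false) performs a non-recursive delete so each test starts from a clean file.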


  @Test
  public void testMROutput() throws Exception {
    JobConf job = new JobConf(conf);
    Properties properties = new Properties();
    StructObjectInspector inspector;
    synchronized (TestOrcFile.class) {
      inspector = (StructObjectInspector)
          ObjectInspectorFactory.getReflectionObjectInspector(NestedRow.class,
              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
    }
    // ... (rest of the test elided)
  }


  @Test
  public void testMROutput2() throws Exception {
    JobConf job = new JobConf(conf);
    // Test that you can set the output directory using this config
    job.set("mapred.work.output.dir", testFilePath.getParent().toString());
    Properties properties = new Properties();
    StructObjectInspector inspector;
    synchronized (TestOrcFile.class) {
      inspector = (StructObjectInspector)
          ObjectInspectorFactory.getReflectionObjectInspector(StringRow.class,
              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
    }
    // ... (rest of the test elided)
  }


  @Test
  public void testEmptyFile() throws Exception {
    JobConf job = new JobConf(conf);
    Properties properties = new Properties();
    HiveOutputFormat<?, ?> outFormat = new OrcOutputFormat();
    FileSinkOperator.RecordWriter writer =
        outFormat.getHiveRecordWriter(conf, testFilePath, MyRow.class, true,
            properties, Reporter.NULL);
    // ... (rest of the test elided)
  }


  @Test
  public void testDefaultTypes() throws Exception {
    JobConf job = new JobConf(conf);
    Properties properties = new Properties();
    StructObjectInspector inspector;
    synchronized (TestOrcFile.class) {
      inspector = (StructObjectInspector)
          ObjectInspectorFactory.getReflectionObjectInspector(StringRow.class,
              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
    }
    // ... (rest of the test elided)
  }

  /**
   * (javadoc elided)
   * @throws Exception
   */
  @Test
  public void testNullFileSystem() throws Exception {
    conf.set("mapred.work.output.dir", testFilePath.getParent().toString());
    JobConf job = new JobConf(conf);
    Properties properties = new Properties();
    StructObjectInspector inspector;
    synchronized (TestOrcFile.class) {
      inspector = (StructObjectInspector)
          ObjectInspectorFactory.getReflectionObjectInspector(StringRow.class,
              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
    }
    // ... (rest of the test elided)
  }


    @Override
    public void configure(JobConf job) {

        JobConf conf = job;
        try {
            keyValueWriterClass = conf.get("writer.class");
            if(keyValueWriterClass != null)
                writer = (KeyValueWriter) Utils.callConstructor(keyValueWriterClass);
            else
                writer = new HadoopStoreWriter();
            // ... (writer initialization elided)
        } catch(Exception e) {
            throw new RuntimeException(e);
        }
    }
