Package com.cloudera.iterativereduce.io

Examples of com.cloudera.iterativereduce.io.TextRecordParser
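Every snippet on this page follows the same pattern: construct a TextRecordParser, point it at a byte range of a file with setFile(path, offset, length), and hand the parser to a worker or data-set iterator. A minimal sketch of that pattern, assuming the (String, long, long) setFile signature used throughout these examples and a hypothetical local file:

      // minimal usage sketch; the file path is hypothetical
      TextRecordParser txt_reader = new TextRecordParser();
      java.io.File f = new java.io.File("/tmp/records.txt");
      // offset 0, length = the whole file
      txt_reader.setFile(f.getAbsolutePath(), 0, f.length());
      // then hand the parser to a worker, as in the snippets below:
      // worker.setRecordParser(txt_reader);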


      // simulates the conf stuff
      worker.setup(this.generateDebugConfigurationObject());

      // InputRecordsSplit custom_reader_0 = new InputRecordsSplit(job,
      // splits[x]);
      TextRecordParser txt_reader = new TextRecordParser();

      long len = Integer.parseInt(splits[x].toString().split(":")[2]
          .split("\\+")[1]);

      txt_reader.setFile(splits[x].toString().split(":")[1], 0, len);

      worker.setRecordParser(txt_reader);

      workers.add(worker);
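The index arithmetic above assumes the split renders as path:start+length with a single-colon scheme, e.g. file:/dir/data.txt:0+20908; the longer example at the bottom of this page handles the other URI shapes. A worked trace of the parsing under that assumption:

      // e.g. splits[x].toString()  ==  "file:/dir/data.txt:0+20908"
      // split(":")                 ->  { "file", "/dir/data.txt", "0+20908" }
      // [2].split("\\+")           ->  { "0", "20908" }
      // so len = 20908, and the path handed to setFile is "/dir/data.txt"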

      InputSplit[] splits = generateDebugSplits(workDir, job);
     
      System.out.println( "> splits: " + splits[0].toString() );

     
      TextRecordParser txt_reader = new TextRecordParser();

      long len = Integer.parseInt(splits[0].toString().split(":")[2]
          .split("\\+")[1]);

      txt_reader.setFile(splits[0].toString().split(":")[1], 0, len);

    MnistHDFSDataSetIterator hdfs_fetcher = new MnistHDFSDataSetIterator( batchSize, totalNumExamples, txt_reader );
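A hedged sketch of how the fetcher would then be drained, assuming MnistHDFSDataSetIterator follows the usual data-set-iterator contract of hasNext()/next() returning one mini-batch per call (the DataSet batch type below is an assumption):

      // hedged consumption sketch; next() is assumed to yield one
      // mini-batch of `batchSize` MNIST examples per call
      while (hdfs_fetcher.hasNext()) {
        DataSet batch = hdfs_fetcher.next();
        // ... feed the batch to the trainer ...
      }
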
  public static void main(String[] args) throws Exception {

    TextRecordParser parser = new TextRecordParser();
    WorkerNode wn = new WorkerNode();
    ApplicationWorker<DBNParameterVectorUpdateable> aw = new ApplicationWorker<DBNParameterVectorUpdateable>(
        parser, wn, DBNParameterVectorUpdateable.class);
         
    ToolRunner.run(aw, args);
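
This main() shape repeats across the launchers below: pair a TextRecordParser with a node implementation, wrap both in an ApplicationWorker parameterized on the Updateable type the job exchanges, and hand the result to Hadoop's ToolRunner (which implies ApplicationWorker implements the Tool interface). A hedged generic template; the ComputableWorker/Updateable bounds are assumptions inferred from how these snippets parameterize ApplicationWorker:

    // hypothetical helper capturing the repeated launcher pattern
    public static <T extends Updateable> void launch(
        ComputableWorker<T> node, Class<T> updateableClass, String[] args) throws Exception {
      TextRecordParser parser = new TextRecordParser();
      ApplicationWorker<T> aw = new ApplicationWorker<T>(parser, node, updateableClass);
      ToolRunner.run(aw, args); // ToolRunner handles generic Hadoop options, then runs aw
    }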
   

    // TODO Auto-generated method stub
    return compute();
  }

  public static void main(String[] args) throws Exception {
    TextRecordParser parser = new TextRecordParser();
    MiniBatchWorkerNode pwn = new MiniBatchWorkerNode();
    ApplicationWorker<ParameterVectorUpdateable> aw = new ApplicationWorker<ParameterVectorUpdateable>(
        parser, pwn, ParameterVectorUpdateable.class);

    ToolRunner.run(aw, args);

    // TODO Auto-generated method stub
    return compute();
  }
 
  public static void main(String[] args) throws Exception {
    TextRecordParser parser = new TextRecordParser();
    POLRWorkerNode pwn = new POLRWorkerNode();
    ApplicationWorker<ParameterVectorUpdatable> aw = new ApplicationWorker<ParameterVectorUpdatable>(
        parser, pwn, ParameterVectorUpdatable.class);
   
    ToolRunner.run(aw, args);

      List<ParameterVectorUpdateable> records) {
    return compute();
  }

  public static void main(String[] args) throws Exception {
    TextRecordParser parser = new TextRecordParser();
    WorkerNode pwn = new WorkerNode();
    ApplicationWorker<ParameterVectorUpdateable> aw = new ApplicationWorker<ParameterVectorUpdateable>(
        parser, pwn, ParameterVectorUpdateable.class);

    ToolRunner.run(aw, args);

        }
       
      } 
 
    public static void main(String[] args) throws Exception {
        TextRecordParser parser = new TextRecordParser();
        WorkerNode wn = new WorkerNode();
        ApplicationWorker<NetworkWeightsUpdateable> aw = new ApplicationWorker<NetworkWeightsUpdateable>(
            parser, wn, NetworkWeightsUpdateable.class);
       
        ToolRunner.run(aw, args);

      InputSplit[] splits = generateDebugSplits(workDir, job);
     
      System.out.println( "> splits: " + splits[0].toString() );

     
      TextRecordParser txt_reader = new TextRecordParser();

      long len = Integer.parseInt(splits[0].toString().split(":")[2]
          .split("\\+")[1]);

      txt_reader.setFile(splits[0].toString().split(":")[1], 0, len);

//      worker_model_builder.setRecordParser(txt_reader);

      InputSplit[] splits = generateDebugSplits(workDir, job);
     
      System.out.println( "> splits: " + splits[0].toString() );

     
      TextRecordParser txt_reader = new TextRecordParser();

      // drop the fully qualified host
      //String minus_host = splits[0].toString().split( "" )
     
      long len = 0;
      String path = "";
     
      // Yes, a cleverer regex would handle all of these cases at once.
      // I'm on a plane right now and lazy. Sue me.

      if ( splits[0].toString().startsWith( "file:///" ) ) {
       
        path = splits[0].toString().replaceFirst("file:///", "/").split(":")[0];
       
        len = splits[0].getLength();
       
      } else if ( splits[0].toString().startsWith( "file://" ) ) {
         
          path = splits[0].toString().replaceFirst("file://", "/").split(":")[0];
         
          len = splits[0].getLength();
     
      } else if ( splits[0].toString().startsWith( "file:/" ) ) {
       
        path = splits[0].toString().replaceFirst("file:/", "/").split(":")[0];
       
        len = splits[0].getLength();
     
      } else if ( splits[0].toString().startsWith( "/" ) ) {
     
        // we have something like:
        // /user/cloudera/metronome/dbn/mnist/mnist_filtered_conversion_test.metronome:0+20908
       
        // a bare path renders as path:start+length, i.e. only two ':'-separated pieces
        len = Integer.parseInt(splits[0].toString().split(":")[1]
            .split("\\+")[1]);
       
        //System.out.println("Split Length: " + len + ", versus method: " + splits[0].getLength() );
       
        path = splits[0].toString().split(":")[0];
       
      } else if ( splits[0].toString().startsWith( "hdfs:///" ) ) {
       
        // we have something like:
        // hdfs:///user/cloudera/metronome/dbn/mnist/mnist_filtered_conversion_test.metronome:0+20908
       
       
        len = Integer.parseInt(splits[0].toString().split(":")[2]
            .split("\\+")[1]);

        //System.out.println( "number of slices from ':' " + splits[0].toString().split(":").length );
       
        //System.out.println("Split Length: " + len + ", versus method: " + splits[0].getLength() );
       
        path = splits[0].toString().split(":")[1];
       
      } else {
       
        // we have something like:
        // hdfs://localhost.localdomain:8020/user/cloudera/metronome/dbn/mnist/mnist_filtered_conversion_test.metronome:0+20908
       
        len = Integer.parseInt(splits[0].toString().split(":")[3]
            .split("\\+")[1]);

        //System.out.println( "number of slices from ':' " + splits[0].toString().split(":").length );
       
        //System.out.println("Split Length: " + len + ", versus method: " + splits[0].getLength() );
       
        String[] parts = splits[0].toString().split(":");
       
        path = parts[0] + ":" + parts[1] + ":" + parts[2];
       
       
      }
     
      //System.out.println("raw path: " + path);
     

    //  txt_reader.setFile(splits[0].toString().split(":")[1], 0, len);   
      txt_reader.setFile( path, 0, len);

    MnistHDFSDataSetIterator hdfs_fetcher = new MnistHDFSDataSetIterator( batchSize, 1, txt_reader );
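
All of the string surgery above exists only to recover a path and byte length from InputSplit.toString(). When the splits are FileSplits (the path:start+length rendering suggests they are), the same information is available from the typed Hadoop API; a hedged alternative sketch (whether TextRecordParser wants the bare path or the full URI depends on its setFile implementation):

      // hedged alternative, assuming org.apache.hadoop.mapred.FileSplit
      if (splits[0] instanceof FileSplit) {
        FileSplit fs = (FileSplit) splits[0];
        String p = fs.getPath().toUri().getPath(); // scheme and host stripped
        txt_reader.setFile(p, fs.getStart(), fs.getLength());
      }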
