Package com.splout.db.hadoop

Examples of com.splout.db.hadoop.TableSpec$FieldIndex
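The fragments below come from Splout SQL tests and jobs that build TableSpec instances, optionally with compound FieldIndex definitions (FieldIndex is a nested class of TableSpec). As a minimal, self-contained sketch of the constructors exercised in these fragments (the wrapper class TableSpecSketch and the Pangool import paths are assumptions, not taken from the snippets):

import com.datasalt.pangool.io.Fields;   // assumed Pangool packages for Fields/Schema/Field
import com.datasalt.pangool.io.Schema;
import com.datasalt.pangool.io.Schema.Field;
import com.splout.db.hadoop.TableSpec;
import com.splout.db.hadoop.TableSpec.FieldIndex;

public class TableSpecSketch {
  public static void main(String[] args) {
    // Two-field schema, mirroring the tests below
    Schema schema = new Schema("schema1", Fields.parse("a:string, b:int"));
    // Partition by "a" and declare a compound index over (a, b); the remaining
    // constructor arguments (init, pre-insert, after-insert and final SQL, plus a
    // last parameter that is always null in the snippets below) are left unset
    TableSpec spec = new TableSpec(schema, new Field[] { schema.getField(0) },
        new FieldIndex[] { new FieldIndex(schema.getField(0), schema.getField(1)) },
        null, null, null, null, null);
  }
}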


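In this fragment from a job setup, a tuple's partition field is set to 0 before it is emitted, and the builder is then configured with an identity reducer, the partition field as group-by, and an engine-specific SQL output format for a TableSpec partitioned by the schema's first field: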
            // ... (set the partition field to 0 and emit the tuple)
            tupleInTuple1.set(SploutSQLOutputFormat.PARTITION_TUPLE_FIELD, 0);
            collector.write(tupleInTuple1);
          }
        });

    // Table partitioned by the first field of the schema
    TableSpec table1 = new TableSpec(tupleSchema1, tupleSchema1.getField(0));

    // Identity reducer, group by the partition field, and write through the
    // engine-specific output format produced by OutputFormatFactory
    builder.setTupleReducer(new IdentityTupleReducer());
    builder.setGroupByFields(SploutSQLOutputFormat.PARTITION_TUPLE_FIELD);
    builder.setOutput(new Path(OUTPUT), OutputFormatFactory.getOutputFormat(engine, 10000, new TableSpec[] { table1 }),
        ITuple.class, NullWritable.class);


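testPreSQL verifies that the initial SQL statements passed to a TableSpec appear first in the CREATE TABLE script generated by SQLite4JavaOutputFormat: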
  @Test
  public void testPreSQL() throws Exception {
    final Schema tupleSchema1 = new Schema("schema1", Fields.parse("a:string, b:int"));
    // Arbitrary SQL hooks: initial statements and pre-insert statements
    String[] initSQL = new String[] { "init1", "init2" };
    String[] preInsertSQL = new String[] { "CREATE Mytable;", "ME_LO_INVENTO" };
    TableSpec tableSpec = new TableSpec(tupleSchema1, new Field[] { tupleSchema1.getField(0) },
        new FieldIndex[] { new FieldIndex(tupleSchema1.getField(0), tupleSchema1.getField(1)) },
        initSQL, preInsertSQL, null, null, null);
    String[] createTables = new SQLite4JavaOutputFormat(10, tableSpec)
        .getCreateTables(tableSpec);
    // The init statements must come first in the generated script
    assertEquals("init1", createTables[0]);
    // ...

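testPostSQL verifies that the after-insert SQL statements come first in the statements returned by SploutSQLOutputFormat.getCreateIndexes():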
  @Test
  public void testPostSQL() throws Exception {
    final Schema tupleSchema1 = new Schema("schema1", Fields.parse("a:string, b:int"));
    // SQL hooks that run after insertion and at the very end
    String[] afterInsertSQL = new String[] { "afterinsert1", "afterinsert2" };
    String[] finalSQL = new String[] { "DROP INDEX idx_schema1_ab", "CREATE INDEX blablabla" };
    TableSpec tableSpec = new TableSpec(tupleSchema1, new Field[] { tupleSchema1.getField(0) },
        new FieldIndex[] { new FieldIndex(tupleSchema1.getField(0), tupleSchema1.getField(1)) }, null,
        null, afterInsertSQL, finalSQL, null);
    String[] createIndex = SploutSQLOutputFormat.getCreateIndexes(tableSpec);
    // The after-insert statements lead the generated index statements
    assertEquals("afterinsert1", createIndex[0]);
    assertEquals("afterinsert2", createIndex[1]);
    // ...

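This fragment sets up a map-only job that writes a SQLite store: the SQLite4Java native libraries are made available to the job, a TableSpec partitioned by the second schema field is created, and a SploutSQLProxyOutputFormat wrapping a SQLite4JavaOutputFormat is set as the job output: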
        // ... (ship the SQLite4Java native libraries with the job)
        SploutHadoopConfiguration.addSQLite4JavaNativeLibsToDC(conf);
      }
    }

    // Map-only job writing a SQLite store, partitioned by the second schema field
    MapOnlyJobBuilder job = new MapOnlyJobBuilder(conf);
    TableSpec tableSpec = new TableSpec(schema, schema.getFields().get(1));

    job.setOutput(new Path(out, "store"), new SploutSQLProxyOutputFormat(new SQLite4JavaOutputFormat(1000000, tableSpec)), ITuple.class,
        NullWritable.class);
    job.addInput(input, new HadoopInputFormat(TextInputFormat.class), new MapOnlyMapper<LongWritable, Text, ITuple, NullWritable>() {
      // ... (anonymous mapper body omitted)

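testCompoundIndexes checks the CREATE INDEX statement generated for a compound FieldIndex over fields a and b: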
public class TestSQLite4JavaOutputFormat extends SploutSQLOutputFormatTester implements Serializable {

  @Test
  public void testCompoundIndexes() throws Exception {
    final Schema tupleSchema1 = new Schema("schema1", Fields.parse("a:string, b:int"));
    // Compound index over fields "a" and "b"; no extra SQL hooks are configured
    TableSpec tableSpec = new TableSpec(tupleSchema1, new Field[] { tupleSchema1.getField(0) },
        new FieldIndex[] { new FieldIndex(tupleSchema1.getField(0), tupleSchema1.getField(1)) }, null,
        null, null, null, null);
    String[] createIndex = SploutSQLOutputFormat.getCreateIndexes(tableSpec);
    assertEquals("CREATE INDEX idx_schema1_ab ON schema1(`a`, `b`);", createIndex[0]);
  }
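As the assertion shows, the generated index name concatenates the table name and the indexed field names (idx_schema1_ab), and the indexed columns are back-quoted in the CREATE INDEX statement.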


