Examples of com.splout.db.hadoop.TableSpec


Examples of com.splout.db.hadoop.TableSpec

            tupleInTuple1.set(SploutSQLOutputFormat.PARTITION_TUPLE_FIELD, 0);
            collector.write(tupleInTuple1);
          }
        });

    TableSpec table1 = new TableSpec(tupleSchema1, tupleSchema1.getField(0));
      
    builder.setTupleReducer(new IdentityTupleReducer());
    builder.setGroupByFields(SploutSQLOutputFormat.PARTITION_TUPLE_FIELD);
    builder.setOutput(new Path(OUTPUT), OutputFormatFactory.getOutputFormat(engine, 10000, new TableSpec[] { table1 }),
        ITuple.class, NullWritable.class);
View Full Code Here

Examples of com.splout.db.hadoop.TableSpec

  @Test
  public void testPreSQL() throws Exception {
    final Schema tupleSchema1 = new Schema("schema1", Fields.parse("a:string, b:int"));
    String[] initSQL = new String[] { "init1", "init2" };
    String[] preInsertSQL = new String[] { "CREATE Mytable;", "ME_LO_INVENTO" };
    TableSpec tableSpec = new TableSpec(tupleSchema1, new Field[] { tupleSchema1.getField(0) },
        new FieldIndex[] { new FieldIndex(tupleSchema1.getField(0), tupleSchema1.getField(1)) },
        initSQL, preInsertSQL, null, null, null);
    String[] createTables = new SQLite4JavaOutputFormat(10, tableSpec)
        .getCreateTables(tableSpec);
    assertEquals("init1", createTables[0]);
View Full Code Here

Examples of com.splout.db.hadoop.TableSpec

  @Test
  public void testPostSQL() throws Exception {
    final Schema tupleSchema1 = new Schema("schema1", Fields.parse("a:string, b:int"));
    String[] afterInsertSQL = new String[] { "afterinsert1", "afterinsert2" };
    String[] finalSQL = new String[] { "DROP INDEX idx_schema1_ab", "CREATE INDEX blablabla" };
    TableSpec tableSpec = new TableSpec(tupleSchema1, new Field[] { tupleSchema1.getField(0) },
        new FieldIndex[] { new FieldIndex(tupleSchema1.getField(0), tupleSchema1.getField(1)) }, null,
        null, afterInsertSQL, finalSQL, null);
    String[] createIndex = SploutSQLOutputFormat.getCreateIndexes(tableSpec);
    assertEquals("afterinsert1", createIndex[0]);
    assertEquals("afterinsert2", createIndex[1]);
View Full Code Here

Examples of com.splout.db.hadoop.TableSpec

        SploutHadoopConfiguration.addSQLite4JavaNativeLibsToDC(conf);
      }
    }
   
    MapOnlyJobBuilder job = new MapOnlyJobBuilder(conf);
    TableSpec tableSpec = new TableSpec(schema, schema.getFields().get(1));
   
    job.setOutput(new Path(out, "store"), new SploutSQLProxyOutputFormat(new SQLite4JavaOutputFormat(1000000, tableSpec)), ITuple.class,
        NullWritable.class);
    job.addInput(input, new HadoopInputFormat(TextInputFormat.class), new MapOnlyMapper<LongWritable, Text, ITuple, NullWritable>() {
View Full Code Here

Examples of com.splout.db.hadoop.TableSpec

public class TestSQLite4JavaOutputFormat extends SploutSQLOutputFormatTester implements Serializable {

  @Test
  public void testCompoundIndexes() throws Exception {
    final Schema tupleSchema1 = new Schema("schema1", Fields.parse("a:string, b:int"));
    TableSpec tableSpec = new TableSpec(tupleSchema1, new Field[] { tupleSchema1.getField(0) },
        new FieldIndex[] { new FieldIndex(tupleSchema1.getField(0), tupleSchema1.getField(1)) }, null,
        null, null, null, null);
    String[] createIndex = SploutSQLOutputFormat.getCreateIndexes(tableSpec);
    assertEquals("CREATE INDEX idx_schema1_ab ON schema1(`a`, `b`);", createIndex[0]);
  }
View Full Code Here

Examples of org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.tableSpec

    List<Partition> list = new ArrayList<Partition>();

    if (work.getTableSpecs() != null) {

      // ANALYZE command
      tableSpec tblSpec = work.getTableSpecs();
      table = tblSpec.tableHandle;
      if (!table.isPartitioned()) {
        return null;
      }
      // get all partitions that matches with the partition spec
View Full Code Here

Examples of org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.tableSpec

    List<Partition> list = new ArrayList<Partition>();

    if (work.getTableSpecs() != null) {

      // ANALYZE command
      tableSpec tblSpec = work.getTableSpecs();
      table = tblSpec.tableHandle;
      if (!table.isPartitioned()) {
        return null;
      }
      // get all partitions that matches with the partition spec
View Full Code Here

Examples of org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.tableSpec

    List<Partition> list = new ArrayList<Partition>();

    if (work.getTableSpecs() != null) {

      // ANALYZE command
      tableSpec tblSpec = work.getTableSpecs();
      table = tblSpec.tableHandle;
      if (!table.isPartitioned()) {
        return null;
      }
      // get all partitions that matches with the partition spec
View Full Code Here

Examples of org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.tableSpec

          }
          currWork.setGatheringStats(true);
          // NOTE: here we should use the new partition predicate pushdown API to get a list of pruned list,
          // and pass it to setTaskPlan as the last parameter
          Set<Partition> confirmedPartns = new HashSet<Partition>();
          tableSpec tblSpec = parseInfo.getTableSpec();
          if (tblSpec.specType == tableSpec.SpecType.STATIC_PARTITION) {
            // static partition
            confirmedPartns.add(tblSpec.partHandle);
          } else if (tblSpec.specType == tableSpec.SpecType.DYNAMIC_PARTITION) {
            // dynamic partition
View Full Code Here

Examples of org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.tableSpec

    List<Partition> list = new ArrayList<Partition>();

    if (work.getTableSpecs() != null) {

      // ANALYZE command
      tableSpec tblSpec = work.getTableSpecs();
      table = tblSpec.tableHandle;
      if (!table.isPartitioned()) {
        return null;
      }
      // get all partitions that matches with the partition spec
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., which is owned by Oracle Inc. Contact coftware#gmail.com.