Package org.apache.hadoop.hive.ql.plan

Examples of org.apache.hadoop.hive.ql.plan.fileSinkDesc
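fileSinkDesc is the plan descriptor for a FileSinkOperator, the operator that writes a task's output rows to a file. The examples below, drawn from the Hive query compiler and its tests, share one pattern: construct a fileSinkDesc with an output path, a tableDesc describing the on-disk format, and a compression flag, then hand it to OperatorFactory to obtain the operator. A minimal sketch of that pattern, using only calls that appear in the examples themselves (the output path is a placeholder):

    import org.apache.hadoop.hive.ql.exec.Operator;
    import org.apache.hadoop.hive.ql.exec.OperatorFactory;
    import org.apache.hadoop.hive.ql.exec.Utilities;
    import org.apache.hadoop.hive.ql.plan.fileSinkDesc;

    // where to write, the on-disk format (Utilities.defaultTd is the default
    // text table descriptor the test plans below use), and whether to compress
    fileSinkDesc desc = new fileSinkDesc("/tmp/fsd-example.out",
                                         Utilities.defaultTd, false);

    // OperatorFactory.get builds the operator matching the descriptor; any
    // additional operator arguments become its children in the plan
    Operator<fileSinkDesc> sink = OperatorFactory.get(desc);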


    Utilities.addMapWork(mr, src, "a", op4);
    mr.setKeyDesc(op1.getConf().getKeySerializeInfo());
    mr.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo());

    // reduce side work
    Operator<FileSinkDesc> op3 = OperatorFactory.get(new FileSinkDesc(tmpdir
        + "mapredplan4.out", Utilities.defaultTd, false));

    Operator<ExtractDesc> op2 = OperatorFactory.get(new ExtractDesc(
        getStringColumn(Utilities.ReduceField.VALUE.toString())), op3);
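Note the bottom-up wiring in this test-plan excerpt: the file sink op3 is created first and then passed as the child of the extract operator op2, so reduce output flows from the extract operator into the file sink. The extract descriptor pulls the VALUE field of each reduce-side record before the sink writes it.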


      plan.getAliasToWork().put(alias_id, topOp);
      LOG.debug("Created Map Work for " + alias_id);
    }
    else {
      FileSinkOperator fOp = (FileSinkOperator) topOp;
      fileSinkDesc fConf = (fileSinkDesc)fOp.getConf();
      // populate local work if needed
    }
  }

    tableDesc tt_desc =
      PlanUtils.getBinaryTableDesc(PlanUtils.getFieldSchemasFromRowSchema(parent.getSchema(), "temporarycol"));
   
    // Create a file sink operator for this file name
    Operator<? extends Serializable> fs_op =
      putOpInsertMap(OperatorFactory.get
                     (new fileSinkDesc(taskTmpDir, tt_desc,
                                       parseCtx.getConf().getBoolVar(HiveConf.ConfVars.COMPRESSINTERMEDIATE)),
                      parent.getSchema()), null, parseCtx);
   
    // replace the reduce child with this operator
    List<Operator<? extends Serializable>> childOpList = parent.getChildOperators();

    // The temporary file is stored in the union context and later used to
    // initialize the union plan
   
    // Create a file sink operator for this file name
    Operator<? extends Serializable> fs_op =
      OperatorFactory.get
      (new fileSinkDesc(taskTmpDir, tt_desc,
                        parseCtx.getConf().getBoolVar(HiveConf.ConfVars.COMPRESSINTERMEDIATE)),
       parent.getSchema());
   
    assert parent.getChildOperators().size() == 1;
    parent.getChildOperators().set(0, fs_op);

    tableDesc tt_desc =
      PlanUtils.getIntermediateFileTableDesc(PlanUtils.getFieldSchemasFromRowSchema(parent.getSchema(), "temporarycol"));

    // Create a file sink operator for this file name
    boolean compressIntermediate = parseCtx.getConf().getBoolVar(HiveConf.ConfVars.COMPRESSINTERMEDIATE);
    fileSinkDesc desc = new fileSinkDesc(taskTmpDir, tt_desc, compressIntermediate);
    if (compressIntermediate) {
      desc.setCompressCodec(parseCtx.getConf().getVar(HiveConf.ConfVars.COMPRESSINTERMEDIATECODEC));
      desc.setCompressType(parseCtx.getConf().getVar(HiveConf.ConfVars.COMPRESSINTERMEDIATETYPE));
    }
    Operator<? extends Serializable> fs_op = putOpInsertMap(OperatorFactory.get(desc, parent.getSchema()), null, parseCtx);

    // replace the reduce child with this operator
    List<Operator<? extends Serializable>> childOpList = parent.getChildOperators();
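This is the only excerpt on the page that also wires up the intermediate compression codec and type; the others pass only the COMPRESSINTERMEDIATE flag. The values come from HiveConf. A small sketch of the producing side, with illustrative (not default) codec and type values:

    import org.apache.hadoop.hive.conf.HiveConf;

    HiveConf conf = new HiveConf();
    conf.setBoolVar(HiveConf.ConfVars.COMPRESSINTERMEDIATE, true);
    // consumed by desc.setCompressCodec(...) above
    conf.setVar(HiveConf.ConfVars.COMPRESSINTERMEDIATECODEC,
                "org.apache.hadoop.io.compress.GzipCodec");
    // consumed by desc.setCompressType(...) above
    conf.setVar(HiveConf.ConfVars.COMPRESSINTERMEDIATETYPE, "BLOCK");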

      sel.setParentOperators(null);
     
      // Create a file sink operator for this file name
      Operator<? extends Serializable> fs_op =
        OperatorFactory.get
        (new fileSinkDesc(taskTmpDir, tt_desc,
                          parseCtx.getConf().getBoolVar(HiveConf.ConfVars.COMPRESSINTERMEDIATE)),
         mapJoin.getSchema());
     
      assert mapJoin.getChildOperators().size() == 1;
      mapJoin.getChildOperators().set(0, fs_op);


    ReduceSinkOperator rsOp = (ReduceSinkOperator)OperatorFactory.getAndMakeChild(rsDesc, fsRS, ts_op);
    mapredWork cplan = GenMapRedUtils.getMapRedWork();
    ParseContext parseCtx = ctx.getParseCtx();

    Task<? extends Serializable> mergeTask = TaskFactory.get(cplan, parseCtx.getConf());
    fileSinkDesc fsConf = fsOp.getConf();
   
    // Add the extract operator to get the value fields
    RowResolver out_rwsch = new RowResolver();
    RowResolver interim_rwsch = ctx.getParseCtx().getOpParseCtx().get(fsOp).getRR();
    Integer pos = Integer.valueOf(0);
    for(ColumnInfo colInfo: interim_rwsch.getColumnInfos()) {
      String [] info = interim_rwsch.reverseLookup(colInfo.getInternalName());
      out_rwsch.put(info[0], info[1],
                    new ColumnInfo(pos.toString(), colInfo.getType(), info[0],
                          colInfo.getIsPartitionCol()));
      pos = Integer.valueOf(pos.intValue() + 1);
    }

    Operator extract =
      OperatorFactory.getAndMakeChild(
        new extractDesc(new exprNodeColumnDesc(TypeInfoFactory.stringTypeInfo,
                                Utilities.ReduceField.VALUE.toString(), "", false)),
        new RowSchema(out_rwsch.getColumnInfos()));
   
    tableDesc ts = (tableDesc)fsConf.getTableInfo().clone();
    fsConf.getTableInfo().getProperties().remove(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_PARTITION_COLUMNS);
    FileSinkOperator newOutput =
      (FileSinkOperator)OperatorFactory.getAndMakeChild(
         new fileSinkDesc(finalName, ts,
                          parseCtx.getConf().getBoolVar(HiveConf.ConfVars.COMPRESSRESULT)),
         fsRS, extract);

    cplan.setReducer(extract);
    ArrayList<String> aliases = new ArrayList<String>();
    aliases.add(fsConf.getDirName());
    cplan.getPathToAliases().put(fsConf.getDirName(), aliases);
    cplan.getAliasToWork().put(fsConf.getDirName(), ts_op);   
    cplan.getPathToPartitionInfo().put(fsConf.getDirName(), new partitionDesc(fsConf.getTableInfo(), null));
    cplan.setNumReduceTasks(-1);
   
    moveWork dummyMv = new moveWork(null, null, null, new loadFileDesc(fsOp.getConf().getDirName(), finalName, true, null, null), false);
    Task<? extends Serializable> dummyMergeTask = TaskFactory.get(dummyMv, ctx.getConf());
    List<Serializable> listWorks = new ArrayList<Serializable>();
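This longer excerpt builds a merge job for the files produced by an existing file sink fsOp: the sink's output directory (fsConf.getDirName()) becomes the input alias of the new plan cplan, an extract operator that reads the VALUE field is installed as the reducer, and a fresh fileSinkDesc writes the merged rows to finalName. The partition-columns property is removed from the cloned tableDesc so the merged output is written as plain files, and a moveWork task is then set up to move the merged directory to finalName.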

    RowSchema fsRS = new RowSchema(vecCol);

    Operator output = putOpInsertMap(
      OperatorFactory.getAndMakeChild(
        new fileSinkDesc(queryTmpdir, table_desc,
                         conf.getBoolVar(HiveConf.ConfVars.COMPRESSRESULT), currentTableId),
        fsRS, input), inputRR);

    LOG.debug("Created FileSink Plan for clause: " + dest + "dest_path: "
        + dest_path + " row schema: "

  @SuppressWarnings("unchecked")
  private void populateMapPlan1(Table src) {
    mr.setNumReduceTasks(Integer.valueOf(0));

    Operator<fileSinkDesc> op2 = OperatorFactory.get(new fileSinkDesc
                                                     (tmpdir + "mapplan1.out",
                                                      Utilities.defaultTd, true));
    Operator<filterDesc> op1 =
      OperatorFactory.get(getTestFilterDesc("key"), op2);
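Both test excerpts (populateMapRedPlan4 at the top and populateMapPlan1 here) build small test plans: they write files under tmpdir using Utilities.defaultTd as the format descriptor, and the map-only variant sets the number of reduce tasks to 0. The third constructor argument (true here) is the compression flag, as the compressIntermediate example above makes explicit.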
