Examples of GenMapRedCtx
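
GenMapRedCtx is the small per-operator bookkeeping record used by Hive's MapReduce
plan generation: for every operator the tree walker has visited, it remembers the task
currently being assembled, the top (table-scan side) operator of that branch, and the
alias id of that top operator. Judging only from the constructor calls and getters in
the snippets below, the holder looks roughly like the sketch that follows; the field
names and modifiers are inferred, and Task and Operator are the usual
org.apache.hadoop.hive.ql.exec classes, so the real nested class may differ in detail.

    // Minimal sketch, inferred from usage below (not copied from the Hive sources).
    // The real GenMapRedCtx is a static nested class of GenMRProcContext.
    public static class GenMapRedCtx {
      private final Task<? extends Serializable> currTask;      // task being assembled for this branch
      private final Operator<? extends Serializable> currTopOp; // top operator (usually a table scan), may be null
      private final String currAliasId;                         // alias of the top operator, may be null

      public GenMapRedCtx(Task<? extends Serializable> currTask,
          Operator<? extends Serializable> currTopOp, String currAliasId) {
        this.currTask = currTask;
        this.currTopOp = currTopOp;
        this.currAliasId = currAliasId;
      }

      public Task<? extends Serializable> getCurrTask() {
        return currTask;
      }

      public Operator<? extends Serializable> getCurrTopOp() {
        return currTopOp;
      }

      public String getCurrAliasId() {
        return currAliasId;
      }
    }

Most of the processors below follow the same pattern: they read the GenMapRedCtx of the
parent operator on the branch they were invoked on, copy those three values into the
shared GenMRProcContext, initialize or merge map-reduce plans through GenMapRedUtils,
and finally store a fresh GenMapRedCtx for the operator just handled so that its
children see the (possibly new) current task.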


Examples of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx

      // find the branch on which this processor was invoked
      int pos = getPositionParent(mapJoin, stack);

      Map<Operator<? extends Serializable>, GenMapRedCtx> mapCurrCtx = ctx.getMapCurrCtx();
      GenMapRedCtx mapredCtx = mapCurrCtx.get(mapJoin.getParentOperators().get(pos));
      Task<? extends Serializable> currTask    = mapredCtx.getCurrTask();
      mapredWork currPlan = (mapredWork) currTask.getWork();
      Operator<? extends Serializable> currTopOp   = mapredCtx.getCurrTopOp();
      String currAliasId = mapredCtx.getCurrAliasId();
      Operator<? extends Serializable> reducer = mapJoin;
      HashMap<Operator<? extends Serializable>, Task<? extends Serializable>> opTaskMap = ctx.getOpTaskMap();
      Task<? extends Serializable> opMapTask = opTaskMap.get(reducer);
     
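      // Propagate this branch's task, top operator and alias into the shared
      // walker context; the GenMapRedUtils calls below read them from there.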
      ctx.setCurrTopOp(currTopOp);
      ctx.setCurrAliasId(currAliasId);
      ctx.setCurrTask(currTask);
     
      // If the plan for this reducer does not exist, initialize the plan
      if (opMapTask == null) {
        assert currPlan.getReducer() == null;
        GenMapRedUtils.initMapJoinPlan(mapJoin, ctx, false, false, false, pos);
      }
      // The current plan can be thrown away after being merged with the original plan
      else {
        GenMapRedUtils.joinPlan(mapJoin, null, opMapTask, ctx, pos, false, false, false);
        currTask = opMapTask;
        ctx.setCurrTask(currTask);
      }
     
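      // Record the context under the map-join itself so that its children pick up
      // the (possibly merged) current task when they are visited.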
      mapCurrCtx.put(mapJoin, new GenMapRedCtx(ctx.getCurrTask(), ctx.getCurrTopOp(), ctx.getCurrAliasId()));
      return null;
    }

Examples of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx

     
      if (listMapJoinOps.contains(mapJoin)) {
        ctx.setCurrAliasId(null);
        ctx.setCurrTopOp(null);
        Map<Operator<? extends Serializable>, GenMapRedCtx> mapCurrCtx = ctx.getMapCurrCtx();
        mapCurrCtx.put((Operator<? extends Serializable>)nd, new GenMapRedCtx(ctx.getCurrTask(), null, null));
        return null;
      }

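      // A separate task will be created for the work that follows this map-join;
      // the GenMRMapJoinCtx records the intermediate file that links the two.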
      ctx.setCurrMapJoinOp(mapJoin);
     
      Task<? extends Serializable> currTask = ctx.getCurrTask();
      GenMRMapJoinCtx mjCtx = ctx.getMapJoinCtx(mapJoin);
      if (mjCtx == null) {
        mjCtx = new GenMRMapJoinCtx();
        ctx.setMapJoinCtx(mapJoin, mjCtx);
      }
     
      mapredWork mjPlan = GenMapRedUtils.getMapRedWork();
      Task<? extends Serializable> mjTask = TaskFactory.get(mjPlan, parseCtx.getConf());
     
      tableDesc tt_desc =
        PlanUtils.getIntermediateFileTableDesc(
            PlanUtils.getFieldSchemasFromRowSchema(mapJoin.getSchema(), "temporarycol"));
     
      // generate the temporary file
      Context baseCtx = parseCtx.getContext();
      String taskTmpDir = baseCtx.getMRTmpFileURI();
     
      // Add the path to alias mapping
      mjCtx.setTaskTmpDir(taskTmpDir);
      mjCtx.setTTDesc(tt_desc);
      mjCtx.setRootMapJoinOp(sel);
     
      sel.setParentOperators(null);
     
      // Create a file sink operator for this file name
      Operator<? extends Serializable> fs_op =
        OperatorFactory.get
        (new fileSinkDesc(taskTmpDir, tt_desc,
                          parseCtx.getConf().getBoolVar(HiveConf.ConfVars.COMPRESSINTERMEDIATE)),
         mapJoin.getSchema());
     
      assert mapJoin.getChildOperators().size() == 1;
      mapJoin.getChildOperators().set(0, fs_op);
     
      List<Operator<? extends Serializable>> parentOpList = new ArrayList<Operator<? extends Serializable>>();
      parentOpList.add(mapJoin);
      fs_op.setParentOperators(parentOpList);
     
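      // Chain the new map-join task after the current one and make it the current
      // task for whatever follows.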
      currTask.addDependentTask(mjTask);
     
      ctx.setCurrTask(mjTask);
      ctx.setCurrAliasId(null);
      ctx.setCurrTopOp(null);
     
      Map<Operator<? extends Serializable>, GenMapRedCtx> mapCurrCtx = ctx.getMapCurrCtx();
      mapCurrCtx.put((Operator<? extends Serializable>)nd, new GenMapRedCtx(ctx.getCurrTask(), null, null));
     
      return null;
    }

Examples of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx

      // find the branch on which this processor was invoked
      int pos = getPositionParent(mapJoin, stack);

      Map<Operator<? extends Serializable>, GenMapRedCtx> mapCurrCtx = ctx.getMapCurrCtx();
      GenMapRedCtx mapredCtx = mapCurrCtx.get(mapJoin.getParentOperators().get(pos));
      Task<? extends Serializable> currTask    = mapredCtx.getCurrTask();
      mapredWork currPlan = (mapredWork) currTask.getWork();
      String currAliasId = mapredCtx.getCurrAliasId();
      Operator<? extends Serializable> reducer = mapJoin;
      HashMap<Operator<? extends Serializable>, Task<? extends Serializable>> opTaskMap = ctx.getOpTaskMap();
      Task<? extends Serializable> opMapTask = opTaskMap.get(reducer);
     
      ctx.setCurrTask(currTask);
     
      // If the plan for this reducer does not exist, initialize the plan
      if (opMapTask == null) {
        assert currPlan.getReducer() == null;
        GenMapRedUtils.initMapJoinPlan(mapJoin, ctx, true, false, false, pos);
      }
      // The current plan can be thrown away after being merged with the original plan
      else {
        GenMapRedUtils.joinPlan(mapJoin, currTask, opMapTask, ctx, pos, false, true, false);
        currTask = opMapTask;
        ctx.setCurrTask(currTask);
      }
     
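      // Only the current task is recorded for the children here; currTopOp and
      // currAliasId are not carried forward on this branch.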
      mapCurrCtx.put(mapJoin, new GenMapRedCtx(ctx.getCurrTask(), null, null));
      return null;
    }

Examples of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx

      // find the branch on which this processor was invoked
      int pos = getPositionParent(mapJoin, stack);

      Map<Operator<? extends Serializable>, GenMapRedCtx> mapCurrCtx = ctx.getMapCurrCtx();
      GenMapRedCtx mapredCtx = mapCurrCtx.get(mapJoin.getParentOperators().get(pos));
      Task<? extends Serializable> currTask    = mapredCtx.getCurrTask();
      mapredWork currPlan = (mapredWork) currTask.getWork();
      Operator<? extends Serializable> reducer = mapJoin;
      HashMap<Operator<? extends Serializable>, Task<? extends Serializable>> opTaskMap = ctx.getOpTaskMap();
      Task<? extends Serializable> opMapTask = opTaskMap.get(reducer);
     
      // union result cannot be a map table
      boolean local = pos != ((mapJoinDesc) mapJoin.getConf()).getPosBigTable();
      if (local) {
        throw new SemanticException(ErrorMsg.INVALID_MAPJOIN_TABLE.getMsg());
      }
     
      // If the plan for this reducer does not exist, initialize the plan
      if (opMapTask == null) {
        assert currPlan.getReducer() == null;
        ctx.setCurrMapJoinOp(mapJoin);
        GenMapRedUtils.initMapJoinPlan(mapJoin, ctx, true, true, false, pos);
        ctx.setCurrUnionOp(null);
      }
      // The current plan can be thrown away after being merged with the original plan
      else {
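        // A task already exists for this map-join: merge this branch into it. The
        // union's task is passed along only when it differs from that existing task.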
        Task<? extends Serializable> uTask = ctx.getUnionTask(ctx.getCurrUnionOp()).getUTask();
        if (uTask.getId().equals(opMapTask.getId()))
          GenMapRedUtils.joinPlan(mapJoin, null, opMapTask, ctx, pos, false, false, true);
        else
          GenMapRedUtils.joinPlan(mapJoin, uTask, opMapTask, ctx, pos, false, false, true);
        currTask = opMapTask;
        ctx.setCurrTask(currTask);
      }
     
      mapCurrCtx.put(mapJoin, new GenMapRedCtx(ctx.getCurrTask(), ctx.getCurrTopOp(), ctx.getCurrAliasId()));
      return null;
    }

Examples of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx

      UnionParseContext uPrsCtx = uCtx.getUnionParseContext(union);
      if ((uPrsCtx != null) && (uPrsCtx.getMapJoinQuery())) {
        GenMapRedUtils.mergeMapJoinUnion(union, ctx, UnionProcFactory.getPositionParent(union, stack));
      }
      else
        mapCurrCtx.put((Operator<? extends Serializable>)nd, new GenMapRedCtx(ctx.getCurrTask(), ctx.getCurrTopOp(), ctx.getCurrAliasId()));
      return null;
    }

    ctx.setCurrUnionOp(union);

    UnionParseContext uPrsCtx = uCtx.getUnionParseContext(union);
    assert uPrsCtx != null;

    Task<? extends Serializable> currTask = ctx.getCurrTask();
    int pos = UnionProcFactory.getPositionParent(union, stack);

    // is the current task a root task
    if (uPrsCtx.getRootTask(pos) && (!ctx.getRootTasks().contains(currTask)))
      ctx.getRootTasks().add(currTask);

    GenMRUnionCtx uCtxTask = ctx.getUnionTask(union);
    Task<? extends Serializable> uTask = null;

    Operator<? extends Serializable> parent = union.getParentOperators().get(pos);
    mapredWork uPlan = null;

    // union is encountered for the first time
    if (uCtxTask == null) {
      uCtxTask = new GenMRUnionCtx();
      uPlan = GenMapRedUtils.getMapRedWork();
      uTask = TaskFactory.get(uPlan, parseCtx.getConf());
      uCtxTask.setUTask(uTask);
      ctx.setUnionTask(union, uCtxTask);
    }
    else {
      uTask = uCtxTask.getUTask();
      uPlan = (mapredWork)uTask.getWork();
    }

    // If there is a mapjoin at position 'pos'
    if (uPrsCtx.getMapJoinSubq(pos)) {
      MapJoinOperator mjOp = ctx.getCurrMapJoinOp();
      assert mjOp != null;
      GenMRMapJoinCtx mjCtx = ctx.getMapJoinCtx(mjOp);
      assert mjCtx != null;
      mapredWork plan = (mapredWork) currTask.getWork();

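      // Wire the map-join's intermediate file into the current plan so this union
      // input reads the map-join output rather than a base table.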
      String taskTmpDir = mjCtx.getTaskTmpDir();
      tableDesc tt_desc = mjCtx.getTTDesc();
      assert plan.getPathToAliases().get(taskTmpDir) == null;
      plan.getPathToAliases().put(taskTmpDir, new ArrayList<String>());
      plan.getPathToAliases().get(taskTmpDir).add(taskTmpDir);
      plan.getPathToPartitionInfo().put(taskTmpDir, new partitionDesc(tt_desc, null));
      plan.getAliasToWork().put(taskTmpDir, mjCtx.getRootMapJoinOp());
    }

    tableDesc tt_desc = PlanUtils.getIntermediateFileTableDesc(
          PlanUtils.getFieldSchemasFromRowSchema(parent.getSchema(), "temporarycol"));

    // generate the temporary file
    Context baseCtx = parseCtx.getContext();
    String taskTmpDir = baseCtx.getMRTmpFileURI();

    // Add the path to alias mapping
    uCtxTask.addTaskTmpDir(taskTmpDir);
    uCtxTask.addTTDesc(tt_desc);

    // The union task is empty. The files created for all the inputs are assembled in the
    // union context and later used to initialize the union plan

    // Create a file sink operator for this file name
    Operator<? extends Serializable> fs_op =
      OperatorFactory.get
      (new fileSinkDesc(taskTmpDir, tt_desc,
                        parseCtx.getConf().getBoolVar(HiveConf.ConfVars.COMPRESSINTERMEDIATE)),
       parent.getSchema());

    assert parent.getChildOperators().size() == 1;
    parent.getChildOperators().set(0, fs_op);

    List<Operator<? extends Serializable>> parentOpList = new ArrayList<Operator<? extends Serializable>>();
    parentOpList.add(parent);
    fs_op.setParentOperators(parentOpList);

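    // The union task consumes this temporary file, so it must run after the task
    // that writes it.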
    currTask.addDependentTask(uTask);

    // If it is map-only task, add the files to be processed
    if (uPrsCtx.getMapOnlySubq(pos) && uPrsCtx.getRootTask(pos))
      GenMapRedUtils.setTaskPlan(ctx.getCurrAliasId(), ctx.getCurrTopOp(), (mapredWork) currTask.getWork(), false, ctx);

    ctx.setCurrTask(uTask);
    ctx.setCurrAliasId(null);
    ctx.setCurrTopOp(null);

    mapCurrCtx.put((Operator<? extends Serializable>)nd, new GenMapRedCtx(ctx.getCurrTask(), null, null));

    return null;
  }

Examples of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx

    ReduceSinkOperator op = (ReduceSinkOperator) nd;
    GenMRProcContext ctx = (GenMRProcContext) opProcCtx;

    Map<Operator<? extends OperatorDesc>, GenMapRedCtx> mapCurrCtx = ctx
        .getMapCurrCtx();
    GenMapRedCtx mapredCtx = mapCurrCtx.get(stack.get(stack.size() - 2));
    Task<? extends Serializable> currTask = mapredCtx.getCurrTask();
    MapredWork currPlan = (MapredWork) currTask.getWork();
    Operator<? extends OperatorDesc> currTopOp = mapredCtx.getCurrTopOp();
    String currAliasId = mapredCtx.getCurrAliasId();
    Operator<? extends OperatorDesc> reducer = op.getChildOperators().get(0);
    HashMap<Operator<? extends OperatorDesc>, Task<? extends Serializable>> opTaskMap = ctx
        .getOpTaskMap();
    Task<? extends Serializable> opMapTask = opTaskMap.get(reducer);

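    // Make this branch's task, top operator and alias the current ones before
    // deciding how the reducer fits into the plan.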
    ctx.setCurrTopOp(currTopOp);
    ctx.setCurrAliasId(currAliasId);
    ctx.setCurrTask(currTask);

    // If the plan for this reducer does not exist, initialize the plan
    if (opMapTask == null) {
      if (currPlan.getReducer() == null) {
        GenMapRedUtils.initPlan(op, ctx);
      } else {
        GenMapRedUtils.splitPlan(op, ctx);
      }
    } else {
      // This will happen in case of joins. The current plan can be thrown away
      // after being merged with the original plan
      GenMapRedUtils.joinPlan(op, null, opMapTask, ctx, -1, false, false, false);
      currTask = opMapTask;
      ctx.setCurrTask(currTask);
    }

    mapCurrCtx.put(op, new GenMapRedCtx(ctx.getCurrTask(), ctx.getCurrTopOp(),
        ctx.getCurrAliasId()));
    return null;
  }

Examples of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx

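      // (Same pattern as the first example on this page, written against the newer
      // OperatorDesc-based operator API.)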
      // find the branch on which this processor was invoked
      int pos = getPositionParent(mapJoin, stack);

      Map<Operator<? extends OperatorDesc>, GenMapRedCtx> mapCurrCtx = ctx
          .getMapCurrCtx();
      GenMapRedCtx mapredCtx = mapCurrCtx.get(mapJoin.getParentOperators().get(
          pos));
      Task<? extends Serializable> currTask = mapredCtx.getCurrTask();
      MapredWork currPlan = (MapredWork) currTask.getWork();
      Operator<? extends OperatorDesc> currTopOp = mapredCtx.getCurrTopOp();
      String currAliasId = mapredCtx.getCurrAliasId();
      Operator<? extends OperatorDesc> reducer = mapJoin;
      HashMap<Operator<? extends OperatorDesc>, Task<? extends Serializable>> opTaskMap =
        ctx.getOpTaskMap();
      Task<? extends Serializable> opMapTask = opTaskMap.get(reducer);

      ctx.setCurrTopOp(currTopOp);
      ctx.setCurrAliasId(currAliasId);
      ctx.setCurrTask(currTask);

      // If the plan for this reducer does not exist, initialize the plan
      if (opMapTask == null) {
        assert currPlan.getReducer() == null;
        GenMapRedUtils.initMapJoinPlan(mapJoin, ctx, false, false, false, pos);
      } else {
        // The current plan can be thrown away after being merged with the
        // original plan
        GenMapRedUtils.joinPlan(mapJoin, null, opMapTask, ctx, pos, false,
            false, false);
        currTask = opMapTask;
        ctx.setCurrTask(currTask);
      }

      mapCurrCtx.put(mapJoin, new GenMapRedCtx(ctx.getCurrTask(), ctx
          .getCurrTopOp(), ctx.getCurrAliasId()));
      return null;
    }

Examples of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx

    // The map-join consisted of a bunch of map-only jobs, and the plan has been
    // split after the map-join.
    Operator<? extends OperatorDesc> reducer = op.getChildOperators().get(0);
    Map<Operator<? extends OperatorDesc>, GenMapRedCtx> mapCurrCtx = ctx
        .getMapCurrCtx();
    GenMapRedCtx mapredCtx = mapCurrCtx.get(op.getParentOperators().get(0));
    Task<? extends Serializable> currTask = mapredCtx.getCurrTask();
    MapredWork plan = (MapredWork) currTask.getWork();
    HashMap<Operator<? extends OperatorDesc>, Task<? extends Serializable>> opTaskMap = ctx
        .getOpTaskMap();
    Task<? extends Serializable> opMapTask = opTaskMap.get(reducer);

    ctx.setCurrTask(currTask);

    // If the plan for this reducer does not exist, initialize the plan
    if (opMapTask == null) {
      // When the reducer is encountered for the first time
      if (plan.getReducer() == null) {
        GenMapRedUtils.initMapJoinPlan(op, ctx, true, false, true, -1);
        // When mapjoin is followed by a multi-table insert
      } else {
        GenMapRedUtils.splitPlan(op, ctx);
      }
    } else {
      // There is a join after mapjoin. One of the branches of mapjoin has already
      // been initialized.
      // Initialize the current branch, and join with the original plan.
      assert plan.getReducer() != reducer;
      GenMapRedUtils.joinPlan(op, currTask, opMapTask, ctx, -1, false, true,
          false);
    }

    mapCurrCtx.put(op, new GenMapRedCtx(ctx.getCurrTask(), ctx.getCurrTopOp(),
        ctx.getCurrAliasId()));

    // the mapjoin operator has been processed
    ctx.setCurrMapJoinOp(null);
    return null;

Examples of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx

    ReduceSinkOperator op = (ReduceSinkOperator) nd;
    GenMRProcContext ctx = (GenMRProcContext) opProcCtx;

    Map<Operator<? extends OperatorDesc>, GenMapRedCtx> mapCurrCtx = ctx
        .getMapCurrCtx();
    GenMapRedCtx mapredCtx = mapCurrCtx.get(op.getParentOperators().get(0));
    Task<? extends Serializable> currTask = mapredCtx.getCurrTask();
    Operator<? extends OperatorDesc> currTopOp = mapredCtx.getCurrTopOp();
    String currAliasId = mapredCtx.getCurrAliasId();
    Operator<? extends OperatorDesc> reducer = op.getChildOperators().get(0);
    Map<Operator<? extends OperatorDesc>, Task<? extends Serializable>> opTaskMap = ctx
        .getOpTaskMap();
    Task<? extends Serializable> opMapTask = opTaskMap.get(reducer);

    ctx.setCurrTopOp(currTopOp);
    ctx.setCurrAliasId(currAliasId);
    ctx.setCurrTask(currTask);

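    // No task exists for this reducer yet, so split the plan here; otherwise merge
    // this branch into the reducer's existing task and continue from that task.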
    if (opMapTask == null) {
      GenMapRedUtils.splitPlan(op, ctx);
    } else {
      GenMapRedUtils.joinPlan(op, currTask, opMapTask, ctx, -1, true, false,
          false);
      currTask = opMapTask;
      ctx.setCurrTask(currTask);
    }

    mapCurrCtx.put(op, new GenMapRedCtx(ctx.getCurrTask(), ctx.getCurrTopOp(),
        ctx.getCurrAliasId()));
    return null;
  }

Examples of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx

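      // (Same flow as the earlier intermediate-file example, written against the
      // newer MapredWork/TableDesc/FileSinkDesc classes.)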
      if (listMapJoinOps.contains(mapJoin)) {
        ctx.setCurrAliasId(null);
        ctx.setCurrTopOp(null);
        Map<Operator<? extends OperatorDesc>, GenMapRedCtx> mapCurrCtx = ctx
            .getMapCurrCtx();
        mapCurrCtx.put((Operator<? extends OperatorDesc>) nd, new GenMapRedCtx(
            ctx.getCurrTask(), null, null));
        return null;
      }

      ctx.setCurrMapJoinOp(mapJoin);

      Task<? extends Serializable> currTask = ctx.getCurrTask();
      GenMRMapJoinCtx mjCtx = ctx.getMapJoinCtx(mapJoin);
      if (mjCtx == null) {
        mjCtx = new GenMRMapJoinCtx();
        ctx.setMapJoinCtx(mapJoin, mjCtx);
      }

      MapredWork mjPlan = GenMapRedUtils.getMapRedWork(parseCtx);
      Task<? extends Serializable> mjTask = TaskFactory.get(mjPlan, parseCtx
          .getConf());

      TableDesc tt_desc = PlanUtils.getIntermediateFileTableDesc(PlanUtils
          .getFieldSchemasFromRowSchema(mapJoin.getSchema(), "temporarycol"));

      // generate the temporary file
      Context baseCtx = parseCtx.getContext();
      String taskTmpDir = baseCtx.getMRTmpFileURI();

      // Add the path to alias mapping
      mjCtx.setTaskTmpDir(taskTmpDir);
      mjCtx.setTTDesc(tt_desc);
      mjCtx.setRootMapJoinOp(sel);

      sel.setParentOperators(null);

      // Create a file sink operator for this file name
      Operator<? extends OperatorDesc> fs_op = OperatorFactory.get(
          new FileSinkDesc(taskTmpDir, tt_desc, parseCtx.getConf().getBoolVar(
          HiveConf.ConfVars.COMPRESSINTERMEDIATE)), mapJoin.getSchema());

      assert mapJoin.getChildOperators().size() == 1;
      mapJoin.getChildOperators().set(0, fs_op);

      List<Operator<? extends OperatorDesc>> parentOpList =
        new ArrayList<Operator<? extends OperatorDesc>>();
      parentOpList.add(mapJoin);
      fs_op.setParentOperators(parentOpList);

      currTask.addDependentTask(mjTask);

      ctx.setCurrTask(mjTask);
      ctx.setCurrAliasId(null);
      ctx.setCurrTopOp(null);

      Map<Operator<? extends OperatorDesc>, GenMapRedCtx> mapCurrCtx = ctx
          .getMapCurrCtx();
      mapCurrCtx.put((Operator<? extends OperatorDesc>) nd, new GenMapRedCtx(
          ctx.getCurrTask(), null, null));

      return null;
    }