Package eu.stratosphere.compiler.plan

Examples of eu.stratosphere.compiler.plan.PlanNode


     
      if (strategy.firstDam() == DamBehavior.FULL_DAM || in1.getLocalStrategy().dams() || in1.getTempMode().breaksPipeline()) {
        someDamOnLeftPaths = true;
      } else {
        for (OptimizerNode brancher : this.hereJoinedBranches) {
          PlanNode candAtBrancher = in1.getSource().getCandidateAtBranchPoint(brancher);
         
          // not all candidates are found, because this list includes joined branches from both regular inputs and broadcast variables
          if (candAtBrancher == null) {
            continue;
          }
         
          SourceAndDamReport res = in1.getSource().hasDamOnPathDownTo(candAtBrancher);
          if (res == NOT_FOUND) {
            throw new CompilerException("Bug: Tracing dams for deadlock detection is broken.");
          } else if (res == FOUND_SOURCE) {
            damOnAllLeftPaths = false;
          } else if (res == FOUND_SOURCE_AND_DAM) {
            someDamOnLeftPaths = true;
          } else {
            throw new CompilerException();
          }
        }
      }
     
      if (strategy.secondDam() == DamBehavior.FULL_DAM || in2.getLocalStrategy().dams() || in2.getTempMode().breaksPipeline()) {
        someDamOnRightPaths = true;
      } else {
        for (OptimizerNode brancher : this.hereJoinedBranches) {
          PlanNode candAtBrancher = in2.getSource().getCandidateAtBranchPoint(brancher);
         
          // not all candidates are found, because this list includes joined branches from both regular inputs and broadcast variables
          if (candAtBrancher == null) {
            continue;
          }
View Full Code Here
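
The excerpt above decides whether every path into a two-input node already contains a dam, which is the basis of the deadlock detection. The per-channel test it repeats can be isolated as below; this is a minimal sketch, assuming the package location of DamBehavior (the Channel accessors are exactly the ones used in the excerpt).

import eu.stratosphere.compiler.plan.Channel;
import eu.stratosphere.pact.runtime.task.DamBehavior;   // package path assumed

public final class DamChecks {

  /**
   * True if the given input channel is guaranteed to dam the flow: the driver
   * strategy fully materializes that side, the local strategy (e.g. a sort) dams,
   * or the temp mode explicitly breaks the pipeline.
   */
  public static boolean channelDams(Channel in, DamBehavior driverSideDam) {
    return driverSideDam == DamBehavior.FULL_DAM
        || in.getLocalStrategy().dams()
        || in.getTempMode().breaksPipeline();
  }
}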


    // 4) Throw away all candidates that are not compatible with the properties currently
    //    requested from the initial partial solution
   
    // Make sure that the workset candidates fulfill the input requirements
    for (Iterator<PlanNode> planDeleter = worksetCandidates.iterator(); planDeleter.hasNext(); ) {
      PlanNode candidate = planDeleter.next();
      if (!(globPropsReqWorkset.isMetBy(candidate.getGlobalProperties()) && locPropsReqWorkset.isMetBy(candidate.getLocalProperties()))) {
        planDeleter.remove();
      }
    }
    if (worksetCandidates.isEmpty()) {
      return;
    }
   
    // sanity check the solution set delta and cancel out the delta node, if it is not needed
    for (Iterator<PlanNode> deltaPlans = solutionSetDeltaCandidates.iterator(); deltaPlans.hasNext(); ) {
      SingleInputPlanNode candidate = (SingleInputPlanNode) deltaPlans.next();
      GlobalProperties gp = candidate.getGlobalProperties();
     
      if (gp.getPartitioning() != PartitioningProperty.HASH_PARTITIONED || gp.getPartitioningFields() == null ||
          !gp.getPartitioningFields().equals(this.solutionSetKeyFields))
      {
        throw new CompilerException("Bug: The solution set delta is not partitioned.");
View Full Code Here
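
The pruning loop above can be read as a reusable filter over plan candidates. A sketch of just that step, assuming the requested-property variables are of the RequestedGlobalProperties/RequestedLocalProperties types (their declarations are not visible in the excerpt):

import java.util.Iterator;
import java.util.List;

import eu.stratosphere.compiler.dataproperties.RequestedGlobalProperties;  // types assumed
import eu.stratosphere.compiler.dataproperties.RequestedLocalProperties;   // types assumed
import eu.stratosphere.compiler.plan.PlanNode;

public final class CandidatePruning {

  /** Drops every candidate whose produced properties do not meet the requested ones. */
  public static void prune(List<PlanNode> candidates,
      RequestedGlobalProperties reqGlobal, RequestedLocalProperties reqLocal) {
    for (Iterator<PlanNode> it = candidates.iterator(); it.hasNext(); ) {
      PlanNode candidate = it.next();
      if (!(reqGlobal.isMetBy(candidate.getGlobalProperties())
          && reqLocal.isMetBy(candidate.getLocalProperties()))) {
        it.remove();
      }
    }
  }
}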

       
        // check that the root of the step function has the same DOP as the iteration.
        // because the tail must have the same DOP as the head, we can only merge the last
        // operator into the tail if they have the same DOP; running them unmerged is
        // currently not implemented
        PlanNode root = iterationNode.getRootOfStepFunction();
        if (root.getDegreeOfParallelism() != node.getDegreeOfParallelism() ||
            root.getSubtasksPerInstance() != node.getSubtasksPerInstance())
        {
          throw new CompilerException("Error: The final operator of the step " +
              "function has a different degree of parallelism than the iteration operator itself.");
        }
       
        IterationDescriptor descr = new IterationDescriptor(iterationNode, this.iterationIdEnumerator++);
        this.iterations.put(iterationNode, descr);
        vertex = null;
      }
      else if (node instanceof WorksetIterationPlanNode) {
        WorksetIterationPlanNode iterationNode = (WorksetIterationPlanNode) node;

        // we have the same constraints as for the bulk iteration
        PlanNode nextWorkSet = iterationNode.getNextWorkSetPlanNode();
        PlanNode solutionSetDelta  = iterationNode.getSolutionSetDeltaPlanNode();
       
        if (nextWorkSet.getDegreeOfParallelism() != node.getDegreeOfParallelism() ||
          nextWorkSet.getSubtasksPerInstance() != node.getSubtasksPerInstance())
        {
          throw new CompilerException("It is currently not supported that the final operator of the step " +
              "function has a different degree of parallelism than the iteration operator itself.");
        }
        if (solutionSetDelta.getDegreeOfParallelism() != node.getDegreeOfParallelism() ||
          solutionSetDelta.getSubtasksPerInstance() != node.getSubtasksPerInstance())
        {
          throw new CompilerException("It is currently not supported that the final operator of the step " +
              "function has a different degree of parallelism than the iteration operator itself.");
        }
       
        IterationDescriptor descr = new IterationDescriptor(iterationNode, this.iterationIdEnumerator++);
        this.iterations.put(iterationNode, descr);
        vertex = null;
      }
      else if (node instanceof SingleInputPlanNode) {
        vertex = createSingleInputVertex((SingleInputPlanNode) node);
      }
      else if (node instanceof DualInputPlanNode) {
        vertex = createDualInputVertex((DualInputPlanNode) node);
      }
      else if (node instanceof NAryUnionPlanNode) {
        // skip the union for now
        vertex = null;
      }
      else if (node instanceof BulkPartialSolutionPlanNode) {
        // create a head node (or not, if it is merged into its successor)
        vertex = createBulkIterationHead((BulkPartialSolutionPlanNode) node);
      }
      else if (node instanceof SolutionSetPlanNode) {
        // this represents an access into the solution set index.
        // we do not create a vertex for the solution set here (we create the head at the workset placeholder)
       
        // we adjust the joins / cogroups that go into the solution set here
        for (Channel c : node.getOutgoingChannels()) {
          DualInputPlanNode target = (DualInputPlanNode) c.getTarget();
          AbstractJobVertex accessingVertex = this.vertices.get(target);
          TaskConfig conf = new TaskConfig(accessingVertex.getConfiguration());
          int inputNum = c == target.getInput1() ? 0 : c == target.getInput2() ? 1 : -1;
         
          // sanity checks
          if (inputNum == -1) {
            throw new CompilerException();
          }
         
          // adjust the driver
          if (conf.getDriver().equals(MatchDriver.class)) {
            conf.setDriver(inputNum == 0 ? JoinWithSolutionSetFirstDriver.class : JoinWithSolutionSetSecondDriver.class);
          }
          else if (conf.getDriver().equals(CoGroupDriver.class)) {
            conf.setDriver(inputNum == 0 ? CoGroupWithSolutionSetFirstDriver.class : CoGroupWithSolutionSetSecondDriver.class);
          }
          else {
            throw new CompilerException("Found join with solution set using incompatible operator (only Join/CoGroup are valid).");
          }
        }
       
        // make sure we do not visit this node again. for that, we add an 'already seen' entry into one of the sets
        this.chainedTasks.put(node, ALREADY_VISITED_PLACEHOLDER);
       
        vertex = null;
      }
      else if (node instanceof WorksetPlanNode) {
        // create the iteration head here
        vertex = createWorksetIterationHead((WorksetPlanNode) node);
      }
      else {
        throw new CompilerException("Unrecognized node type: " + node.getClass().getName());
      }
    }
    catch (Exception e) {
      throw new CompilerException("Error translating node '" + node + "': " + e.getMessage(), e);
    }
   
    // check if a vertex was created, or if it was chained or skipped
    if (vertex != null) {
      // set degree of parallelism
      int pd = node.getDegreeOfParallelism();
      vertex.setNumberOfSubtasks(pd);
 
      // check whether this is the vertex with the highest degree of parallelism
      if (this.maxDegreeVertex == null || this.maxDegreeVertex.getNumberOfSubtasks() < pd) {
        this.maxDegreeVertex = vertex;
      }
 
      // set the number of tasks per instance
      if (node.getSubtasksPerInstance() >= 1) {
        vertex.setNumberOfSubtasksPerInstance(node.getSubtasksPerInstance());
      }
     
      // check whether this vertex is part of an iteration step function
      if (this.currentIteration != null) {
        // check that the task has the same DOP as the iteration as such
        PlanNode iterationNode = (PlanNode) this.currentIteration;
        if (iterationNode.getDegreeOfParallelism() < pd) {
          throw new CompilerException("Error: All functions that are part of an iteration must have the same, or a lower, degree-of-parallelism than the iteration operator.");
        }
        if (iterationNode.getSubtasksPerInstance() < node.getSubtasksPerInstance()) {
          throw new CompilerException("Error: All functions that are part of an iteration must have the same, or a lower, number of subtasks-per-node than the iteration operator.");
        }
       
        // store the id of the iterations the step functions participate in
        IterationDescriptor descr = this.iterations.get(this.currentIteration);
View Full Code Here
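
The parallelism checks repeated throughout the vertex-creation code above boil down to two comparisons between a step-function member and its enclosing iteration node. A condensed sketch of that check (only PlanNode accessors seen in the excerpt are used; the CompilerException import path is assumed):

import eu.stratosphere.compiler.CompilerException;  // package path assumed
import eu.stratosphere.compiler.plan.PlanNode;

public final class IterationDopCheck {

  /** Rejects step-function members that exceed the iteration's parallelism settings. */
  public static void checkMemberParallelism(PlanNode iteration, PlanNode member) {
    if (iteration.getDegreeOfParallelism() < member.getDegreeOfParallelism()) {
      throw new CompilerException("All functions that are part of an iteration must have "
          + "the same, or a lower, degree of parallelism than the iteration operator.");
    }
    if (iteration.getSubtasksPerInstance() < member.getSubtasksPerInstance()) {
      throw new CompilerException("All functions that are part of an iteration must have "
          + "the same, or a lower, number of subtasks per instance than the iteration operator.");
    }
  }
}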

          }
 
          AbstractJobVertex container = chainedTask.getContainingVertex();
         
          if (container == null) {
            final PlanNode sourceNode = inConn.getSource();
            container = this.vertices.get(sourceNode);
            if (container == null) {
              // predecessor is itself chained
              container = this.chainedTasks.get(sourceNode).getContainingVertex();
              if (container == null) {
View Full Code Here

  }
 
  private int translateChannel(Channel input, int inputIndex, AbstractJobVertex targetVertex,
      TaskConfig targetVertexConfig, boolean isBroadcast) throws Exception
  {
    final PlanNode inputPlanNode = input.getSource();
    final Iterator<Channel> allInChannels;
   
    if (inputPlanNode instanceof NAryUnionPlanNode) {
      allInChannels = ((NAryUnionPlanNode) inputPlanNode).getListOfInputs().iterator();
    }
    else if (inputPlanNode instanceof BulkPartialSolutionPlanNode) {
      if (this.vertices.get(inputPlanNode) == null) {
        // merged iteration head
        final BulkPartialSolutionPlanNode pspn = (BulkPartialSolutionPlanNode) inputPlanNode;
        final BulkIterationPlanNode iterationNode = pspn.getContainingIterationNode();
       
        // check if the iteration's input is a union
        if (iterationNode.getInput().getSource() instanceof NAryUnionPlanNode) {
          allInChannels = ((NAryUnionPlanNode) iterationNode.getInput().getSource()).getInputs();
        } else {
          allInChannels = Collections.singletonList(iterationNode.getInput()).iterator();
        }
       
        // also, set the index of the gate with the partial solution
        targetVertexConfig.setIterationHeadPartialSolutionOrWorksetInputIndex(inputIndex);
      } else {
        // standalone iteration head
        allInChannels = Collections.singletonList(input).iterator();
      }
    } else if (inputPlanNode instanceof WorksetPlanNode) {
      if (this.vertices.get(inputPlanNode) == null) {
        // merged iteration head
        final WorksetPlanNode wspn = (WorksetPlanNode) inputPlanNode;
        final WorksetIterationPlanNode iterationNode = wspn.getContainingIterationNode();
       
        // check if the iteration's input is a union
        if (iterationNode.getInput2().getSource() instanceof NAryUnionPlanNode) {
          allInChannels = ((NAryUnionPlanNode) iterationNode.getInput2().getSource()).getInputs();
        } else {
          allInChannels = Collections.singletonList(iterationNode.getInput2()).iterator();
        }
       
        // also, set the index of the gate with the partial solution
        targetVertexConfig.setIterationHeadPartialSolutionOrWorksetInputIndex(inputIndex);
      } else {
        // standalone iteration head
        allInChannels = Collections.singletonList(input).iterator();
      }
    } else if (inputPlanNode instanceof SolutionSetPlanNode) {
      // for now, skip connections with the solution set node, as this is a local index access (later to be parameterized here)
      // rather than a vertex connection
      return 0;
    } else {
      allInChannels = Collections.singletonList(input).iterator();
    }
   
    // check that the type serializer is consistent
    TypeSerializerFactory<?> typeSerFact = null;
   
    // accounting for channels on the dynamic path
    int numChannelsTotal = 0;
    int numChannelsDynamicPath = 0;
    int numDynamicSenderTasksTotal = 0;
   

    // expand the channel to all the union channels, in case there is a union operator at its source
    while (allInChannels.hasNext()) {
      final Channel inConn = allInChannels.next();
     
      // sanity check the common serializer
      if (typeSerFact == null) {
        typeSerFact = inConn.getSerializer();
      } else if (!typeSerFact.equals(inConn.getSerializer())) {
        throw new CompilerException("Conflicting types in union operator.");
      }
     
      final PlanNode sourceNode = inConn.getSource();
      AbstractJobVertex sourceVertex = this.vertices.get(sourceNode);
      TaskConfig sourceVertexConfig;

      if (sourceVertex == null) {
        // this predecessor is chained to another task or an iteration
View Full Code Here
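
Inside the union-expansion loop above, the first thing verified for each channel is that all union inputs agree on a single serializer. That check, pulled out on its own as a sketch (the TypeSerializerFactory and CompilerException import paths are guesses):

import java.util.Iterator;

import eu.stratosphere.api.common.typeutils.TypeSerializerFactory;  // package path assumed
import eu.stratosphere.compiler.CompilerException;                  // package path assumed
import eu.stratosphere.compiler.plan.Channel;

public final class UnionSerializerCheck {

  /** Returns the common serializer factory of all channels, or fails if they conflict. */
  public static TypeSerializerFactory<?> commonSerializer(Iterator<Channel> channels) {
    TypeSerializerFactory<?> common = null;
    while (channels.hasNext()) {
      Channel c = channels.next();
      if (common == null) {
        common = c.getSerializer();
      } else if (!common.equals(c.getSerializer())) {
        throw new CompilerException("Conflicting types in union operator.");
      }
    }
    return common;
  }
}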

   
    // check, whether chaining is possible
    boolean chaining = false;
    {
      Channel inConn = node.getInput();
      PlanNode pred = inConn.getSource();
      chaining = ds.getPushChainDriverClass() != null &&
          !(pred instanceof NAryUnionPlanNode) &&  // first op after union is stand-alone, because union is merged
          !(pred instanceof BulkPartialSolutionPlanNode) &&  // partial solution merges anyways
          !(pred instanceof WorksetPlanNode) &&  // workset merges anyways
          !(pred instanceof IterationPlanNode) && // cannot chain with iteration heads currently
          inConn.getShipStrategy() == ShipStrategyType.FORWARD &&
          inConn.getLocalStrategy() == LocalStrategy.NONE &&
          pred.getOutgoingChannels().size() == 1 &&
          node.getDegreeOfParallelism() == pred.getDegreeOfParallelism() &&
          node.getSubtasksPerInstance() == pred.getSubtasksPerInstance() &&
          node.getBroadcastInputs().isEmpty();
     
      // cannot chain the nodes that produce the next workset or the next solution set, if they
      // are not in the iteration tail
      if (this.currentIteration != null && this.currentIteration instanceof WorksetIterationPlanNode &&
View Full Code Here
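
The boolean expression above lists every condition under which a single-input operator may be chained to its predecessor. The same conditions, restated as a standalone predicate for readability; a sketch only, with the ShipStrategyType and LocalStrategy import paths assumed and the push-chain-driver availability passed in as a flag instead of the driver-strategy lookup used above:

import eu.stratosphere.compiler.plan.BulkPartialSolutionPlanNode;
import eu.stratosphere.compiler.plan.Channel;
import eu.stratosphere.compiler.plan.IterationPlanNode;
import eu.stratosphere.compiler.plan.NAryUnionPlanNode;
import eu.stratosphere.compiler.plan.PlanNode;
import eu.stratosphere.compiler.plan.SingleInputPlanNode;
import eu.stratosphere.compiler.plan.WorksetPlanNode;
import eu.stratosphere.pact.runtime.shipping.ShipStrategyType;   // package path assumed
import eu.stratosphere.pact.runtime.task.util.LocalStrategy;     // package path assumed

public final class ChainingCheck {

  /**
   * A node can be chained when a push-chain driver exists, its predecessor is a plain
   * operator (not a union or an iteration head), data is forwarded without a local
   * strategy, the predecessor has exactly one consumer, parallelism matches, and the
   * node has no broadcast inputs.
   */
  public static boolean canChain(SingleInputPlanNode node, boolean hasPushChainDriver) {
    Channel inConn = node.getInput();
    PlanNode pred = inConn.getSource();

    return hasPushChainDriver
        && !(pred instanceof NAryUnionPlanNode)
        && !(pred instanceof BulkPartialSolutionPlanNode)
        && !(pred instanceof WorksetPlanNode)
        && !(pred instanceof IterationPlanNode)
        && inConn.getShipStrategy() == ShipStrategyType.FORWARD
        && inConn.getLocalStrategy() == LocalStrategy.NONE
        && pred.getOutgoingChannels().size() == 1
        && node.getDegreeOfParallelism() == pred.getDegreeOfParallelism()
        && node.getSubtasksPerInstance() == pred.getSubtasksPerInstance()
        && node.getBroadcastInputs().isEmpty();
  }
}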

    //    this translates to a local strategy that would only be executed in the first iteration
   
    final boolean merge;
    if (mergeIterationAuxTasks && pspn.getOutgoingChannels().size() == 1) {
      final Channel c = pspn.getOutgoingChannels().get(0);
      final PlanNode successor = c.getTarget();
      merge = c.getShipStrategy() == ShipStrategyType.FORWARD &&
          c.getLocalStrategy() == LocalStrategy.NONE &&
          c.getTempMode() == TempMode.NONE &&
          successor.getDegreeOfParallelism() == pspn.getDegreeOfParallelism() &&
          successor.getSubtasksPerInstance() == pspn.getSubtasksPerInstance() &&
          !(successor instanceof NAryUnionPlanNode) &&
          successor != iteration.getRootOfStepFunction() &&
          iteration.getInput().getLocalStrategy() == LocalStrategy.NONE;
    } else {
      merge = false;
    }
   
    // create or adopt the head vertex
    final JobTaskVertex toReturn;
    final JobTaskVertex headVertex;
    final TaskConfig headConfig;
    if (merge) {
      final PlanNode successor = pspn.getOutgoingChannels().get(0).getTarget();
      headVertex = (JobTaskVertex) this.vertices.get(successor);
     
      if (headVertex == null) {
        throw new CompilerException(
          "Bug: Trying to merge solution set with its sucessor, but successor has not been created.");
View Full Code Here

    //    this translates to a local strategy that would only be executed in the first superstep
   
    final boolean merge;
    if (mergeIterationAuxTasks && wspn.getOutgoingChannels().size() == 1) {
      final Channel c = wspn.getOutgoingChannels().get(0);
      final PlanNode successor = c.getTarget();
      merge = c.getShipStrategy() == ShipStrategyType.FORWARD &&
          c.getLocalStrategy() == LocalStrategy.NONE &&
          c.getTempMode() == TempMode.NONE &&
          successor.getDegreeOfParallelism() == wspn.getDegreeOfParallelism() &&
          successor.getSubtasksPerInstance() == wspn.getSubtasksPerInstance() &&
          !(successor instanceof NAryUnionPlanNode) &&
          successor != iteration.getNextWorkSetPlanNode() &&
          iteration.getInitialWorksetInput().getLocalStrategy() == LocalStrategy.NONE;
    } else {
      merge = false;
    }
   
    // create or adopt the head vertex
    final JobTaskVertex toReturn;
    final JobTaskVertex headVertex;
    final TaskConfig headConfig;
    if (merge) {
      final PlanNode successor = wspn.getOutgoingChannels().get(0).getTarget();
      headVertex = (JobTaskVertex) this.vertices.get(successor);
     
      if (headVertex == null) {
        throw new CompilerException(
          "Bug: Trying to merge solution set with its sucessor, but successor has not been created.");
View Full Code Here

    }
   
   
    // ----------------------------- create the iteration tail ------------------------------
   
    final PlanNode rootOfTerminationCriterion = bulkNode.getRootOfTerminationCriterion();
    final PlanNode rootOfStepFunction = bulkNode.getRootOfStepFunction();
    final TaskConfig tailConfig;
   
    JobTaskVertex rootOfStepFunctionVertex = (JobTaskVertex) this.vertices.get(rootOfStepFunction);
    if (rootOfStepFunctionVertex == null) {
      // last op is chained
      final TaskInChain taskInChain = this.chainedTasks.get(rootOfStepFunction);
      if (taskInChain == null) {
        throw new CompilerException("Bug: Tail of step function not found as vertex or chained task.");
      }
      rootOfStepFunctionVertex = (JobTaskVertex) taskInChain.getContainingVertex();

      // the fake channel is statically typed to PactRecord. no data is sent over this channel anyway.
      tailConfig = taskInChain.getTaskConfig();
    } else {
      tailConfig = new TaskConfig(rootOfStepFunctionVertex.getConfiguration());
    }
   
    tailConfig.setIsWorksetUpdate();
   
    // No following termination criterion
    if(rootOfStepFunction.getOutgoingChannels().isEmpty()) {
     
      rootOfStepFunctionVertex.setTaskClass(IterationTailPactTask.class);
     
      tailConfig.setOutputSerializer(bulkNode.getSerializerForIterationChannel());
      tailConfig.addOutputShipStrategy(ShipStrategyType.FORWARD);
View Full Code Here

      // we have three possible cases:
      // 1) Two tails, one for workset update, one for solution set update
      // 2) One tail for workset update, solution set update happens in an intermediate task
      // 3) One tail for solution set update, workset update happens in an intermediate task
     
      final PlanNode nextWorksetNode = iterNode.getNextWorkSetPlanNode();
      final PlanNode solutionDeltaNode = iterNode.getSolutionSetDeltaPlanNode();
     
      final boolean hasWorksetTail = nextWorksetNode.getOutgoingChannels().isEmpty();
      final boolean hasSolutionSetTail = (!iterNode.isImmediateSolutionSetUpdate()) || (!hasWorksetTail);
     
      {
View Full Code Here
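
The three cases listed in the comment above are driven by two flags: whether the next-workset result is consumed by nothing inside the step function (and is therefore a tail), and whether the solution-set delta needs a tail of its own. A sketch of just that case analysis, using only the accessors that appear in the excerpt:

import eu.stratosphere.compiler.plan.PlanNode;
import eu.stratosphere.compiler.plan.WorksetIterationPlanNode;

public final class WorksetTailLayout {

  /** True if the next-workset result has no consumers inside the step function. */
  public static boolean hasWorksetTail(WorksetIterationPlanNode iterNode) {
    PlanNode nextWorkset = iterNode.getNextWorkSetPlanNode();
    return nextWorkset.getOutgoingChannels().isEmpty();
  }

  /** The solution-set delta needs a tail when the update is not immediate, or when the workset side has no tail. */
  public static boolean hasSolutionSetTail(WorksetIterationPlanNode iterNode) {
    return !iterNode.isImmediateSolutionSetUpdate() || !hasWorksetTail(iterNode);
  }
}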


