Examples of cascading.flow.planner.Scope
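
A Scope is the record the flow planner attaches to each edge between two connected pipe elements; it carries the Fields resolved across that edge: argument selectors, declared fields, grouping and sorting selectors, and the outgoing grouping and value field sets. The excerpts below come from call sites inside the Cascading codebase. As a minimal orienting sketch, not taken from the library, the usual access pattern looks like this (incomingScopes is assumed to be the Set<Scope> passed into the resolver methods shown further down):

  // print what each incoming edge carries before resolving the outgoing Scope
  for( Scope incomingScope : incomingScopes )
    {
    System.out.println( incomingScope.getName() );
    System.out.println( incomingScope.getIncomingSpliceFields().printVerbose() );
    }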


  @Override
  public void initialize()
    {
    super.initialize();

    Scope outgoingScope = Util.getFirst( outgoingScopes );
    valueEntry = new TupleEntry( outgoingScope.getOutValuesFields(), true );
    }
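
The TupleEntry created above is typically reused for the value Tuples flowing through the stage. A small sketch of how such an entry behaves, with hypothetical field names and values (the second constructor argument requests an unmodifiable entry, exactly as in the excerpt):

  TupleEntry valueEntry = new TupleEntry( new Fields( "id", "name" ), true );

  valueEntry.setTuple( new Tuple( 1, "left" ) );

  // values are read back by field name
  String name = valueEntry.getString( "name" ); // "left"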

      outGroupingFields = createJoinFields( incomingScopes, groupingSelectors, declared );

    // for Group, the outgoing fields are the same as those declared
    Scope.Kind kind = getScopeKind();

    return new Scope( getName(), declared, outGroupingFields, groupingSelectors, sortingSelectors, declared, kind );
    }
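
In this grouping case the declared fields double as the outgoing value fields, while outGroupingFields carries the resolved join keys. The grouping and sorting selectors fed into this constructor originate from the splice declared in the pipe assembly; a hypothetical CoGroup supplying them might look like this (pipe names, join fields, and the declared-fields layout are all illustrative):

  Pipe lhs = new Pipe( "lhs" );
  Pipe rhs = new Pipe( "rhs" );

  // join both branches on "id"; the declared fields rename the merged output
  Pipe joined = new CoGroup( lhs, new Fields( "id" ), rhs, new Fields( "id" ), new Fields( "id1", "name", "id2", "value" ) );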

        Iterator<Scope> iterator = incomingScopes.iterator();
        Fields commonFields = iterator.next().getIncomingSpliceFields();

        while( iterator.hasNext() )
          {
          Scope incomingScope = iterator.next();
          Fields fields = incomingScope.getIncomingSpliceFields();

          if( !commonFields.equalsFields( fields ) )
            throw new OperatorException( this, "merged streams must declare the same field names, in the same order, expected: " + commonFields.printVerbose() + " found: " + fields.printVerbose() );
          }
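
equalsFields() compares field names and their order, which is why two branches feeding a merge must declare identical field lists. A quick illustration with hypothetical fields:

  Fields first = new Fields( "id", "name" );
  Fields second = new Fields( "name", "id" );

  // same names but different order, so merging these two branches would fail
  boolean same = first.equalsFields( second ); // false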

      throw new OperatorException( this, "resolved wrong number of arguments: " + argumentSelector.printVerbose() + ", expected: " + operation.getNumArgs() );
    }

  Fields resolveOutgoingSelector( Set<Scope> incomingScopes, Fields argumentFields, Fields declaredFields )
    {
    Scope incomingScope = getFirst( incomingScopes );
    Fields outputSelector = getOutputSelector();

    if( outputSelector.isResults() )
      return declaredFields;

    if( outputSelector.isArguments() )
      return argumentFields;

    if( outputSelector.isGroup() )
      return incomingScope.getOutGroupingFields();

    if( outputSelector.isValues() )
      return incomingScope.getOutGroupingValueFields();

    Fields incomingFields = resolveIncomingOperationPassThroughFields( incomingScope );

    // not part of resolve as we need the argumentFields
    if( outputSelector.isSwap() )
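
The selector being tested is the output selector passed when the Each or Every pipe was constructed; Fields.RESULTS, Fields.ARGS, Fields.GROUP, Fields.VALUES, and Fields.SWAP are the special values the branches above check for. A hypothetical assembly showing where such a selector enters the planner:

  Pipe pipe = new Pipe( "example" );

  // keep only the operation results as the outgoing fields of this Each
  pipe = new Each( pipe, new Fields( "line" ), new Identity(), Fields.RESULTS );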

    // the incoming fields eligible to be outgoing
    Fields passThroughFields = resolveIncomingOperationPassThroughFields( getFirst( incomingScopes ) );
    Fields remainderFields = resolveRemainderFields( incomingScopes, argumentFields );

    return new Scope( getName(), Scope.Kind.EACH, passThroughFields, remainderFields, argumentFields, declaredFields, outgoingGroupingFields, outgoingValuesFields );
    }
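
Once the EACH Scope is built, later stages read these resolved field sets back off of it instead of re-resolving them; the initialize() excerpts elsewhere on this page do exactly that. A sketch of the read side, with illustrative local names:

  Scope scope = outgoingScopeFor( incomingScopes );

  Fields arguments = scope.getArgumentsSelector();
  Fields remainder = scope.getRemainderPassThroughFields();
  Fields outgoingValues = scope.getOutValuesFields();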

    }

  @Override
  public Scope outgoingScopeFor( Set<Scope> incomingScopes )
    {
    Scope incomingScope = getFirst( incomingScopes );

    if( !isBuffer() && incomingScope.getOutValuesFields().isNone() )
      throw new OperatorException( this, "only a Buffer may be preceded by a CoGroup declaring Fields.NONE as the join fields" );

    Fields argumentFields = resolveArgumentSelector( incomingScopes );

    verifyArguments( argumentFields );

    // we currently don't support using result from a previous Every in the current Every
    verifyAggregatorArguments( argumentFields, incomingScope );

    Fields declaredFields = resolveDeclared( incomingScopes, argumentFields );

    verifyDeclaredFields( declaredFields );

    Fields outgoingGroupingFields = resolveOutgoingGroupingSelector( incomingScopes, argumentFields, declaredFields );

    verifyOutputSelector( outgoingGroupingFields );

    Fields outgoingValuesFields = incomingScope.getOutValuesFields();

    // the incoming fields eligible to be outgoing, for Every only the grouping fields.
    Fields passThroughFields = resolveIncomingOperationPassThroughFields( incomingScope );
    Fields remainderFields = resolveRemainderFields( incomingScopes, argumentFields );

    return new Scope( getName(), Scope.Kind.EVERY, passThroughFields, remainderFields, argumentFields, declaredFields, outgoingGroupingFields, outgoingValuesFields );
    }
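
Everything resolved above is driven by how the Every was declared in the assembly: the argument selector, the aggregator's declared fields, and the output selector come from the pipe's constructor, while the incoming grouping and value fields come from the preceding GroupBy or CoGroup. A hypothetical assembly for comparison:

  Pipe pipe = new Pipe( "example" );
  pipe = new GroupBy( pipe, new Fields( "id" ) );

  // sum the "value" argument per group and append the total to the grouping fields
  pipe = new Every( pipe, new Fields( "value" ), new Sum( new Fields( "total" ) ), Fields.ALL );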

      {
      sortFields = new Fields[ size ];
      sortBuilder = new TupleBuilder[ size ];
      }

    Scope outgoingScope = outgoingScopes.get( 0 );

    int numScopes = Math.min( size, incomingScopes.size() );
    for( int i = 0; i < numScopes; i++ )
      {
      Scope incomingScope = incomingScopes.get( i );

      // for GroupBy, incoming may have same name, but guaranteed to have same key/value/sort fields for merge
      // arrays may be size 1, then ordinal should always be zero.
      int ordinal = size == 1 ? 0 : incomingScope.getOrdinal();

      keyFields[ ordinal ] = outgoingScope.getKeySelectors().get( incomingScope.getName() );
      valuesFields[ ordinal ] = incomingScope.getIncomingSpliceFields();

      keyBuilder[ ordinal ] = createNarrowBuilder( incomingScope.getIncomingSpliceFields(), keyFields[ ordinal ] );
      valuesBuilder[ ordinal ] = createNulledBuilder( incomingScope.getIncomingSpliceFields(), keyFields[ ordinal ] );

      if( sortFields != null )
        {
        sortFields[ ordinal ] = outgoingScope.getSortingSelectors().get( incomingScope.getName() );
        sortBuilder[ ordinal ] = createNarrowBuilder( incomingScope.getIncomingSpliceFields(), sortFields[ ordinal ] );
        }

      if( LOG.isDebugEnabled() )
        {
        LOG.debug( "incomingScope: {}, in pos: {}", incomingScope.getName(), ordinal );
        LOG.debug( "keyFields: {}", printSafe( keyFields[ ordinal ] ) );
        LOG.debug( "valueFields: {}", printSafe( valuesFields[ ordinal ] ) );

        if( sortFields != null )
          LOG.debug( "sortFields: {}", printSafe( sortFields[ ordinal ] ) );
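
Both selector maps on the outgoing Scope are keyed by the name of the incoming pipe, which is why the lookups above use incomingScope.getName(). For example, with a hypothetical pipe named "lhs":

  Map<String, Fields> keySelectors = outgoingScope.getKeySelectors();

  Fields lhsKeys = keySelectors.get( "lhs" );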

    int size = splice.isGroupBy() ? 1 : getNumDeclaredIncomingBranches();

    for( int i = 0; i < size; i++ )
      {
      Scope incomingScope = incomingScopes.get( i );

      int pos = splice.isGroupBy() ? 0 : splice.getPipePos().get( incomingScope.getName() );

      // we want the comparators
      Fields groupFields = splice.getKeySelectors().get( incomingScope.getName() );

      compareFields[ pos ] = groupFields; // used for finding hashers

      if( groupFields.size() == 0 )
        groupComparators[ pos ] = groupFields;
      else
        groupComparators[ pos ] = new SparseTupleComparator( Fields.asDeclaration( groupFields ), defaultComparator );

      groupComparators[ pos ] = splice.isSortReversed() ? Collections.reverseOrder( groupComparators[ pos ] ) : groupComparators[ pos ];

      if( sortFields != null )
        {
        // we want the comparators, so don't use sortFields array
        Fields sortFields = splice.getSortingSelectors().get( incomingScope.getName() );
        valueComparators[ pos ] = new SparseTupleComparator( valuesFields[ pos ], sortFields, defaultComparator );

        if( splice.isSortReversed() )
          valueComparators[ pos ] = Collections.reverseOrder( valueComparators[ pos ] );
        }
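
The sorting selectors and the reverse flag consumed here are declared on the splice itself; a GroupBy with a secondary sort supplies both (field names are hypothetical):

  Pipe previous = new Pipe( "example" );

  // group on "id", then sort each group on "timestamp" in reverse order
  Pipe grouped = new GroupBy( previous, new Fields( "id" ), new Fields( "timestamp" ), true );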

    }

  @Override
  public void initialize()
    {
    Scope outgoingScope = outgoingScopes.get( 0 );

    operationCall = new ConcreteCall( outgoingScope.getArgumentsDeclarator(), outgoingScope.getOperationDeclaredFields() );

    argumentsSelector = outgoingScope.getArgumentsSelector();
    remainderFields = outgoingScope.getRemainderPassThroughFields();
    outgoingSelector = getOutgoingSelector();

    argumentsEntry = new TupleEntry( outgoingScope.getArgumentsDeclarator(), true );

    outgoingEntry = new TupleEntry( getOutgoingFields(), true );

    operationCall.setArguments( argumentsEntry );
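
The ConcreteCall built from the Scope is what the user operation eventually receives as its OperationCall; the argument fields and the arguments entry set above are read back through it. A brief sketch of the read side, assuming the operationCall from the excerpt:

  Fields argumentFields = operationCall.getArgumentFields();
  TupleEntry arguments = operationCall.getArguments();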
