Package org.apache.commons.collections.map

Examples of org.apache.commons.collections.map.MultiValueMap$ValuesIterator
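
A minimal, self-contained sketch first (assuming commons-collections 3.x on the classpath; the class name MultiValueMapDemo and the sample keys are made up for illustration) of where ValuesIterator comes into play: it backs the flat values() view of a MultiValueMap.

import java.util.Iterator;

import org.apache.commons.collections.map.MultiValueMap;

public class MultiValueMapDemo {
    public static void main(String[] args) {
        // Each put() appends to the key's value collection instead of
        // replacing the previous value.
        MultiValueMap map = new MultiValueMap();
        map.put("color", "red");
        map.put("color", "blue");
        map.put("shape", "circle");

        // values() is a flat view over the values of every key; iterating it
        // is what exercises the MultiValueMap$ValuesIterator inner class.
        for (Iterator it = map.values().iterator(); it.hasNext();) {
            System.out.println(it.next());
        }

        System.out.println(map.getCollection("color")); // [red, blue]
        System.out.println(map.totalSize());            // 3
    }
}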


        matchingRulesList = new ArrayList<MatchingRule>();
        objectClassesList = new ArrayList<ObjectClass>();
        syntaxesList = new ArrayList<LdapSyntax>();

        // Maps
        schemasMap = new MultiValueMap();
        attributeTypesMap = new MultiValueMap();
        matchingRulesMap = new MultiValueMap();
        objectClassesMap = new MultiValueMap();
        syntaxesMap = new MultiValueMap();

        // Listeners
        schemaHandlerListeners = new ArrayList<SchemaHandlerListener>();
    }
View Full Code Here


   * @param logs
   * @throws Exception
   */
  void replayLog(List<File> logs) throws Exception {
    int count = 0;
    MultiMap transactionMap = new MultiValueMap();
    // seed both with the highest known sequence of either the txnId or the writeOrderID
    long transactionIDSeed = lastCheckpoint, writeOrderIDSeed = lastCheckpoint;
    LOG.info("Starting replay of " + logs);
    //Load the inflight puts into the transaction map to see if they were
    //committed in one of the logs.
    SetMultimap<Long, Long> inflightPuts = queue.deserializeInflightPuts();
    for (Long txnID : inflightPuts.keySet()) {
      Set<Long> eventPointers = inflightPuts.get(txnID);
      for (Long eventPointer : eventPointers) {
        transactionMap.put(txnID, FlumeEventPointer.fromLong(eventPointer));
      }
    }
    SetMultimap<Long, Long> inflightTakes = queue.deserializeInflightTakes();
    try {
      for (File log : logs) {
        LOG.info("Replaying " + log);
        try {
          LogFile.SequentialReader reader =
              LogFileFactory.getSequentialReader(log, encryptionKeyProvider);
          reader.skipToLastCheckpointPosition(queue.getLogWriteOrderID());
          Preconditions.checkState(!readers.containsKey(reader.getLogFileID()),
              "Readers " + readers + " already contains "
                  + reader.getLogFileID());
          readers.put(reader.getLogFileID(), reader);
          LogRecord logRecord = reader.next();
          if(logRecord == null) {
            readers.remove(reader.getLogFileID());
            reader.close();
          } else {
            logRecordBuffer.add(logRecord);
          }
        } catch(EOFException e) {
          LOG.warn("Ignoring " + log + " due to EOF", e);
        }
      }
      LogRecord entry = null;
      FlumeEventPointer ptr = null;
      while ((entry = next()) != null) {
        // for puts the fileId is the fileID of the file they exist in
        // for takes the fileId and offset are pointers to a put
        int fileId = entry.getFileID();
        int offset = entry.getOffset();
        TransactionEventRecord record = entry.getEvent();
        short type = record.getRecordType();
        long trans = record.getTransactionID();
        transactionIDSeed = Math.max(transactionIDSeed, trans);
        writeOrderIDSeed = Math.max(writeOrderIDSeed,
            record.getLogWriteOrderID());
        readCount++;
        if(readCount % 10000 == 0 && readCount > 0) {
          LOG.info("Read " + readCount + " records");
        }
        if (record.getLogWriteOrderID() > lastCheckpoint) {
          if (type == TransactionEventRecord.Type.PUT.get()) {
            putCount++;
            ptr = new FlumeEventPointer(fileId, offset);
            transactionMap.put(trans, ptr);
          } else if (type == TransactionEventRecord.Type.TAKE.get()) {
            takeCount++;
            Take take = (Take) record;
            ptr = new FlumeEventPointer(take.getFileID(), take.getOffset());
            transactionMap.put(trans, ptr);
          } else if (type == TransactionEventRecord.Type.ROLLBACK.get()) {
            rollbackCount++;
            transactionMap.remove(trans);
          } else if (type == TransactionEventRecord.Type.COMMIT.get()) {
            commitCount++;
            @SuppressWarnings("unchecked")
            Collection<FlumeEventPointer> pointers =
              (Collection<FlumeEventPointer>) transactionMap.remove(trans);
            if (((Commit) record).getType()
                    == TransactionEventRecord.Type.TAKE.get()) {
              if (inflightTakes.containsKey(trans)) {
                if(pointers == null){
                  pointers = Sets.newHashSet();
View Full Code Here
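
The replay code above (and the deprecated replayLogv1 variant below) leans on one MultiValueMap idiom. Stripped down (the transaction id and the string pointers here are placeholders, not Flume types): every put() under a transaction id accumulates another pointer, and remove(key) hands back the whole accumulated collection, which is why the COMMIT branch needs the unchecked cast.

import java.util.Collection;

import org.apache.commons.collections.MultiMap;
import org.apache.commons.collections.map.MultiValueMap;

public class TransactionMapSketch {
  public static void main(String[] args) {
    MultiMap transactionMap = new MultiValueMap();
    long txnId = 42L;

    // Accumulate several pointers under the same transaction id.
    transactionMap.put(txnId, "pointer-1");
    transactionMap.put(txnId, "pointer-2");

    // remove(key) returns the collection built up by the puts above,
    // hence the cast in the COMMIT branch of the replay code.
    @SuppressWarnings("unchecked")
    Collection<String> pointers = (Collection<String>) transactionMap.remove(txnId);
    System.out.println(pointers);                          // [pointer-1, pointer-2]
    System.out.println(transactionMap.containsKey(txnId)); // false
  }
}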

   */
  @Deprecated
  void replayLogv1(List<File> logs) throws Exception {
    int total = 0;
    int count = 0;
    MultiMap transactionMap = new MultiValueMap();
    //Read inflight puts to see if they were committed
    SetMultimap<Long, Long> inflightPuts = queue.deserializeInflightPuts();
    for (Long txnID : inflightPuts.keySet()) {
      Set<Long> eventPointers = inflightPuts.get(txnID);
      for (Long eventPointer : eventPointers) {
        transactionMap.put(txnID, FlumeEventPointer.fromLong(eventPointer));
      }
    }

    SetMultimap<Long, Long> inflightTakes = queue.deserializeInflightTakes();
    LOG.info("Starting replay of " + logs);
    for (File log : logs) {
      LOG.info("Replaying " + log);
      LogFile.SequentialReader reader = null;
      try {
        reader = LogFileFactory.getSequentialReader(log, encryptionKeyProvider);
        reader.skipToLastCheckpointPosition(queue.getLogWriteOrderID());
        LogRecord entry;
        FlumeEventPointer ptr;
        // for puts the fileId is the fileID of the file they exist in
        // for takes the fileId and offset are pointers to a put
        int fileId = reader.getLogFileID();

        while ((entry = reader.next()) != null) {
          int offset = entry.getOffset();
          TransactionEventRecord record = entry.getEvent();
          short type = record.getRecordType();
          long trans = record.getTransactionID();
          readCount++;
          if (record.getLogWriteOrderID() > lastCheckpoint) {
            if (type == TransactionEventRecord.Type.PUT.get()) {
              putCount++;
              ptr = new FlumeEventPointer(fileId, offset);
              transactionMap.put(trans, ptr);
            } else if (type == TransactionEventRecord.Type.TAKE.get()) {
              takeCount++;
              Take take = (Take) record;
              ptr = new FlumeEventPointer(take.getFileID(), take.getOffset());
              transactionMap.put(trans, ptr);
            } else if (type == TransactionEventRecord.Type.ROLLBACK.get()) {
              rollbackCount++;
              transactionMap.remove(trans);
            } else if (type == TransactionEventRecord.Type.COMMIT.get()) {
              commitCount++;
              @SuppressWarnings("unchecked")
              Collection<FlumeEventPointer> pointers =
                (Collection<FlumeEventPointer>) transactionMap.remove(trans);
              if (((Commit) record).getType()
                      == TransactionEventRecord.Type.TAKE.get()) {
                if (inflightTakes.containsKey(trans)) {
                  if (pointers == null) {
                    pointers = Sets.newHashSet();
View Full Code Here

        {
            synchronized(this)
            {
                if (fieldMap == null)
                {
                    fieldMap = new MultiValueMap();
                }
            }
        }
        return fieldMap;
    }
View Full Code Here

     * @see org.apache.jetspeed.om.common.GenericMetadata#getFields(java.lang.String)
     */
    public Collection getFields(String name)
    {
      //TODO:  return an immutable version?
        MultiValueMap fieldMap = getFieldMap(false);
        return (Collection) (fieldMap != null ? fieldMap.get(name) : null);
    }
View Full Code Here
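
getFields(name) above works because get(key) on a MultiValueMap returns the whole collection stored under that key, or null when the key is absent. A tiny sketch of that behaviour (the class and field names are made up):

import java.util.Collection;

import org.apache.commons.collections.map.MultiValueMap;

public class FieldMapSketch
{
    public static void main(String[] args)
    {
        MultiValueMap fieldMap = new MultiValueMap();
        fieldMap.put("title", "Home");
        fieldMap.put("title", "Accueil");

        // get(key) returns the backing collection of values, so a caller of
        // getFields(name) receives every value registered under that name.
        Collection titles = (Collection) fieldMap.get("title");
        System.out.println(titles);                  // [Home, Accueil]
        System.out.println(fieldMap.get("missing")); // null
    }
}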

    /* (non-Javadoc)
     * @see org.apache.jetspeed.om.common.GenericMetadata#setFields(java.lang.String, java.util.Collection)
     */
    public void setFields(String name, Collection values)
    {
        MultiValueMap fieldMap = getFieldMap(false);
        if (fieldMap != null)
        {
            fieldMap.remove(name);
        }
       
        Iterator fieldIter = fields.iterator();
        while(fieldIter.hasNext())
        {
View Full Code Here

     */
    public void setFields(Collection fields)
    {
        this.fields = fields;

        MultiValueMap fieldMap = getFieldMap(false);
        if (fieldMap != null)
        {
            fieldMap.clear();
        }
       
        if(fields != null)
        {   
            Iterator fieldIter = fields.iterator();
View Full Code Here

                }
            }
        }
       
        // update field map
        MultiValueMap fieldMap = getFieldMap(false);
        if (fieldMap != null)
        {
            fieldMap.clear();
        }
       
        if (this.fields != null)
        {   
            Iterator fieldIter = this.fields.iterator();
View Full Code Here

                return;

            default:
                // We must duplicate the list of Avas and the avaTypes map
                avas = new ArrayList<Ava>();
                avaTypes = new MultiValueMap();

                for ( Ava currentAva : rdn.avas )
                {
                    avas.add( (Ava) currentAva.clone() );
                    avaTypes.put( currentAva.getNormType(), currentAva );
View Full Code Here

                // First, create the list of Avas,
                avas = new ArrayList<Ava>();

                // and store the existing Ava into it.
                avas.add( ava );
                avaTypes = new MultiValueMap();
                avaTypes.put( avaType, ava );

                ava = null;

                // Now, fall down to the common case
View Full Code Here
