Package org.apache.commons.collections.map

Examples of org.apache.commons.collections.map.MultiValueMap$ValuesIterator
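
Every example below builds a MultiValueMap, the commons-collections MultiMap implementation that keeps a growing Collection of values per key instead of overwriting on put(). Its values() view flattens those per-key collections, and the inner ValuesIterator class this page indexes is what walks each key's collection during that iteration. A minimal, self-contained sketch of the behaviour (not taken from any of the projects below):

import java.util.Collection;
import org.apache.commons.collections.MultiMap;
import org.apache.commons.collections.map.MultiValueMap;

public class MultiValueMapDemo {
  public static void main(String[] args) {
    // Each put() adds to the collection mapped to the key instead of replacing it.
    MultiMap map = new MultiValueMap();
    map.put("fruit", "apple");
    map.put("fruit", "pear");
    map.put("veg", "leek");

    // get() returns the whole collection recorded for a key.
    Collection fruit = (Collection) map.get("fruit");
    System.out.println(fruit); // [apple, pear]

    // values() chains the per-key collections into one flat iteration; each
    // key's collection is walked by MultiValueMap's inner ValuesIterator.
    for (Object value : map.values()) {
      System.out.println(value); // prints every value of every key
    }
  }
}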


   */
  @Deprecated
  void replayLogv1(List<File> logs) throws Exception {
    int total = 0;
    int count = 0;
    MultiMap transactionMap = new MultiValueMap();
    //Read inflight puts to see if they were committed
    SetMultimap<Long, Long> inflightPuts = queue.deserializeInflightPuts();
    for (Long txnID : inflightPuts.keySet()) {
      Set<Long> eventPointers = inflightPuts.get(txnID);
      for (Long eventPointer : eventPointers) {
        transactionMap.put(txnID, FlumeEventPointer.fromLong(eventPointer));
      }
    }

    SetMultimap<Long, Long> inflightTakes = queue.deserializeInflightTakes();
    LOG.info("Starting replay of " + logs);
    for (File log : logs) {
      LOG.info("Replaying " + log);
      LogFile.SequentialReader reader = null;
      try {
        reader = LogFileFactory.getSequentialReader(log, encryptionKeyProvider);
        reader.skipToLastCheckpointPosition(queue.getLogWriteOrderID());
        LogRecord entry;
        FlumeEventPointer ptr;
        // for puts the fileId is the fileID of the file they exist in
        // for takes the fileId and offset are pointers to a put
        int fileId = reader.getLogFileID();
        int readCount = 0;
        int putCount = 0;
        int takeCount = 0;
        int rollbackCount = 0;
        int commitCount = 0;
        int skipCount = 0;
        while ((entry = reader.next()) != null) {
          int offset = entry.getOffset();
          TransactionEventRecord record = entry.getEvent();
          short type = record.getRecordType();
          long trans = record.getTransactionID();
          readCount++;
          if (record.getLogWriteOrderID() > lastCheckpoint) {
            if (type == TransactionEventRecord.Type.PUT.get()) {
              putCount++;
              ptr = new FlumeEventPointer(fileId, offset);
              transactionMap.put(trans, ptr);
            } else if (type == TransactionEventRecord.Type.TAKE.get()) {
              takeCount++;
              Take take = (Take) record;
              ptr = new FlumeEventPointer(take.getFileID(), take.getOffset());
              transactionMap.put(trans, ptr);
            } else if (type == TransactionEventRecord.Type.ROLLBACK.get()) {
              rollbackCount++;
              transactionMap.remove(trans);
            } else if (type == TransactionEventRecord.Type.COMMIT.get()) {
              commitCount++;
              @SuppressWarnings("unchecked")
              Collection<FlumeEventPointer> pointers =
                (Collection<FlumeEventPointer>) transactionMap.remove(trans);
              if (((Commit) record).getType()
                      == TransactionEventRecord.Type.TAKE.get()) {
                if (inflightTakes.containsKey(trans)) {
                  if (pointers == null) {
                    pointers = Sets.newHashSet();
          // ... (snippet truncated)
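
The replay loop above relies on two MultiValueMap behaviours: put(transactionID, pointer) keeps appending pointers under the same transaction ID, and remove(transactionID) hands back everything accumulated for that transaction as one Collection (hence the unchecked cast), which is what a COMMIT or ROLLBACK needs to apply or discard in one step. A stripped-down sketch of that pattern, with plain Long keys and String stand-ins in place of Flume's FlumeEventPointer:

import java.util.Collection;
import org.apache.commons.collections.MultiMap;
import org.apache.commons.collections.map.MultiValueMap;

public class TransactionMapSketch {
  public static void main(String[] args) {
    MultiMap transactionMap = new MultiValueMap();

    // Replay: each PUT/TAKE record appends a pointer under its transaction ID.
    transactionMap.put(1L, "pointer-a");
    transactionMap.put(1L, "pointer-b");
    transactionMap.put(2L, "pointer-c");

    // COMMIT (or ROLLBACK): remove() returns everything queued for that
    // transaction as a single Collection, so it can be applied or dropped at once.
    @SuppressWarnings("unchecked")
    Collection<String> committed = (Collection<String>) transactionMap.remove(1L);
    System.out.println(committed);              // [pointer-a, pointer-b]
    System.out.println(transactionMap.get(1L)); // null - nothing left for txn 1
  }
}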


   * @param logs
   * @throws IOException
   */
  void replayLog(List<File> logs) throws Exception {
    int count = 0;
    MultiMap transactionMap = new MultiValueMap();
    // seed both with the highest known sequence of either the txnid or woid
    long transactionIDSeed = lastCheckpoint, writeOrderIDSeed = lastCheckpoint;
    LOG.info("Starting replay of " + logs);
    //Load the inflight puts into the transaction map to see if they were
    //committed in one of the logs.
    SetMultimap<Long, Long> inflightPuts = queue.deserializeInflightPuts();
    for (Long txnID : inflightPuts.keySet()) {
      Set<Long> eventPointers = inflightPuts.get(txnID);
      for (Long eventPointer : eventPointers) {
        transactionMap.put(txnID, FlumeEventPointer.fromLong(eventPointer));
      }
    }
    SetMultimap<Long, Long> inflightTakes = queue.deserializeInflightTakes();
    try {
      for (File log : logs) {
        LOG.info("Replaying " + log);
        try {
          LogFile.SequentialReader reader =
              LogFileFactory.getSequentialReader(log, encryptionKeyProvider);
          reader.skipToLastCheckpointPosition(queue.getLogWriteOrderID());
          Preconditions.checkState(!readers.containsKey(reader.getLogFileID()),
              "Readers " + readers + " already contains "
                  + reader.getLogFileID());
          readers.put(reader.getLogFileID(), reader);
          LogRecord logRecord = reader.next();
          if(logRecord == null) {
            readers.remove(reader.getLogFileID());
            reader.close();
          } else {
            logRecordBuffer.add(logRecord);
          }
        } catch(EOFException e) {
          LOG.warn("Ignoring " + log + " due to EOF", e);
        }
      }
      LogRecord entry = null;
      FlumeEventPointer ptr = null;
      int readCount = 0;
      int putCount = 0;
      int takeCount = 0;
      int rollbackCount = 0;
      int commitCount = 0;
      int skipCount = 0;
      while ((entry = next()) != null) {
        // for puts the fileId is the fileID of the file they exist in
        // for takes the fileId and offset are pointers to a put
        int fileId = entry.getFileID();
        int offset = entry.getOffset();
        TransactionEventRecord record = entry.getEvent();
        short type = record.getRecordType();
        long trans = record.getTransactionID();
        transactionIDSeed = Math.max(transactionIDSeed, trans);
        writeOrderIDSeed = Math.max(writeOrderIDSeed,
            record.getLogWriteOrderID());
        readCount++;
        if (record.getLogWriteOrderID() > lastCheckpoint) {
          if (type == TransactionEventRecord.Type.PUT.get()) {
            putCount++;
            ptr = new FlumeEventPointer(fileId, offset);
            transactionMap.put(trans, ptr);
          } else if (type == TransactionEventRecord.Type.TAKE.get()) {
            takeCount++;
            Take take = (Take) record;
            ptr = new FlumeEventPointer(take.getFileID(), take.getOffset());
            transactionMap.put(trans, ptr);
          } else if (type == TransactionEventRecord.Type.ROLLBACK.get()) {
            rollbackCount++;
            transactionMap.remove(trans);
          } else if (type == TransactionEventRecord.Type.COMMIT.get()) {
            commitCount++;
            @SuppressWarnings("unchecked")
            Collection<FlumeEventPointer> pointers =
              (Collection<FlumeEventPointer>) transactionMap.remove(trans);
            if (((Commit) record).getType()
                    == TransactionEventRecord.Type.TAKE.get()) {
              if (inflightTakes.containsKey(trans)) {
                if(pointers == null){
                  pointers = Sets.newHashSet();
          // ... (snippet truncated)
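
Unlike replayLogv1, replayLog above opens every log file first, primes logRecordBuffer with each file's first record, and then drains records through next() so that entries from different files can be processed together. The snippet does not show next() itself; the following is only a plausible sketch of such a merge, assuming the buffer is ordered by write-order ID (as the writeOrderIDSeed handling suggests) and that readers maps each file ID to its open reader. Record, the Queue-backed readers, and the class itself are illustrative stand-ins, not Flume's types:

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Queue;

public class ReplayMergeSketch {

  /** Stand-in for a log record: the file it came from plus a write-order ID. */
  static final class Record {
    final int fileId;
    final long writeOrderId;
    Record(int fileId, long writeOrderId) {
      this.fileId = fileId;
      this.writeOrderId = writeOrderId;
    }
    @Override public String toString() { return "file " + fileId + " / woid " + writeOrderId; }
  }

  // One queue per "log file" stands in for an open sequential reader.
  private final Map<Integer, Queue<Record>> readers = new HashMap<>();
  // Stand-in for logRecordBuffer: always yields the buffered record with the
  // smallest write-order ID, whichever file it belongs to.
  private final PriorityQueue<Record> logRecordBuffer =
      new PriorityQueue<>(Comparator.comparingLong((Record r) -> r.writeOrderId));

  /** Mirrors the per-file setup loop: prime the buffer with the file's first record. */
  void open(int fileId, Queue<Record> reader) {
    readers.put(fileId, reader);
    Record first = reader.poll();
    if (first == null) {
      readers.remove(fileId);      // empty file: forget it, as the snippet does
    } else {
      logRecordBuffer.add(first);
    }
  }

  /** Pop the globally oldest record, then refill the buffer from that record's file. */
  Record next() {
    Record oldest = logRecordBuffer.poll();
    if (oldest == null) {
      return null;                 // every file is drained
    }
    Record refill = readers.get(oldest.fileId).poll();
    if (refill != null) {
      logRecordBuffer.add(refill);
    }
    return oldest;
  }

  public static void main(String[] args) {
    ReplayMergeSketch replay = new ReplayMergeSketch();
    replay.open(1, new ArrayDeque<>(Arrays.asList(new Record(1, 10), new Record(1, 30))));
    replay.open(2, new ArrayDeque<>(Arrays.asList(new Record(2, 20))));
    Record r;
    while ((r = replay.next()) != null) {
      System.out.println(r);       // woid 10, 20, 30: records interleave across files
    }
  }
}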

    private ProjectsHandler()
    {
        projectsList = new ArrayList<Project>();
        projectsMap = new HashMap<String, Project>();
        projectsHandlerListeners = new ArrayList<ProjectsHandlerListener>();
        projectsListeners = new MultiValueMap();
    }

   */
  @Deprecated
  void replayLogv1(List<File> logs) throws Exception {
    int total = 0;
    int count = 0;
    MultiMap transactionMap = new MultiValueMap();
    //Read inflight puts to see if they were committed
    SetMultimap<Long, Long> inflightPuts = queue.deserializeInflightPuts();
    for (Long txnID : inflightPuts.keySet()) {
      Set<Long> eventPointers = inflightPuts.get(txnID);
      for (Long eventPointer : eventPointers) {
        transactionMap.put(txnID, FlumeEventPointer.fromLong(eventPointer));
      }
    }

    SetMultimap<Long, Long> inflightTakes = queue.deserializeInflightTakes();
    LOG.info("Starting replay of " + logs);
    for (File log : logs) {
      LOG.info("Replaying " + log);
      LogFile.SequentialReader reader = null;
      try {
        reader = LogFileFactory.getSequentialReader(log,
          encryptionKeyProvider, fsyncPerTransaction);
        reader.skipToLastCheckpointPosition(queue.getLogWriteOrderID());
        LogRecord entry;
        FlumeEventPointer ptr;
        // for puts the fileId is the fileID of the file they exist in
        // for takes the fileId and offset are pointers to a put
        int fileId = reader.getLogFileID();

        while ((entry = reader.next()) != null) {
          int offset = entry.getOffset();
          TransactionEventRecord record = entry.getEvent();
          short type = record.getRecordType();
          long trans = record.getTransactionID();
          readCount++;
          if (record.getLogWriteOrderID() > lastCheckpoint) {
            if (type == TransactionEventRecord.Type.PUT.get()) {
              putCount++;
              ptr = new FlumeEventPointer(fileId, offset);
              transactionMap.put(trans, ptr);
            } else if (type == TransactionEventRecord.Type.TAKE.get()) {
              takeCount++;
              Take take = (Take) record;
              ptr = new FlumeEventPointer(take.getFileID(), take.getOffset());
              transactionMap.put(trans, ptr);
            } else if (type == TransactionEventRecord.Type.ROLLBACK.get()) {
              rollbackCount++;
              transactionMap.remove(trans);
            } else if (type == TransactionEventRecord.Type.COMMIT.get()) {
              commitCount++;
              @SuppressWarnings("unchecked")
              Collection<FlumeEventPointer> pointers =
                (Collection<FlumeEventPointer>) transactionMap.remove(trans);
              if (((Commit) record).getType()
                      == TransactionEventRecord.Type.TAKE.get()) {
                if (inflightTakes.containsKey(trans)) {
                  if (pointers == null) {
                    pointers = Sets.newHashSet();
          // ... (snippet truncated)

   * @param logs
   * @throws IOException
   */
  void replayLog(List<File> logs) throws Exception {
    int count = 0;
    MultiMap transactionMap = new MultiValueMap();
    // seed both with the highest known sequence of either the txnid or woid
    long transactionIDSeed = lastCheckpoint, writeOrderIDSeed = lastCheckpoint;
    LOG.info("Starting replay of " + logs);
    //Load the inflight puts into the transaction map to see if they were
    //committed in one of the logs.
    SetMultimap<Long, Long> inflightPuts = queue.deserializeInflightPuts();
    for (Long txnID : inflightPuts.keySet()) {
      Set<Long> eventPointers = inflightPuts.get(txnID);
      for (Long eventPointer : eventPointers) {
        transactionMap.put(txnID, FlumeEventPointer.fromLong(eventPointer));
      }
    }
    SetMultimap<Long, Long> inflightTakes = queue.deserializeInflightTakes();
    try {
      for (File log : logs) {
        LOG.info("Replaying " + log);
        try {
          LogFile.SequentialReader reader =
            LogFileFactory.getSequentialReader(log, encryptionKeyProvider,
              fsyncPerTransaction);
          reader.skipToLastCheckpointPosition(queue.getLogWriteOrderID());
          Preconditions.checkState(!readers.containsKey(reader.getLogFileID()),
              "Readers " + readers + " already contains "
                  + reader.getLogFileID());
          readers.put(reader.getLogFileID(), reader);
          LogRecord logRecord = reader.next();
          if(logRecord == null) {
            readers.remove(reader.getLogFileID());
            reader.close();
          } else {
            logRecordBuffer.add(logRecord);
          }
        } catch(EOFException e) {
          LOG.warn("Ignoring " + log + " due to EOF", e);
        }
      }
      LogRecord entry = null;
      FlumeEventPointer ptr = null;
      while ((entry = next()) != null) {
        // for puts the fileId is the fileID of the file they exist in
        // for takes the fileId and offset are pointers to a put
        int fileId = entry.getFileID();
        int offset = entry.getOffset();
        TransactionEventRecord record = entry.getEvent();
        short type = record.getRecordType();
        long trans = record.getTransactionID();
        transactionIDSeed = Math.max(transactionIDSeed, trans);
        writeOrderIDSeed = Math.max(writeOrderIDSeed,
            record.getLogWriteOrderID());
        readCount++;
        if(readCount % 10000 == 0 && readCount > 0) {
          LOG.info("read: " + readCount + ", put: " + putCount + ", take: "
              + takeCount + ", rollback: " + rollbackCount + ", commit: "
              + commitCount + ", skip: " + skipCount + ", eventCount:" + count);
        }
        if (record.getLogWriteOrderID() > lastCheckpoint) {
          if (type == TransactionEventRecord.Type.PUT.get()) {
            putCount++;
            ptr = new FlumeEventPointer(fileId, offset);
            transactionMap.put(trans, ptr);
          } else if (type == TransactionEventRecord.Type.TAKE.get()) {
            takeCount++;
            Take take = (Take) record;
            ptr = new FlumeEventPointer(take.getFileID(), take.getOffset());
            transactionMap.put(trans, ptr);
          } else if (type == TransactionEventRecord.Type.ROLLBACK.get()) {
            rollbackCount++;
            transactionMap.remove(trans);
          } else if (type == TransactionEventRecord.Type.COMMIT.get()) {
            commitCount++;
            @SuppressWarnings("unchecked")
            Collection<FlumeEventPointer> pointers =
              (Collection<FlumeEventPointer>) transactionMap.remove(trans);
            if (((Commit) record).getType()
                    == TransactionEventRecord.Type.TAKE.get()) {
              if (inflightTakes.containsKey(trans)) {
                if(pointers == null){
                  pointers = Sets.newHashSet();
          // ... (snippet truncated)

   
    private void addFieldsToParsedObject(Document doc, ParsedObject o)
    {
        try
        {
            MultiMap multiKeywords = new MultiValueMap();
            MultiMap multiFields = new MultiValueMap();
            HashMap fieldMap = new HashMap();
           
            Field classNameField = doc.getField(ParsedObject.FIELDNAME_CLASSNAME);
            if(classNameField != null)
            {
          // ... (snippet truncated)
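
The indexing code above uses two MultiValueMaps (multiKeywords and multiFields) so that a field name occurring several times in the Lucene Document keeps all of its values rather than just the last one. A small sketch of that grouping-and-readback step, with hypothetical field names in place of the project's ParsedObject constants:

import java.util.Collection;
import org.apache.commons.collections.MultiMap;
import org.apache.commons.collections.map.MultiValueMap;

public class FieldGroupingSketch {
  public static void main(String[] args) {
    // Collect repeated field values under one name, as the snippet does for
    // keywords and fields pulled out of the document.
    MultiMap multiFields = new MultiValueMap();
    multiFields.put("author", "Alice");
    multiFields.put("author", "Bob");
    multiFields.put("title", "Commons Collections in practice");

    // Walk the map key by key: get() returns every value recorded for a field,
    // which is what lets the parsed object carry multi-valued fields.
    for (Object field : multiFields.keySet()) {
      Collection values = (Collection) multiFields.get(field);
      System.out.println(field + " -> " + values);
    }
  }
}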

  }

  void replayLog(List<File> logs) throws IOException {
    int total = 0;
    int count = 0;
    MultiMap transactionMap = new MultiValueMap();
    LOG.info("Starting replay of " + logs);
    for (File log : logs) {
      LOG.info("Replaying " + log);
      LogFile.SequentialReader reader = null;
      try {
        reader = new LogFile.SequentialReader(log);
        reader.skipToLastCheckpointPosition(queue.getTimestamp());
        Pair<Integer, TransactionEventRecord> entry;
        FlumeEventPointer ptr;
        // for puts the fileId is the fileID of the file they exist in
        // for takes the fileId and offset are pointers to a put
        int fileId = reader.getLogFileID();
        int readCount = 0;
        int putCount = 0;
        int takeCount = 0;
        int rollbackCount = 0;
        int commitCount = 0;
        int skipCount = 0;
        while ((entry = reader.next()) != null) {
          int offset = entry.getLeft();
          TransactionEventRecord record = entry.getRight();
          short type = record.getRecordType();
          long trans = record.getTransactionID();
          readCount++;
          if (record.getTimestamp() > lastCheckpoint) {
            if (type == TransactionEventRecord.Type.PUT.get()) {
              putCount++;
              ptr = new FlumeEventPointer(fileId, offset);
              transactionMap.put(trans, ptr);
            } else if (type == TransactionEventRecord.Type.TAKE.get()) {
              takeCount++;
              Take take = (Take) record;
              ptr = new FlumeEventPointer(take.getFileID(), take.getOffset());
              transactionMap.put(trans, ptr);
            } else if (type == TransactionEventRecord.Type.ROLLBACK.get()) {
              rollbackCount++;
              transactionMap.remove(trans);
            } else if (type == TransactionEventRecord.Type.COMMIT.get()) {
              commitCount++;
              @SuppressWarnings("unchecked")
              Collection<FlumeEventPointer> pointers =
                (Collection<FlumeEventPointer>) transactionMap.remove(trans);
              if (pointers != null && pointers.size() > 0) {
                processCommit(((Commit) record).getType(), pointers);
                count += pointers.size();
              }
            } else {
          // ... (snippet truncated)

                return;

            default:
                // We must duplicate the treeSet and the hashMap
                avas = new ArrayList<Ava>();
                avaTypes = new MultiValueMap();

                for ( Ava currentAva : rdn.avas )
                {
                    avas.add( currentAva.clone() );
                    avaTypes.put( currentAva.getNormType(), currentAva );
          // ... (snippet truncated)

                // First, create the HashMap,
                avas = new ArrayList<Ava>();

                // and store the existing Ava into it.
                avas.add( ava );
                avaTypes = new MultiValueMap();
                avaTypes.put( avaType, ava );

                ava = null;

                // Now, fall down to the common case
          // ... (snippet truncated)

                // First, create the HashMap,
                avas = new ArrayList<Ava>();

                // and store the existing Ava into it.
                avas.add( ava );
                avaTypes = new MultiValueMap();
                avaTypes.put( avaType, ava );

                this.ava = null;

                // Now, fall down to the common case
          // ... (snippet truncated)
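
Both Rdn excerpts keep the same Avas twice: an ordered list plus an avaTypes MultiValueMap keyed by the normalized attribute type, and the clone/duplicate path rebuilds both containers entry by entry because one type may map to several Avas. A reduced sketch of that duplication step, with a hypothetical Entry type standing in for Ava:

import java.util.ArrayList;
import java.util.List;
import org.apache.commons.collections.MultiMap;
import org.apache.commons.collections.map.MultiValueMap;

public class RdnCloneSketch {
  /** Hypothetical stand-in for Ava: a typed name/value pair that can be copied. */
  static final class Entry {
    final String normType;
    final String value;
    Entry(String normType, String value) { this.normType = normType; this.value = value; }
    Entry copy() { return new Entry(normType, value); }
    @Override public String toString() { return normType + "=" + value; }
  }

  public static void main(String[] args) {
    List<Entry> original = new ArrayList<>();
    original.add(new Entry("cn", "Kate"));
    original.add(new Entry("ou", "people"));
    original.add(new Entry("ou", "admins"));   // two entries may share one type

    // Clone path from the snippet: rebuild the list and the type-keyed
    // MultiValueMap together so the copy owns its own containers.
    List<Entry> entries = new ArrayList<>();
    MultiMap entryTypes = new MultiValueMap();
    for (Entry current : original) {
      Entry copy = current.copy();
      entries.add(copy);
      entryTypes.put(copy.normType, copy);     // a type can map to several entries
    }
    System.out.println(entryTypes.get("ou"));  // [ou=people, ou=admins]
  }
}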
