Examples of EditLogFileInputStream


Examples of org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream

          // reached the maximum number of retries
          throw e;
        }
      }
    }
    // Open the locally written copy of the edits and compare it, op by op,
    // with the stream read back from BookKeeper.
    EditLogFileInputStream fileEditsIn =
        new EditLogFileInputStream(tempEditsFile);
    for (int i = 0; i <= numEdits; i++) {
      FSEditLogOp opFromBk = bkEditsIn.readOp();
      FSEditLogOp opFromFile = fileEditsIn.readOp();
      if (LOG.isDebugEnabled()) {
        LOG.debug("txId = " + i + ", " + "opFromBk = " + opFromBk +
            ", opFromFile = " + opFromFile);
      }
      assertEquals(
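
The snippet above already shows everything needed to walk an edits file by hand: construct an EditLogFileInputStream over a local file and pull FSEditLogOp records out of it with readOp(). A minimal sketch of that pattern, assuming the single-File constructor and readOp() used above are accessible from your code and that readOp() returns null once the segment is exhausted (the file path is only an illustrative program argument):

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;

public class DumpEdits {
  public static void main(String[] args) throws IOException {
    File editsFile = new File(args[0]);  // path to a local edits file
    EditLogFileInputStream in = new EditLogFileInputStream(editsFile);
    try {
      FSEditLogOp op;
      // Assumption: readOp() returns null at the end of the stream.
      while ((op = in.readOp()) != null) {
        System.out.println(op);
      }
    } finally {
      in.close();
    }
  }
}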

Examples of org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream

          new EditLogFileOutputStream(tempEditsFile, null);
      elfos.create();
      FSEditLogTestUtil.populateStreams(1, 100, bkeos, elfos);
      EditLogInputStream bkeis =
          FSEditLogTestUtil.getJournalInputStream(bkjm, 1, true);
      EditLogInputStream elfis = new EditLogFileInputStream(tempEditsFile);
      Map<String, EditLogInputStream> streamByName =
          ImmutableMap.of("BookKeeper", bkeis, "File", elfis);
      FSEditLogTestUtil.assertStreamsAreEquivalent(100, streamByName);
      assertNotNull("Log was validated", h.logValidation);
      assertEquals("numTransactions validated correctly",

Examples of org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream

          new EditLogFileOutputStream(tempEditsFile, null);
      elfos.create();
      FSEditLogTestUtil.populateStreams(1, 100, bkeos, elfos);
      EditLogInputStream bkeis =
          getJournalInputStreamDontCheckLastTxId(bkjm, 1);
      EditLogInputStream elfis = new EditLogFileInputStream(tempEditsFile);
      Map<String, EditLogInputStream> streamByName =
          ImmutableMap.of("BookKeeper", bkeis, "File", elfis);
      FSEditLogTestUtil.assertStreamsAreEquivalent(100, streamByName);
    } finally {
      if (!tempEditsFile.delete()) {
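
Both BookKeeper examples rely on the same idea: read two EditLogInputStream instances in lockstep and check that they produce the same operations. A rough helper in that spirit is sketched below; it compares the string form of each op, since the snippets do not show which comparison FSEditLogTestUtil actually performs, and it assumes readOp() returns null at end of stream:

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;

final class EditStreamCompare {
  /** Returns true if both streams yield the same sequence of operations. */
  static boolean sameOps(EditLogInputStream a, EditLogInputStream b)
      throws IOException {
    while (true) {
      FSEditLogOp opA = a.readOp();
      FSEditLogOp opB = b.readOp();
      if (opA == null || opB == null) {
        // Equivalent only if both streams end at the same point.
        return opA == null && opB == null;
      }
      // Rough check: compare the ops' string forms.
      if (!String.valueOf(opA).equals(String.valueOf(opB))) {
        return false;
      }
    }
  }
}

A helper like this trades the per-transaction debug logging of the explicit loop in the first example for brevity; the explicit loop is easier to troubleshoot when the streams diverge.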

Examples of org.apache.hadoop.hdfs.server.namenode.FSEditLog.EditLogFileInputStream

   * @return number of edits loaded
   * @throws IOException
   */
  int loadFSEdits(StorageDirectory sd) throws IOException {
    int numEdits = 0;
    EditLogFileInputStream edits =
      new EditLogFileInputStream(getImageFile(sd, NameNodeFile.EDITS));
    numEdits = FSEditLog.loadFSEdits(edits);
    edits.close();
    // An edits.new file left behind by a checkpoint that never completed
    // must be replayed on top of the primary edits file.
    File editsNew = getImageFile(sd, NameNodeFile.EDITS_NEW);
    if (editsNew.exists() && editsNew.length() > 0) {
      edits = new EditLogFileInputStream(editsNew);
      numEdits += FSEditLog.loadFSEdits(edits);
      edits.close();
    }
    // update the counts.
    FSNamesystem.getFSNamesystem().dir.updateCountForINodeWithQuota();   
    return numEdits;
  }
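
One detail worth noting in loadFSEdits above: if FSEditLog.loadFSEdits throws, the stream it was reading is never closed. A hedged variant of the same logic, written as it might look inside the same class (it assumes access to the package-private FSEditLog.loadFSEdits(EditLogInputStream) and the EditLogFileInputStream constructor used above, and takes the two files directly instead of resolving them through getImageFile):

  // Sketch only: same load order as the method above (edits first, then a
  // non-empty edits.new), but with try/finally so streams are closed on error.
  static int loadEdits(File edits, File editsNew) throws IOException {
    int numEdits = 0;
    EditLogFileInputStream in = new EditLogFileInputStream(edits);
    try {
      numEdits = FSEditLog.loadFSEdits(in);
    } finally {
      in.close();
    }
    if (editsNew.exists() && editsNew.length() > 0) {
      EditLogFileInputStream inNew = new EditLogFileInputStream(editsNew);
      try {
        numEdits += FSEditLog.loadFSEdits(inNew);
      } finally {
        inNew.close();
      }
    }
    return numEdits;
  }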

Examples of org.apache.hadoop.hdfs.server.namenode.FSEditLog.EditLogFileInputStream

   * @throws IOException
   */
  int loadFSEdits(StorageDirectory sd, MetaRecoveryContext recovery)
      throws IOException {
    int numEdits = 0;
    EditLogFileInputStream edits =
      new EditLogFileInputStream(getImageFile(sd, NameNodeFile.EDITS));
    numEdits = FSEditLog.loadFSEdits(edits, editsTolerationLength, recovery);
    edits.close();
    File editsNew = getImageFile(sd, NameNodeFile.EDITS_NEW);
    if (editsNew.exists() && editsNew.length() > 0) {
      edits = new EditLogFileInputStream(editsNew);
      numEdits += FSEditLog.loadFSEdits(edits, editsTolerationLength, recovery);
      edits.close();
    }
    // update the counts.
    FSNamesystem.getFSNamesystem().dir.updateCountForINodeWithQuota();   
    return numEdits;
  }

Examples of org.apache.hadoop.hdfs.server.namenode.FSEditLog.EditLogFileInputStream

    //
    for (Iterator<StorageDirectory> it =
            fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
      File editFile = FSImage.getImageFile(it.next(), NameNodeFile.EDITS);
      System.out.println("Verifying file: " + editFile);
      int numEdits = FSEditLog.loadFSEdits(new EditLogFileInputStream(editFile));
      int numLeases = FSNamesystem.getFSNamesystem().leaseManager.countLease();
      System.out.println("Number of outstanding leases " + numLeases);
      assertEquals(0, numLeases);
      assertTrue("Verification for " + editFile + " failed. " +
                 "Expected " + (numThreads * 2 * numberTransactions) + " transactions. "+

Examples of org.apache.hadoop.hdfs.server.namenode.FSEditLog.EditLogFileInputStream

      for (Iterator<StorageDirectory> it =
              fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
        File editFile = FSImage.getImageFile(it.next(), NameNodeFile.EDITS);
        System.out.println("Verifying file: " + editFile);
        int numEdits = FSEditLog.loadFSEdits(
            new EditLogFileInputStream(editFile), -1, null);
        assertTrue("Verification for " + editFile + " failed. " +
                   "Expected " + (NUM_THREADS * opsPerTrans * NUM_TRANSACTIONS + numKeys) + " transactions. "+
                   "Found " + numEdits + " transactions.",
                   numEdits == NUM_THREADS * opsPerTrans * NUM_TRANSACTIONS +numKeys);
 
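The count checks in the last two examples go through FSEditLog.loadFSEdits, which replays the edits into the namesystem before reporting how many it processed. When only the number of recorded transactions matters, the file can also be counted directly, along the lines of this sketch (same assumptions as the earlier dump example; counting readOp() results approximates the numEdits figure that loadFSEdits returns):

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;

public class CountEdits {
  public static void main(String[] args) throws IOException {
    File editFile = new File(args[0]);  // edits file to inspect
    EditLogFileInputStream in = new EditLogFileInputStream(editFile);
    int numEdits = 0;
    try {
      // Assumption: readOp() returns null once the stream is exhausted.
      while (in.readOp() != null) {
        numEdits++;
      }
    } finally {
      in.close();
    }
    System.out.println("Found " + numEdits + " transactions in " + editFile);
  }
}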