Examples of MiniJournalCluster


Examples of org.apache.hadoop.hdfs.qjournal.MiniJournalCluster

   
    LOG.info("nn1Dir=" + nn1Dir);
    LOG.info("nn2Dir=" + nn2Dir);

    final Configuration conf = new HdfsConfiguration();
    final MiniJournalCluster mjc = new MiniJournalCluster.Builder(conf).build();
    setConf(conf, nn1Dir, mjc);

    {
      // Start the cluster once to generate the dfs dirs
      final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
View Full Code Here

Examples of org.apache.hadoop.hdfs.qjournal.MiniJournalCluster

   
    LOG.info("nn1Dir=" + nn1Dir);
    LOG.info("nn2Dir=" + nn2Dir);

    final Configuration conf = new HdfsConfiguration();
    final MiniJournalCluster mjc = new MiniJournalCluster.Builder(conf).build();
    setConf(conf, nn1Dir, mjc);

    {
      // Start the cluster once to generate the dfs dirs
      final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
View Full Code Here

Examples of org.apache.hadoop.hdfs.qjournal.MiniJournalCluster

  }

  @Test
  public void testRollbackWithQJM() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    MiniJournalCluster mjc = null;
    MiniDFSCluster cluster = null;
    final Path foo = new Path("/foo");
    final Path bar = new Path("/bar");

    try {
      mjc = new MiniJournalCluster.Builder(conf).numJournalNodes(
          NUM_JOURNAL_NODES).build();
      conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, mjc
          .getQuorumJournalURI(JOURNAL_ID).toString());
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
      cluster.waitActive();

      DistributedFileSystem dfs = cluster.getFileSystem();
      final DFSAdmin dfsadmin = new DFSAdmin(conf);
      dfs.mkdirs(foo);

      // start rolling upgrade
      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      Assert.assertEquals(0,
          dfsadmin.run(new String[] { "-rollingUpgrade", "prepare" }));
      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
      // create new directory
      dfs.mkdirs(bar);
      dfs.close();

      // rollback
      cluster.restartNameNode("-rollingUpgrade", "rollback");
      // make sure /foo is still there, but /bar is not
      dfs = cluster.getFileSystem();
      Assert.assertTrue(dfs.exists(foo));
      Assert.assertFalse(dfs.exists(bar));

      // check storage in JNs
      for (int i = 0; i < NUM_JOURNAL_NODES; i++) {
        File dir = mjc.getCurrentDir(0, JOURNAL_ID);
        // segments:(startSegment, mkdir, endSegment), (startSegment, upgrade
        // marker, mkdir, endSegment)
        checkJNStorage(dir, 4, 7);
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
      if (mjc != null) {
        mjc.shutdown();
      }
    }
  }
View Full Code Here

Examples of org.apache.hadoop.hdfs.qjournal.MiniJournalCluster

   
    LOG.info("nn1Dir=" + nn1Dir);
    LOG.info("nn2Dir=" + nn2Dir);

    final Configuration conf = new HdfsConfiguration();
    final MiniJournalCluster mjc = new MiniJournalCluster.Builder(conf).build();
    setConf(conf, nn1Dir, mjc);

    {
      // Start the cluster once to generate the dfs dirs
      final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
View Full Code Here

Examples of org.apache.hadoop.hdfs.qjournal.MiniJournalCluster

   * bounds for the other test cases, so they can exhaustively explore
   * the space of potential failures.
   */
  private static long determineMaxIpcNumber() throws Exception {
    Configuration conf = new Configuration();
    MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf).build();
    QuorumJournalManager qjm = null;
    long ret;
    try {
      qjm = createInjectableQJM(cluster);
      qjm.format(FAKE_NSINFO);
      doWorkload(cluster, qjm);
     
      SortedSet<Integer> ipcCounts = Sets.newTreeSet();
      for (AsyncLogger l : qjm.getLoggerSetForTests().getLoggersForTests()) {
        InvocationCountingChannel ch = (InvocationCountingChannel)l;
        ch.waitForAllPendingCalls();
        ipcCounts.add(ch.getRpcCount());
      }
 
      // All of the loggers should have sent the same number of RPCs, since there
      // were no failures.
      assertEquals(1, ipcCounts.size());
     
      ret = ipcCounts.first();
      LOG.info("Max IPC count = " + ret);
    } finally {
      IOUtils.closeStream(qjm);
      cluster.shutdown();
    }
    return ret;
  }
View Full Code Here

Examples of org.apache.hadoop.hdfs.qjournal.MiniJournalCluster

       
        LOG.info("\n\n-------------------------------------------\n" +
            "Beginning test, failing at " + injectionStr + "\n" +
            "-------------------------------------------\n\n");
       
        MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf)
          .build();
        QuorumJournalManager qjm = null;
        try {
          qjm = createInjectableQJM(cluster);
          qjm.format(FAKE_NSINFO);
          List<AsyncLogger> loggers = qjm.getLoggerSetForTests().getLoggersForTests();
          failIpcNumber(loggers.get(0), failA);
          failIpcNumber(loggers.get(1), failB);
          int lastAckedTxn = doWorkload(cluster, qjm);

          if (lastAckedTxn < 6) {
            LOG.info("Failed after injecting failures at " + injectionStr +
                ". This is expected since we injected a failure in the " +
                "majority.");
          }
          qjm.close();
          qjm = null;

          // Now should be able to recover
          qjm = createInjectableQJM(cluster);
          long lastRecoveredTxn = QJMTestUtil.recoverAndReturnLastTxn(qjm);
          assertTrue(lastRecoveredTxn >= lastAckedTxn);
         
          writeSegment(cluster, qjm, lastRecoveredTxn + 1, 3, true);
        } catch (Throwable t) {
          // Test failure! Rethrow with the test setup info so it can be
          // easily triaged.
          throw new RuntimeException("Test failed with injection: " + injectionStr,
                t);
        } finally {
          cluster.shutdown();
          cluster = null;
          IOUtils.closeStream(qjm);
          qjm = null;
        }
      }
View Full Code Here

Examples of org.apache.hadoop.hdfs.qjournal.MiniJournalCluster

    }
    LOG.info("Random seed: " + seed);
   
    Random r = new Random(seed);
   
    MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf)
      .build();
   
    // Format the cluster using a non-faulty QJM.
    QuorumJournalManager qjmForInitialFormat =
        createInjectableQJM(cluster);
    qjmForInitialFormat.format(FAKE_NSINFO);
    qjmForInitialFormat.close();
   
    try {
      long txid = 0;
      long lastAcked = 0;
     
      for (int i = 0; i < NUM_WRITER_ITERS; i++) {
        LOG.info("Starting writer " + i + "\n-------------------");
       
        QuorumJournalManager qjm = createRandomFaultyQJM(cluster, r);
        try {
          long recovered;
          try {
            recovered = QJMTestUtil.recoverAndReturnLastTxn(qjm);
          } catch (Throwable t) {
            LOG.info("Failed recovery", t);
            checkException(t);
            continue;
          }
          assertTrue("Recovered only up to txnid " + recovered +
              " but had gotten an ack for " + lastAcked,
              recovered >= lastAcked);
         
          txid = recovered + 1;
         
          // Periodically purge old data on disk so it's easier to look
          // at failure cases.
          if (txid > 100 && i % 10 == 1) {
            qjm.purgeLogsOlderThan(txid - 100);
          }

          Holder<Throwable> thrown = new Holder<Throwable>(null);
          for (int j = 0; j < SEGMENTS_PER_WRITER; j++) {
            lastAcked = writeSegmentUntilCrash(cluster, qjm, txid, 4, thrown);
            if (thrown.held != null) {
              LOG.info("Failed write", thrown.held);
              checkException(thrown.held);
              break;
            }
            txid += 4;
          }
        } finally {
          qjm.close();
        }
      }
    } finally {
      cluster.shutdown();
    }
  }
View Full Code Here

Examples of org.apache.hadoop.hdfs.qjournal.MiniJournalCluster

  private final Random r = new Random();
 
  /**
   * Verifies that epoch numbers handed out by {@code createNewUniqueEpoch}
   * are strictly increasing for a single-threaded caller, both in the
   * fault-free case (exactly one-by-one) and with randomly injected logger
   * faults (monotonic, possibly with gaps).
   */
  @Test
  public void testSingleThreaded() throws IOException {
    Configuration conf = new Configuration();
    MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf).build();
    URI uri = cluster.getQuorumJournalURI(JID);
    QuorumJournalManager qjm = new QuorumJournalManager(
        conf, uri, FAKE_NSINFO);
    try {
      // Format once with a plain (non-faulty) QJM before the epoch checks.
      qjm.format(FAKE_NSINFO);
    } finally {
      qjm.close();
    }
   
    try {
      // With no failures or contention, epochs should increase one-by-one
      for (int i = 0; i < 5; i++) {
        qjm = new QuorumJournalManager(
            conf, uri, FAKE_NSINFO);
        try {
          qjm.createNewUniqueEpoch();
          assertEquals(i + 1, qjm.getLoggerSetForTests().getEpoch());
        } finally {
          qjm.close();
        }
      }
     
      // The loop above consumed epochs 1..5, so 5 is the last one granted.
      long prevEpoch = 5;
      // With some failures injected, it should still always increase, perhaps
      // skipping some
      for (int i = 0; i < 20; i++) {
        long newEpoch = -1;
        // Retry until an epoch is successfully created; each attempt uses a
        // fresh QJM with a fault-injecting logger factory and is closed in
        // the finally block regardless of outcome.
        while (true) {
          qjm = new QuorumJournalManager(
              conf, uri, FAKE_NSINFO, new FaultyLoggerFactory());
          try {
            qjm.createNewUniqueEpoch();
            newEpoch = qjm.getLoggerSetForTests().getEpoch();
            break;
          } catch (IOException ioe) {
            // It's OK to fail to create an epoch, since we randomly inject
            // faults. It's possible we'll inject faults in too many of the
            // underlying nodes, and a failure is expected in that case
          } finally {
            qjm.close();
          }
        }
        LOG.info("Created epoch " + newEpoch);
        // Even failed attempts may have burned epoch numbers, so we only
        // require strict monotonic growth, not consecutiveness.
        assertTrue("New epoch " + newEpoch + " should be greater than previous " +
            prevEpoch, newEpoch > prevEpoch);
        prevEpoch = newEpoch;
      }
    } finally {
      cluster.shutdown();
    }
  }
View Full Code Here

Examples of org.apache.hadoop.hdfs.qjournal.MiniJournalCluster

   * bounds for the other test cases, so they can exhaustively explore
   * the space of potential failures.
   */
  private static long determineMaxIpcNumber() throws Exception {
    Configuration conf = new Configuration();
    MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf).build();
    QuorumJournalManager qjm = null;
    long ret;
    try {
      // Run the standard fault-free workload and count the RPCs each
      // logger channel sends during it.
      qjm = createInjectableQJM(cluster);
      qjm.format(FAKE_NSINFO);
      doWorkload(cluster, qjm);
     
      // Sorted set de-duplicates: one element means all loggers agree.
      SortedSet<Integer> ipcCounts = Sets.newTreeSet();
      for (AsyncLogger l : qjm.getLoggerSetForTests().getLoggersForTests()) {
        InvocationCountingChannel ch = (InvocationCountingChannel)l;
        // Drain outstanding async calls so getRpcCount() is final.
        ch.waitForAllPendingCalls();
        ipcCounts.add(ch.getRpcCount());
      }
 
      // All of the loggers should have sent the same number of RPCs, since there
      // were no failures.
      assertEquals(1, ipcCounts.size());
     
      ret = ipcCounts.first();
      LOG.info("Max IPC count = " + ret);
    } finally {
      // Close the QJM before tearing down the JournalNodes it talks to.
      IOUtils.closeStream(qjm);
      cluster.shutdown();
    }
    return ret;
  }
View Full Code Here

Examples of org.apache.hadoop.hdfs.qjournal.MiniJournalCluster

       
        LOG.info("\n\n-------------------------------------------\n" +
            "Beginning test, failing at " + injectionStr + "\n" +
            "-------------------------------------------\n\n");
       
        MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf)
          .build();
        QuorumJournalManager qjm = null;
        try {
          qjm = createInjectableQJM(cluster);
          qjm.format(FAKE_NSINFO);
          List<AsyncLogger> loggers = qjm.getLoggerSetForTests().getLoggersForTests();
          failIpcNumber(loggers.get(0), failA);
          failIpcNumber(loggers.get(1), failB);
          int lastAckedTxn = doWorkload(cluster, qjm);

          if (lastAckedTxn < 6) {
            LOG.info("Failed after injecting failures at " + injectionStr +
                ". This is expected since we injected a failure in the " +
                "majority.");
          }
          qjm.close();
          qjm = null;

          // Now should be able to recover
          qjm = createInjectableQJM(cluster);
          long lastRecoveredTxn = QJMTestUtil.recoverAndReturnLastTxn(qjm);
          assertTrue(lastRecoveredTxn >= lastAckedTxn);
         
          writeSegment(cluster, qjm, lastRecoveredTxn + 1, 3, true);
        } catch (Throwable t) {
          // Test failure! Rethrow with the test setup info so it can be
          // easily triaged.
          throw new RuntimeException("Test failed with injection: " + injectionStr,
                t);
        } finally {
          cluster.shutdown();
          cluster = null;
          IOUtils.closeStream(qjm);
          qjm = null;
        }
      }
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle Inc. Contact coftware#gmail.com.