Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.LogVerificationAppender
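
All of the snippets below follow the same pattern: attach a LogVerificationAppender to the root log4j logger, trigger some HDFS operation, and then count matching log lines or exception messages to verify what was logged. The class itself is a small in-memory appender used only by tests. The following is a minimal sketch of what such an appender looks like, inferred from the calls made in these snippets (getLog, countLinesWithMessage, countExceptionsWithMessage); see the Hadoop test sources for the exact implementation.

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.log4j.AppenderSkeleton;
    import org.apache.log4j.spi.LoggingEvent;
    import org.apache.log4j.spi.ThrowableInformation;

    public class LogVerificationAppender extends AppenderSkeleton {
      private final List<LoggingEvent> log = new ArrayList<LoggingEvent>();

      @Override
      protected void append(LoggingEvent event) {
        // Buffer every event so tests can inspect it afterwards
        log.add(event);
      }

      @Override
      public void close() {
      }

      @Override
      public boolean requiresLayout() {
        return false;
      }

      // Snapshot of everything logged since the appender was attached
      public List<LoggingEvent> getLog() {
        return new ArrayList<LoggingEvent>(log);
      }

      // Number of logged messages whose rendered text contains the given fragment
      public int countLinesWithMessage(String text) {
        int count = 0;
        for (LoggingEvent e : getLog()) {
          String msg = e.getRenderedMessage();
          if (msg != null && msg.contains(text)) {
            count++;
          }
        }
        return count;
      }

      // Number of logged exceptions whose message contains the given fragment
      public int countExceptionsWithMessage(String text) {
        int count = 0;
        for (LoggingEvent e : getLog()) {
          ThrowableInformation t = e.getThrowableInformation();
          if (t != null && t.getThrowable() != null
              && t.getThrowable().getMessage() != null
              && t.getThrowable().getMessage().contains(text)) {
            count++;
          }
        }
        return count;
      }
    }

Because the appender is registered on the root logger it sees output from every subsystem, so the tests match on fairly specific message fragments such as " is corrupt with MD5 checksum of " or "more bytes in the cache: ".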


        // Corrupt the md5 files in all the namedirs
        corruptFSImageMD5(true);

        // Attach our own log appender so we can verify output
        final LogVerificationAppender appender = new LogVerificationAppender();
        final Logger logger = Logger.getRootLogger();
        logger.addAppender(appender);

        // Try to start a new cluster
        LOG.info("\n===========================================\n" +
        "Starting same cluster after simulated crash");
        try {
          cluster = new MiniDFSCluster.Builder(config)
            .numDataNodes(0)
            .format(false)
            .build();
          fail("Should not have successfully started with corrupt image");
        } catch (IOException ioe) {
          GenericTestUtils.assertExceptionContains(
              "Failed to load an FSImage file!", ioe);
          int md5failures = appender.countExceptionsWithMessage(
              " is corrupt with MD5 checksum of ");
          // Two namedirs, so should have seen two failures
          assertEquals(2, md5failures);
        }
    } finally {
View Full Code Here

      }
    }

    try {
      // Set up a logger to check log message
      final LogVerificationAppender appender = new LogVerificationAppender();
      final Logger logger = Logger.getRootLogger();
      logger.addAppender(appender);
      int count = appender.countLinesWithMessage(
          "Maximum size of an xattr: 0 (unlimited)");
      assertEquals("Expected no messages about unlimited xattr size", 0, count);

      conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, 0);
      cluster =
          new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();

      count = appender.countLinesWithMessage(
          "Maximum size of an xattr: 0 (unlimited)");
      // happens twice because we format then run
      assertEquals("Expected unlimited xattr size", 2, count);
    } finally {
      conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
View Full Code Here


      dataNodes[i].updateHeartbeat(
          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
          (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0);
    }
   
    final LogVerificationAppender appender = new LogVerificationAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
   
    // try to choose NUM_OF_DATANODES which is more than actually available
    // nodes.
    DatanodeDescriptor[] targets = replicator.chooseTarget(filename,
        NUM_OF_DATANODES, dataNodes[0], new ArrayList<DatanodeDescriptor>(),
        BLOCK_SIZE);
    assertEquals(targets.length, NUM_OF_DATANODES - 2);

    final List<LoggingEvent> log = appender.getLog();
    assertNotNull(log);
    assertFalse(log.size() == 0);
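    // The placement policy could only satisfy NUM_OF_DATANODES - 2 of the
    // requested targets, so the most recent log entry should be the warning
    // it emitted about the shortfall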
    final LoggingEvent lastLogEntry = log.get(log.size() - 1);
   
    assertEquals(lastLogEntry.getLevel(), Level.WARN);
View Full Code Here

      total = verifyExpectedCacheUsage(
          rounder.round(total + fileSizes[i]), 4 * (i + 1));
    }

    // nth file should hit a capacity exception
    final LogVerificationAppender appender = new LogVerificationAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
    setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));

    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        int lines = appender.countLinesWithMessage(
            "more bytes in the cache: " +
            DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
        return lines > 0;
      }
    }, 500, 30000);
View Full Code Here

      updateHeartbeatWithUsage(dataNodes[i],
          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
          (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
    }
   
    final LogVerificationAppender appender = new LogVerificationAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
   
    // try to choose NUM_OF_DATANODES which is more than actually available
    // nodes.
    DatanodeStorageInfo[] targets = chooseTarget(NUM_OF_DATANODES);
    assertEquals(targets.length, NUM_OF_DATANODES - 2);

    final List<LoggingEvent> log = appender.getLog();
    assertNotNull(log);
    assertFalse(log.size() == 0);
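    // The shortfall should be reported, but at no more than WARN severity;
    // Level.WARN.isGreaterOrEqual(level) is true for WARN and less severe levels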
    final LoggingEvent lastLogEntry = log.get(log.size() - 1);
   
    assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
View Full Code Here

    final long fileLen = CACHE_CAPACITY * (NUM_DATANODES*2);
    int numCachedReplicas = (int) ((CACHE_CAPACITY*NUM_DATANODES)/BLOCK_SIZE);
    DFSTestUtil.createFile(dfs, fileName, fileLen, (short) NUM_DATANODES,
        0xFADED);
    // Set up a log appender watcher
    final LogVerificationAppender appender = new LogVerificationAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
    dfs.addCachePool(new CachePoolInfo("pool"));
    dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
        .setPath(fileName).setReplication((short) 1).build());
    waitForCachedBlocks(namenode, -1, numCachedReplicas,
        "testExceeds:1");
    // Check that no DNs saw an excess CACHE message
    int lines = appender.countLinesWithMessage(
        "more bytes in the cache: " +
        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
    // Try creating a file with giant-sized blocks that exceed cache capacity
    dfs.delete(fileName, false);
    DFSTestUtil.createFile(dfs, fileName, 4096, fileLen, CACHE_CAPACITY * 2,
        (short) 1, 0xFADED);
    // Nothing will get cached, so just force sleep for a bit
    Thread.sleep(4000);
    // Still should not see any excess commands
    lines = appender.countLinesWithMessage(
        "more bytes in the cache: " +
        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
  }
View Full Code Here
