Usage examples of org.apache.hadoop.hdfs.LogVerificationAppender.countLinesWithMessage()


Examples of org.apache.hadoop.hdfs.LogVerificationAppender.countLinesWithMessage()

    try {
      // Set up a logger to check log message
      final LogVerificationAppender appender = new LogVerificationAppender();
      final Logger logger = Logger.getRootLogger();
      logger.addAppender(appender);
      int count = appender.countLinesWithMessage(
          "Maximum size of an xattr: 0 (unlimited)");
      assertEquals("Expected no messages about unlimited xattr size", 0, count);

      conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, 0);
      cluster =
View Full Code Here

Examples of org.apache.hadoop.hdfs.LogVerificationAppender.countLinesWithMessage()

      conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, 0);
      cluster =
          new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();

      count = appender.countLinesWithMessage(
          "Maximum size of an xattr: 0 (unlimited)");
      // happens twice because we format then run
      assertEquals("Expected unlimited xattr size", 2, count);
    } finally {
      conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
View Full Code Here

Examples of org.apache.hadoop.hdfs.LogVerificationAppender.countLinesWithMessage()

    setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));

    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        int lines = appender.countLinesWithMessage(
            "more bytes in the cache: " +
            DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
        return lines > 0;
      }
    }, 500, 30000);
View Full Code Here

Examples of org.apache.hadoop.hdfs.LogVerificationAppender.countLinesWithMessage()

    dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
        .setPath(fileName).setReplication((short) 1).build());
    waitForCachedBlocks(namenode, -1, numCachedReplicas,
        "testExceeds:1");
    // Check that no DNs saw an excess CACHE message
    int lines = appender.countLinesWithMessage(
        "more bytes in the cache: " +
        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
    // Try creating a file with giant-sized blocks that exceed cache capacity
    dfs.delete(fileName, false);
View Full Code Here

Examples of org.apache.hadoop.hdfs.LogVerificationAppender.countLinesWithMessage()

    DFSTestUtil.createFile(dfs, fileName, 4096, fileLen, CACHE_CAPACITY * 2,
        (short) 1, 0xFADED);
    // Nothing will get cached, so just force sleep for a bit
    Thread.sleep(4000);
    // Still should not see any excess commands
    lines = appender.countLinesWithMessage(
        "more bytes in the cache: " +
        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
  }
}
View Full Code Here

Examples of org.apache.hadoop.hdfs.LogVerificationAppender.countLinesWithMessage()

    dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
        .setPath(fileName).setReplication((short) 1).build());
    waitForCachedBlocks(namenode, -1, numCachedReplicas,
        "testExceeds:1");
    // Check that no DNs saw an excess CACHE message
    int lines = appender.countLinesWithMessage(
        "more bytes in the cache: " +
        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
    // Try creating a file with giant-sized blocks that exceed cache capacity
    dfs.delete(fileName, false);
View Full Code Here

Examples of org.apache.hadoop.hdfs.LogVerificationAppender.countLinesWithMessage()

    DFSTestUtil.createFile(dfs, fileName, 4096, fileLen, CACHE_CAPACITY * 2,
        (short) 1, 0xFADED);
    // Nothing will get cached, so just force sleep for a bit
    Thread.sleep(4000);
    // Still should not see any excess commands
    lines = appender.countLinesWithMessage(
        "more bytes in the cache: " +
        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
  }
}
View Full Code Here

Examples of org.apache.hadoop.hdfs.LogVerificationAppender.countLinesWithMessage()

    setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));

    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        int lines = appender.countLinesWithMessage(
            "more bytes in the cache: " +
            DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
        return lines > 0;
      }
    }, 500, 30000);
View Full Code Here

Examples of org.apache.hadoop.hdfs.LogVerificationAppender.countLinesWithMessage()

    dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
        .setPath(fileName).setReplication((short) 1).build());
    waitForCachedBlocks(namenode, -1, numCachedReplicas,
        "testExceeds:1");
    // Check that no DNs saw an excess CACHE message
    int lines = appender.countLinesWithMessage(
        "more bytes in the cache: " +
        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
    // Try creating a file with giant-sized blocks that exceed cache capacity
    dfs.delete(fileName, false);
View Full Code Here

Examples of org.apache.hadoop.hdfs.LogVerificationAppender.countLinesWithMessage()

    DFSTestUtil.createFile(dfs, fileName, 4096, fileLen, CACHE_CAPACITY * 2,
        (short) 1, 0xFADED);
    // Nothing will get cached, so just force sleep for a bit
    Thread.sleep(4000);
    // Still should not see any excess commands
    lines = appender.countLinesWithMessage(
        "more bytes in the cache: " +
        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
  }
}
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle Corporation. Contact: coftware#gmail.com.