Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.LogVerificationAppender
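LogVerificationAppender is a small test helper used by the HDFS test suite to capture log output so that tests can assert on what was logged. The snippets below rely on two of its methods, getLog() and countLinesWithMessage(). A minimal sketch of such an appender, assuming a Log4j 1.x AppenderSkeleton base and inferring the method behaviour from how the examples use it (this is not the verbatim Hadoop source), looks like this:

// Sketch (assumption): a Log4j 1.x appender that records every LoggingEvent
// in memory so tests can inspect what was logged. Only getLog() and
// countLinesWithMessage() are confirmed by the snippets below; the rest of
// the body is illustrative.
package org.apache.hadoop.hdfs;

import java.util.ArrayList;
import java.util.List;

import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;

public class LogVerificationAppender extends AppenderSkeleton {
  private final List<LoggingEvent> log = new ArrayList<LoggingEvent>();

  @Override
  public boolean requiresLayout() {
    return false;
  }

  @Override
  protected void append(final LoggingEvent event) {
    log.add(event);   // keep every event for later inspection
  }

  @Override
  public void close() {
  }

  /** All events captured so far. */
  public List<LoggingEvent> getLog() {
    return new ArrayList<LoggingEvent>(log);
  }

  /** Number of captured messages containing the given text. */
  public int countLinesWithMessage(final String text) {
    int count = 0;
    for (LoggingEvent e : log) {
      String msg = e.getRenderedMessage();
      if (msg != null && msg.contains(text)) {
        count++;
      }
    }
    return count;
  }
}

Because it simply buffers every event, attaching it to the root logger captures output from all components involved in a test.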


      // (fragment) body of a loop that leaves some datanodes with less
      // remaining space than a new block needs, so they cannot be chosen
      updateHeartbeatWithUsage(dataNodes[i],
          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
          (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
    }
   
    final LogVerificationAppender appender = new LogVerificationAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
   
    // try to choose NUM_OF_DATANODES which is more than actually available
    // nodes.
    DatanodeStorageInfo[] targets = chooseTarget(NUM_OF_DATANODES);
    // only NUM_OF_DATANODES - 2 nodes still have enough space to be chosen
    assertEquals(targets.length, NUM_OF_DATANODES - 2);

    final List<LoggingEvent> log = appender.getLog();
    assertNotNull(log);
    assertFalse(log.size() == 0);
    final LoggingEvent lastLogEntry = log.get(log.size() - 1);

    // the placement shortfall should be logged at no more than WARN severity
    assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
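
This first snippet shows the basic pattern used throughout these examples: attach the appender to the root logger, run the operation under test, then inspect the captured LoggingEvents. Once the assertions are done, a test would normally detach the appender again (for example with Logger.getRootLogger().removeAppender(appender)) so that later tests are not affected.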


      // (fragment) body of a loop that caches files one at a time and checks
      // that the reported cache usage grows by each file's rounded size
      total = DFSTestUtil.verifyExpectedCacheUsage(
          rounder.round(total + fileSizes[i]), 4 * (i + 1), fsd);
    }

    // nth file should hit a capacity exception
    final LogVerificationAppender appender = new LogVerificationAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
    setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));

    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        int lines = appender.countLinesWithMessage(
            "more bytes in the cache: " +
            DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
        return lines > 0;
      }
    }, 500, 30000);
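
GenericTestUtils.waitFor polls the supplied condition every 500 ms for up to 30 seconds and fails with a timeout if it never becomes true, so this snippet effectively asserts that the datanode eventually logs that caching the last file would require more bytes than dfs.datanode.max.locked.memory (DFS_DATANODE_MAX_LOCKED_MEMORY_KEY) allows.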

    // file is twice the total cluster cache capacity; only this many block
    // replicas can actually fit in the cache
    final long fileLen = CACHE_CAPACITY * (NUM_DATANODES*2);
    int numCachedReplicas = (int) ((CACHE_CAPACITY*NUM_DATANODES)/BLOCK_SIZE);
    DFSTestUtil.createFile(dfs, fileName, fileLen, (short) NUM_DATANODES,
        0xFADED);
    // Set up a log appender watcher
    final LogVerificationAppender appender = new LogVerificationAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
    dfs.addCachePool(new CachePoolInfo("pool"));
    dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
        .setPath(fileName).setReplication((short) 1).build());
    waitForCachedBlocks(namenode, -1, numCachedReplicas,
        "testExceeds:1");
    // Check that no DNs saw an excess CACHE message
    int lines = appender.countLinesWithMessage(
        "more bytes in the cache: " +
        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
    // Try creating a file with giant-sized blocks that exceed cache capacity
    dfs.delete(fileName, false);
    DFSTestUtil.createFile(dfs, fileName, 4096, fileLen, CACHE_CAPACITY * 2,
        (short) 1, 0xFADED);
    // Nothing will get cached, so just force sleep for a bit
    Thread.sleep(4000);
    // Still should not see any excess commands
    lines = appender.countLinesWithMessage(
        "more bytes in the cache: " +
        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
  }
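
Here the appender is used for the opposite check: the warning about needing more bytes in the cache than dfs.datanode.max.locked.memory allows must never appear, first while caching a file larger than the cluster's total cache capacity and then after recreating it with blocks twice the size of a single datanode's cache. Zero matching lines means the namenode never asked any datanode to cache beyond its limit.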
