Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.CachePoolInfo
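
CachePoolInfo is the builder-style descriptor for an HDFS centralized-cache pool: it carries the pool name plus optional owner, group, FsPermission mode, byte limit, and maximum relative expiry for directives in the pool. Instances are passed to DistributedFileSystem.addCachePool and modifyCachePool, and come back from listCachePools via CachePoolEntry.getInfo(). The excerpts below are drawn mostly from Hadoop's own test suite and command-line tooling. As a minimal sketch (assuming dfs is an open DistributedFileSystem handle; the pool name and values here are hypothetical):

      // Minimal sketch, assuming dfs is an open DistributedFileSystem handle.
      CachePoolInfo examplePool = new CachePoolInfo("examplePool")   // hypothetical name
          .setMode(new FsPermission((short) 0755))                   // world-readable pool
          .setLimit(64L * 1024 * 1024);                              // cap cached bytes at 64 MB
      dfs.addCachePool(examplePool);
      dfs.modifyCachePool(examplePool.setLimit(128L * 1024 * 1024)); // raise the limit later
      dfs.removeCachePool("examplePool");   // dropping a pool also drops its directives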


      // Create and validate a pool
      final String pool = "poolparty";
      String groupName = "partygroup";
      FsPermission mode = new FsPermission((short)0777);
      long limit = 747;
      dfs.addCachePool(new CachePoolInfo(pool)
          .setGroupName(groupName)
          .setMode(mode)
          .setLimit(limit));
      RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
      assertTrue("No cache pools found", pit.hasNext());
      CachePoolInfo info = pit.next().getInfo();
      assertEquals(pool, info.getPoolName());
      assertEquals(groupName, info.getGroupName());
      assertEquals(mode, info.getMode());
      assertEquals(limit, (long)info.getLimit());
      assertFalse("Unexpected # of cache pools found", pit.hasNext());
   
      // Create some cache entries
      int numEntries = 10;
      String entryPrefix = "/party-";
      long prevId = -1;
      final Date expiry = new Date();
      for (int i=0; i<numEntries; i++) {
        prevId = dfs.addCacheDirective(
            new CacheDirectiveInfo.Builder().
              setPath(new Path(entryPrefix + i)).setPool(pool).
              setExpiration(
                  CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime())).
              build());
      }
      RemoteIterator<CacheDirectiveEntry> dit
          = dfs.listCacheDirectives(null);
      for (int i=0; i<numEntries; i++) {
        assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
        CacheDirectiveInfo cd = dit.next().getInfo();
        assertEquals(i+1, cd.getId().longValue());
        assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
        assertEquals(pool, cd.getPool());
      }
      assertFalse("Unexpected # of cache directives found", dit.hasNext());
     
      // Checkpoint once so the secondary NameNode (2NN) picks up the cache pools and directives
      secondary.doCheckpoint();
     
      // Add some more CacheManager state
      final String imagePool = "imagePool";
      dfs.addCachePool(new CachePoolInfo(imagePool));
      prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/image")).setPool(imagePool).build());

      // Save a new image to force a fresh fsimage download
      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      dfs.saveNamespace();
      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

      // Checkpoint again forcing a reload of FSN state
      boolean fetchImage = secondary.doCheckpoint();
      assertTrue("Secondary should have fetched a new fsimage from NameNode",
          fetchImage);

      // Remove temp pool and directive
      dfs.removeCachePool(imagePool);

      // Restart namenode
      cluster.restartNameNode();
   
      // Check that state came back up
      pit = dfs.listCachePools();
      assertTrue("No cache pools found", pit.hasNext());
      info = pit.next().getInfo();
      assertEquals(pool, info.getPoolName());
      assertEquals(groupName, info.getGroupName());
      assertEquals(mode, info.getMode());
      assertEquals(limit, (long)info.getLimit());
      assertFalse("Unexpected # of cache pools found", pit.hasNext());
   
      dit = dfs.listCacheDirectives(null);
      for (int i=0; i<numEntries; i++) {
        assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
        // ... (end of excerpt)
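
Each addCacheDirective call returns a numeric id, which is also what removeCacheDirective takes; a short sketch of the matching cleanup, assuming the dfs handle, pool, and prevId from the excerpt above:

      // Remove the most recently added directive by id, then the pool itself.
      dfs.removeCacheDirective(prevId);
      dfs.removeCachePool(pool); // also invalidates any directives left in the pool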


    nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid), bpid, bogusBlockIds);

    Path rootDir = helper.getDefaultWorkingDirectory(dfs);
    // Create the pool
    final String pool = "friendlyPool";
    nnRpc.addCachePool(new CachePoolInfo("friendlyPool"));
    // Create some test files
    final int numFiles = 2;
    final int numBlocksPerFile = 2;
    final List<String> paths = new ArrayList<String>(numFiles);
    for (int i=0; i<numFiles; i++) {
      // ... (end of excerpt)

  @Test(timeout=120000)
  public void testWaitForCachedReplicasInDirectory() throws Exception {
    // Create the pool
    final String pool = "friendlyPool";
    final CachePoolInfo poolInfo = new CachePoolInfo(pool);
    dfs.addCachePool(poolInfo);
    // Create some test files
    final List<Path> paths = new LinkedList<Path>();
    paths.add(new Path("/foo/bar"));
    paths.add(new Path("/foo/baz"));
    // ... (end of excerpt)
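
A directive whose path is a directory caches the directory's immediate children rather than recursing; a sketch, assuming the dfs handle and pool from the excerpt above:

    // Cache every file directly under /foo (non-recursive).
    long dirDirectiveId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/foo"))
        .setPool(pool)
        .build());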

   */
  @Test(timeout=120000)
  public void testReplicationFactor() throws Exception {
    // Create the pool
    final String pool = "friendlyPool";
    dfs.addCachePool(new CachePoolInfo(pool));
    // Create some test files
    final List<Path> paths = new LinkedList<Path>();
    paths.add(new Path("/foo/bar"));
    paths.add(new Path("/foo/baz"));
    paths.add(new Path("/foo2/bar2"));
    // ... (end of excerpt)
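
The cache replication factor is a per-directive setting and can be changed after the fact with modifyCacheDirective; a sketch, assuming the dfs handle and pool above:

    // Cache /foo/bar with two cached replicas, then drop back to one.
    long replId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/foo/bar"))
        .setPool(pool)
        .setReplication((short) 2)
        .build());
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
        .setId(replId)
        .setReplication((short) 1)
        .build());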

    final UserGroupInformation myUser = UserGroupInformation
        .createRemoteUser("myuser");
    final DistributedFileSystem myDfs =
        (DistributedFileSystem)DFSTestUtil.getFileSystemAs(myUser, conf);
    final String poolName = "poolparty";
    dfs.addCachePool(new CachePoolInfo(poolName)
        .setMode(new FsPermission((short)0700)));
    // Should only see partial info
    RemoteIterator<CachePoolEntry> it = myDfs.listCachePools();
    CachePoolInfo info = it.next().getInfo();
    assertFalse(it.hasNext());
    assertEquals("Expected pool name", poolName, info.getPoolName());
    assertNull("Unexpected owner name", info.getOwnerName());
    assertNull("Unexpected group name", info.getGroupName());
    assertNull("Unexpected mode", info.getMode());
    assertNull("Unexpected limit", info.getLimit());
    // Modify the pool so myuser is now the owner
    final long limit = 99;
    dfs.modifyCachePool(new CachePoolInfo(poolName)
        .setOwnerName(myUser.getShortUserName())
        .setLimit(limit));
    // Should see full info
    it = myDfs.listCachePools();
    info = it.next().getInfo();
    assertFalse(it.hasNext());
    assertEquals("Expected pool name", poolName, info.getPoolName());
    assertEquals("Mismatched owner name", myUser.getShortUserName(),
        info.getOwnerName());
    assertNotNull("Expected group name", info.getGroupName());
    assertEquals("Mismatched mode", (short) 0700,
        info.getMode().toShort());
    assertEquals("Mismatched limit", limit, (long)info.getLimit());
  }
  // ... (end of excerpt)
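
Pool permissions gate what listCachePools reveals: a caller without read access on the pool sees only the pool name, with owner, group, mode, and limit all null, which is exactly what the assertNull calls above verify. A hypothetical alternative to transferring ownership would be to open up the mode:

    // Alternative sketch: make the pool world-readable instead of changing owners.
    dfs.modifyCachePool(new CachePoolInfo(poolName)
        .setMode(new FsPermission((short) 0755)));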

  }

  @Test(timeout=120000)
  public void testExpiry() throws Exception {
    String pool = "pool1";
    dfs.addCachePool(new CachePoolInfo(pool));
    Path p = new Path("/mypath");
    DFSTestUtil.createFile(dfs, p, BLOCK_SIZE*2, (short)2, 0x999);
    // Expire after test timeout
    Date start = new Date();
    Date expiry = DateUtils.addSeconds(start, 120);
    // ... (end of excerpt)
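
Expirations can be expressed either as an absolute wall-clock time or relative to now; a sketch continuing the excerpt above (same dfs, pool, and path p):

    // Directive that expires two minutes from now (relative form).
    long expId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(p)
        .setPool(pool)
        .setExpiration(CacheDirectiveInfo.Expiration.newRelative(120 * 1000))
        .build());
    // Equivalent absolute form, anchored at the expiry Date computed above:
    //   .setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime()))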

  }

  @Test(timeout=120000)
  public void testLimit() throws Exception {
    try {
      dfs.addCachePool(new CachePoolInfo("poolofnegativity").setLimit(-99l));
      fail("Should not be able to set a negative limit");
    } catch (InvalidRequestException e) {
      GenericTestUtils.assertExceptionContains("negative", e);
    }
    final String destiny = "poolofdestiny";
    final Path path1 = new Path("/destiny");
    DFSTestUtil.createFile(dfs, path1, 2*BLOCK_SIZE, (short)1, 0x9494);
    // Start off with a limit that is too small
    final CachePoolInfo poolInfo = new CachePoolInfo(destiny)
        .setLimit(2*BLOCK_SIZE-1);
    dfs.addCachePool(poolInfo);
    final CacheDirectiveInfo info1 = new CacheDirectiveInfo.Builder()
        .setPool(destiny).setPath(path1).build();
    try {
      dfs.addCacheDirective(info1);
      fail("Should not be able to cache when there is no more limit");
    } catch (InvalidRequestException e) {
      GenericTestUtils.assertExceptionContains("remaining capacity", e);
    }
    // Raise the limit up to fit and it should work this time
    poolInfo.setLimit(2*BLOCK_SIZE);
    dfs.modifyCachePool(poolInfo);
    long id1 = dfs.addCacheDirective(info1);
    waitForCachePoolStats(dfs,
        2*BLOCK_SIZE, 2*BLOCK_SIZE,
        1, 1,
        poolInfo, "testLimit:1");
    // Add another file; it shouldn't be cached since the pool is at its limit
    final Path path2 = new Path("/failure");
    DFSTestUtil.createFile(dfs, path2, BLOCK_SIZE, (short)1, 0x9495);
    try {
      dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
          .setPool(destiny).setPath(path2).build(),
          EnumSet.noneOf(CacheFlag.class));
      fail("Should not be able to add another cached file");
    } catch (InvalidRequestException e) {
      GenericTestUtils.assertExceptionContains("remaining capacity", e);
    }
    // Bring the limit down, the first file should get uncached
    poolInfo.setLimit(BLOCK_SIZE);
    dfs.modifyCachePool(poolInfo);
    waitForCachePoolStats(dfs,
        2*BLOCK_SIZE, 0,
        1, 0,
        poolInfo, "testLimit:2");
    RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
    assertTrue("Expected a cache pool", it.hasNext());
    CachePoolStats stats = it.next().getStats();
    assertEquals("Overlimit bytes should be difference of needed and limit",
        BLOCK_SIZE, stats.getBytesOverlimit());
    // Moving a directive to a pool without enough limit should fail
    CachePoolInfo inadequate =
        new CachePoolInfo("poolofinadequacy").setLimit(BLOCK_SIZE);
    dfs.addCachePool(inadequate);
    try {
      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1)
          .setId(id1).setPool(inadequate.getPoolName()).build(),
          EnumSet.noneOf(CacheFlag.class));
      fail("Should not be able to move a directive to a pool without enough capacity");
    } catch (InvalidRequestException e) {
      GenericTestUtils.assertExceptionContains("remaining capacity", e);
    }
    // Succeeds when force=true
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1).setId(id1)
        .setPool(inadequate.getPoolName()).build(),
        EnumSet.of(CacheFlag.FORCE));
    // Also can add with force=true
    dfs.addCacheDirective(
        new CacheDirectiveInfo.Builder().setPool(inadequate.getPoolName())
            .setPath(path1).build(), EnumSet.of(CacheFlag.FORCE));
  }
  // ... (end of excerpt)
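
Per-pool accounting is exposed through CachePoolStats; waitForCachePoolStats above is a test helper that polls these same counters until they reach the expected values. A sketch of reading them directly, assuming the pool is the only one listed:

    // Inspect a pool's byte and file accounting.
    RemoteIterator<CachePoolEntry> statsIt = dfs.listCachePools();
    CachePoolStats poolStats = statsIt.next().getStats();
    System.out.println("needed=" + poolStats.getBytesNeeded()
        + " cached=" + poolStats.getBytesCached()
        + " overlimit=" + poolStats.getBytesOverlimit()
        + " files=" + poolStats.getFilesCached() + "/" + poolStats.getFilesNeeded());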

  @Test(timeout=30000)
  public void testMaxRelativeExpiry() throws Exception {
    // Test that negative and really big max expirations can't be set during add
    try {
      dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(-1l));
      fail("Added a pool with a negative max expiry.");
    } catch (InvalidRequestException e) {
      GenericTestUtils.assertExceptionContains("negative", e);
    }
    try {
      dfs.addCachePool(new CachePoolInfo("failpool")
          .setMaxRelativeExpiryMs(Long.MAX_VALUE - 1));
      fail("Added a pool with too big of a max expiry.");
    } catch (InvalidRequestException e) {
      GenericTestUtils.assertExceptionContains("too big", e);
    }
    // Test that setting a max relative expiry on a pool works
    CachePoolInfo coolPool = new CachePoolInfo("coolPool");
    final long poolExpiration = 1000 * 60 * 10L;
    dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(poolExpiration));
    RemoteIterator<CachePoolEntry> poolIt = dfs.listCachePools();
    CachePoolInfo listPool = poolIt.next().getInfo();
    assertFalse("Should only be one pool", poolIt.hasNext());
    assertEquals("Expected max relative expiry to match set value",
        poolExpiration, listPool.getMaxRelativeExpiryMs().longValue());
    // Test that negative and really big max expirations can't be modified
    try {
      dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(-1L));
      fail("Added a pool with a negative max expiry.");
    } catch (InvalidRequestException e) {
      assertExceptionContains("negative", e);
    }
    try {
      dfs.modifyCachePool(coolPool
          .setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER+1));
      fail("Added a pool with too big of a max expiry.");
    } catch (InvalidRequestException e) {
      assertExceptionContains("too big", e);
    }
    // Test that adding a directive without an expiration uses the pool's max
    CacheDirectiveInfo defaultExpiry = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/blah"))
        .setPool(coolPool.getPoolName())
        .build();
    dfs.addCacheDirective(defaultExpiry);
    RemoteIterator<CacheDirectiveEntry> dirIt =
        dfs.listCacheDirectives(defaultExpiry);
    CacheDirectiveInfo listInfo = dirIt.next().getInfo();
    assertFalse("Should only have one entry in listing", dirIt.hasNext());
    long listExpiration = listInfo.getExpiration().getAbsoluteMillis()
        - new Date().getTime();
    assertTrue("Directive expiry should be approximately the pool's max expiry",
        Math.abs(listExpiration - poolExpiration) < 10*1000);
    // Test that the max is enforced on add for relative and absolute
    CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/lolcat"))
        .setPool(coolPool.getPoolName());
    try {
      dfs.addCacheDirective(builder
          .setExpiration(Expiration.newRelative(poolExpiration+1))
          .build());
      fail("Added a directive that exceeds pool's max relative expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("exceeds the max relative expiration", e);
    }
    try {
      dfs.addCacheDirective(builder
          .setExpiration(Expiration.newAbsolute(
              new Date().getTime() + poolExpiration + (10*1000)))
          .build());
      fail("Added a directive that exceeds pool's max relative expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("exceeds the max relative expiration", e);
    }
    // Test that max is enforced on modify for relative and absolute Expirations
    try {
      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
          .setId(listInfo.getId())
          .setExpiration(Expiration.newRelative(poolExpiration+1))
          .build());
      fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("exceeds the max relative expiration", e);
    }
    try {
      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
          .setId(listInfo.getId())
          .setExpiration(Expiration.newAbsolute(
              new Date().getTime() + poolExpiration + (10*1000)))
          .build());
      fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("exceeds the max relative expiration", e);
    }
    // Test some giant limit values with add
    try {
      dfs.addCacheDirective(builder
          .setExpiration(Expiration.newRelative(
              Long.MAX_VALUE))
          .build());
      fail("Added a directive with a gigantic max value");
    } catch (IllegalArgumentException e) {
      assertExceptionContains("is too far in the future", e);
    }
    try {
      dfs.addCacheDirective(builder
          .setExpiration(Expiration.newAbsolute(
              Long.MAX_VALUE))
          .build());
      fail("Added a directive with a gigantic max value");
    } catch (InvalidRequestException e) {
      assertExceptionContains("is too far in the future", e);
    }
    // Test some giant limit values with modify
    try {
      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
          .setId(listInfo.getId())
          .setExpiration(Expiration.NEVER)
          .build());
      fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("exceeds the max relative expiration", e);
    }
    try {
      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
          .setId(listInfo.getId())
          .setExpiration(Expiration.newAbsolute(
              Long.MAX_VALUE))
          .build());
      fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("is too far in the future", e);
    }
    // Test that the max is enforced on modify correctly when changing pools
    CachePoolInfo destPool = new CachePoolInfo("destPool");
    dfs.addCachePool(destPool.setMaxRelativeExpiryMs(poolExpiration / 2));
    try {
      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
          .setId(listInfo.getId())
          .setPool(destPool.getPoolName())
          .build());
      fail("Modified a directive to a pool with a lower max expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("exceeds the max relative expiration", e);
    }
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
        .setId(listInfo.getId())
        .setPool(destPool.getPoolName())
        .setExpiration(Expiration.newRelative(poolExpiration / 2))
        .build());
    dirIt = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder()
        .setPool(destPool.getPoolName())
        .build());
    listInfo = dirIt.next().getInfo();
    listExpiration = listInfo.getExpiration().getAbsoluteMillis()
        - new Date().getTime();
    assertTrue("Unexpected relative expiry " + listExpiration
        + " expected approximately " + poolExpiration/2,
        Math.abs(poolExpiration/2 - listExpiration) < 10*1000);
    // Test that cache pool and directive expiry can be modified back to never
    dfs.modifyCachePool(destPool
        .setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER));
    poolIt = dfs.listCachePools();
    listPool = poolIt.next().getInfo();
    while (!listPool.getPoolName().equals(destPool.getPoolName())) {
      listPool = poolIt.next().getInfo();
    }
    assertEquals("Expected max relative expiry to match set value",
        CachePoolInfo.RELATIVE_EXPIRY_NEVER,
        listPool.getMaxRelativeExpiryMs().longValue());
    // ... (end of excerpt)
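
The pattern throughout this excerpt: a pool's maxRelativeExpiryMs caps directive expirations in both forms (an absolute expiration is checked as an offset from the current time, which is why the absolute cases above also fail with "exceeds the max relative expiration"), and setting the pool back to CachePoolInfo.RELATIVE_EXPIRY_NEVER restores the default of effectively no cap.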

        0xFADED);
    // Set up a log appender watcher
    final LogVerificationAppender appender = new LogVerificationAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
    dfs.addCachePool(new CachePoolInfo("pool"));
    dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
        .setPath(fileName).setReplication((short) 1).build());
    waitForCachedBlocks(namenode, -1, numCachedReplicas,
        "testExceeds:1");
    // Check that no DNs saw an excess CACHE message
    // ... (end of excerpt)

      if (name == null) {
        System.err.println("You must specify a name when creating a " +
            "cache pool.");
        return 1;
      }
      CachePoolInfo info = new CachePoolInfo(name);

      String owner = StringUtils.popOptionWithArgument("-owner", args);
      if (owner != null) {
        info.setOwnerName(owner);
      }
      String group = StringUtils.popOptionWithArgument("-group", args);
      if (group != null) {
        info.setGroupName(group);
      }
      String modeString = StringUtils.popOptionWithArgument("-mode", args);
      if (modeString != null) {
        short mode = Short.parseShort(modeString, 8);
        info.setMode(new FsPermission(mode));
      }
      String limitString = StringUtils.popOptionWithArgument("-limit", args);
      if (limitString != null) {
        long limit = Long.parseLong(limitString);
        info.setLimit(limit);
      }
      String maxTtlString = StringUtils.popOptionWithArgument("-maxTtl", args);
      try {
        Long maxTtl = parseTtlString(maxTtlString);
        if (maxTtl != null) {
          info.setMaxRelativeExpiryMs(maxTtl);
        }
      } catch (IOException e) {
        System.err.println(
            "Error while parsing maxTtl value: " + e.getMessage());
        return 1;
        // ... (end of excerpt)
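
This last excerpt is the option parsing behind the hdfs cacheadmin -addPool subcommand: the -mode value is parsed as octal (Short.parseShort(modeString, 8)) and -maxTtl goes through parseTtlString, which accepts suffixed durations such as 30m or 2d. A command roughly equivalent to the first excerpt on this page would be: hdfs cacheadmin -addPool poolparty -group partygroup -mode 0777 -limit 747.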
