Examples of CacheDirectiveInfo


Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo
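
A CacheDirectiveInfo describes one entry in HDFS centralized cache management: a path to cache, the cache pool that owns it, and optional replication and expiration settings. Instances are immutable and built through CacheDirectiveInfo.Builder. Before the excerpts below, here is a minimal end-to-end sketch of the client-side API; it assumes a running cluster and an already-initialized DistributedFileSystem handle named dfs, and the pool and path names are illustrative, not taken from the excerpts:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class CacheDirectiveSketch {
  // Minimal walkthrough: create a pool, add a directive, list it, remove it.
  static void demo(DistributedFileSystem dfs) throws Exception {
    // Every directive belongs to a cache pool, which carries the
    // ownership, permission, limit, and expiry defaults.
    dfs.addCachePool(new CachePoolInfo("demoPool"));

    // Path and pool are required; replication and expiration are optional.
    CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/data/hot-file"))
        .setPool("demoPool")
        .setReplication((short) 2)
        .build();

    // The NameNode assigns and returns a unique directive ID.
    long id = dfs.addCacheDirective(directive);

    // Listings can be filtered by pool, path, or ID via a filter object.
    RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().setPool("demoPool").build());
    while (it.hasNext()) {
      System.out.println(it.next().getInfo());
    }

    // Directives are removed by ID, not by the original info object.
    dfs.removeCacheDirective(id);
  }
}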

    case OP_ADD_CACHE_DIRECTIVE: {
      AddCacheDirectiveInfoOp addOp = (AddCacheDirectiveInfoOp) op;
      // Replay the logged operation: the CacheManager re-creates the
      // directive and returns the fully populated CacheDirectiveInfo.
      CacheDirectiveInfo result = fsNamesys.
          getCacheManager().addDirectiveFromEditLog(addOp.directive);
      if (toAddRetryCache) {
        // Remember the assigned ID in the retry cache so that a retried
        // client RPC gets the same ID instead of a duplicate directive.
        Long id = result.getId();
        fsNamesys.addCacheEntryWithPayload(op.rpcClientId, op.rpcCallId, id);
      }
      break;
    }

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo

    proto.addCachePool(new CachePoolInfo("pool3").
        setMode(new FsPermission((short)0777)));
    proto.addCachePool(new CachePoolInfo("pool4").
        setMode(new FsPermission((short)0)));

    CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder().
        setPath(new Path("/alpha")).
        setPool("pool1").
        build();
    CacheDirectiveInfo beta = new CacheDirectiveInfo.Builder().
        setPath(new Path("/beta")).
        setPool("pool2").
        build();
    CacheDirectiveInfo delta = new CacheDirectiveInfo.Builder().
        setPath(new Path("/delta")).
        setPool("pool1").
        build();

    long alphaId = addAsUnprivileged(alpha);
    long alphaId2 = addAsUnprivileged(alpha);
    assertFalse("Expected to get unique directives when re-adding an "
        + "existing CacheDirectiveInfo",
        alphaId == alphaId2);
    long betaId = addAsUnprivileged(beta);

    try {
      addAsUnprivileged(new CacheDirectiveInfo.Builder().
          setPath(new Path("/unicorn")).
          setPool("no_such_pool").
          build());
      fail("expected an error when adding to a non-existent pool.");
    } catch (InvalidRequestException ioe) {
      GenericTestUtils.assertExceptionContains("Unknown pool", ioe);
    }

    try {
      addAsUnprivileged(new CacheDirectiveInfo.Builder().
          setPath(new Path("/blackhole")).
          setPool("pool4").
          build());
      fail("expected an error when adding to a pool with " +
          "mode 0 (no permissions for anyone).");
    } catch (AccessControlException e) {
      GenericTestUtils.
          assertExceptionContains("Permission denied while accessing pool", e);
    }

    try {
      addAsUnprivileged(new CacheDirectiveInfo.Builder().
          setPath(new Path("/illegal:path/")).
          setPool("pool1").
          build());
      fail("expected an error when adding a malformed path " +
          "to the cache directives.");
    } catch (IllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("is not a valid DFS filename", e);
    }

    try {
      addAsUnprivileged(new CacheDirectiveInfo.Builder().
          setPath(new Path("/emptypoolname")).
          setReplication((short)1).
          setPool("").
          build());
      fail("expected an error when adding a cache " +
          "directive with an empty pool name.");
    } catch (InvalidRequestException e) {
      GenericTestUtils.assertExceptionContains("Invalid empty pool name", e);
    }

    long deltaId = addAsUnprivileged(delta);

    // We expect the following to succeed, because DistributedFileSystem
    // qualifies the path.
    long relativeId = addAsUnprivileged(
        new CacheDirectiveInfo.Builder().
            setPath(new Path("relative")).
            setPool("pool1").
            build());

    RemoteIterator<CacheDirectiveEntry> iter;
    iter = dfs.listCacheDirectives(null);
    validateListAll(iter, alphaId, alphaId2, betaId, deltaId, relativeId);
    iter = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().setPool("pool3").build());
    assertFalse(iter.hasNext());
    iter = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().setPool("pool1").build());
    validateListAll(iter, alphaId, alphaId2, deltaId, relativeId);
    iter = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().setPool("pool2").build());
    validateListAll(iter, betaId);
    iter = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().setId(alphaId2).build());
    validateListAll(iter, alphaId2);
    iter = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().setId(relativeId).build());
    validateListAll(iter, relativeId);

    dfs.removeCacheDirective(betaId);
    iter = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().setPool("pool2").build());
    assertFalse(iter.hasNext());

    try {
      dfs.removeCacheDirective(betaId);
      fail("expected an error when removing a non-existent ID");
    } catch (InvalidRequestException e) {
      GenericTestUtils.assertExceptionContains("No directive with ID", e);
    }

    try {
      proto.removeCacheDirective(-42L);
      fail("expected an error when removing a negative ID");
    } catch (InvalidRequestException e) {
      GenericTestUtils.assertExceptionContains(
          "Invalid negative ID", e);
    }
    try {
      proto.removeCacheDirective(43L);
      fail("expected an error when removing a non-existent ID");
    } catch (InvalidRequestException e) {
      GenericTestUtils.assertExceptionContains("No directive with ID", e);
    }

    dfs.removeCacheDirective(alphaId);
    dfs.removeCacheDirective(alphaId2);
    dfs.removeCacheDirective(deltaId);

    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().
        setId(relativeId).
        setReplication((short)555).
        build());
    iter = dfs.listCacheDirectives(null);
    assertTrue(iter.hasNext());
    CacheDirectiveInfo modified = iter.next().getInfo();
    assertEquals(relativeId, modified.getId().longValue());
    assertEquals((short)555, modified.getReplication().shortValue());
    dfs.removeCacheDirective(relativeId);
    iter = dfs.listCacheDirectives(null);
    assertFalse(iter.hasNext());

    // Verify that cache directives with path "." work correctly
    CacheDirectiveInfo directive =
        new CacheDirectiveInfo.Builder().setPath(new Path("."))
            .setPool("pool1").build();
    long id = dfs.addCacheDirective(directive);
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(
        directive).setId(id).setReplication((short)2).build());
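The excerpt above relies on a test helper, addAsUnprivileged, that is not shown on this page. A plausible sketch of such a helper, assuming an unprivilegedUser UserGroupInformation and the cluster's conf are in scope (both assumptions here, not the actual Hadoop test code), runs the add as a non-superuser so that pool permission checks apply:

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

// Hypothetical helper: submit the directive as a non-superuser so the
// NameNode enforces the target pool's mode bits.
private long addAsUnprivileged(final CacheDirectiveInfo directive)
    throws Exception {
  return unprivilegedUser.doAs(new PrivilegedExceptionAction<Long>() {
    @Override
    public Long run() throws IOException {
      DistributedFileSystem myDfs =
          (DistributedFileSystem) FileSystem.get(conf);
      return myDfs.addCacheDirective(directive);
    }
  });
}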

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo

      RemoteIterator<CacheDirectiveEntry> dit
          = dfs.listCacheDirectives(null);
      for (int i=0; i<numEntries; i++) {
        assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
        CacheDirectiveInfo cd = dit.next().getInfo();
        assertEquals(i+1, cd.getId().longValue());
        assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
        assertEquals(pool, cd.getPool());
      }
      assertFalse("Unexpected # of cache directives found", dit.hasNext());
     
      // Checkpoint once to set some cache pools and directives on 2NN side
      secondary.doCheckpoint();
     
      // Add some more CacheManager state
      final String imagePool = "imagePool";
      dfs.addCachePool(new CachePoolInfo(imagePool));
      prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/image")).setPool(imagePool).build());

      // Save a new image to force a fresh fsimage download
      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      dfs.saveNamespace();
      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

      // Checkpoint again forcing a reload of FSN state
      boolean fetchImage = secondary.doCheckpoint();
      assertTrue("Secondary should have fetched a new fsimage from NameNode",
          fetchImage);

      // Remove temp pool and directive
      dfs.removeCachePool(imagePool);

      // Restart namenode
      cluster.restartNameNode();
   
      // Check that state came back up
      pit = dfs.listCachePools();
      assertTrue("No cache pools found", pit.hasNext());
      info = pit.next().getInfo();
      assertEquals(pool, info.getPoolName());
      assertEquals(groupName, info.getGroupName());
      assertEquals(mode, info.getMode());
      assertEquals(limit, (long)info.getLimit());
      assertFalse("Unexpected # of cache pools found", pit.hasNext());
   
      dit = dfs.listCacheDirectives(null);
      for (int i=0; i<numEntries; i++) {
        assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
        CacheDirectiveInfo cd = dit.next().getInfo();
        assertEquals(i+1, cd.getId().longValue());
        assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
        assertEquals(pool, cd.getPool());
        assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
      }
      assertFalse("Unexpected # of cache directives found", dit.hasNext());
 

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo

    // Check the initial statistics at the namenode
    waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicas:0");
    // Cache and check each path in sequence
    int expected = 0;
    for (int i=0; i<numFiles; i++) {
      CacheDirectiveInfo directive =
          new CacheDirectiveInfo.Builder().
            setPath(new Path(paths.get(i))).
            setPool(pool).
            build();
      nnRpc.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
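The call above passes EnumSet.noneOf(CacheFlag.class), so pool resource limits are enforced during the add. Passing CacheFlag.FORCE skips the limit check instead. A brief sketch against the DistributedFileSystem API rather than the raw ClientProtocol handle used above (dfs and directive are assumed to be in scope):

import java.util.EnumSet;

import org.apache.hadoop.fs.CacheFlag;

// Bypass the pool's byte limit check when adding the directive.
long forcedId = dfs.addCacheDirective(directive, EnumSet.of(CacheFlag.FORCE));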

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo

    DFSTestUtil.createFile(dfs, path1, 2*BLOCK_SIZE, (short)1, 0x9494);
    // Start off with a limit that is too small
    final CachePoolInfo poolInfo = new CachePoolInfo(destiny)
        .setLimit(2*BLOCK_SIZE-1);
    dfs.addCachePool(poolInfo);
    final CacheDirectiveInfo info1 = new CacheDirectiveInfo.Builder()
        .setPool(destiny).setPath(path1).build();
    try {
      dfs.addCacheDirective(info1);
      fail("Should not be able to cache when there is no more limit");
    } catch (InvalidRequestException e) {
      // Expected: the pool's limit (2*BLOCK_SIZE-1) cannot hold a 2-block file.
    }
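Once the pool's limit is raised, the same directive can be added. A short sketch of the follow-up, reusing the poolInfo and info1 objects from the excerpt (the new limit value is an illustrative assumption):

// Raise the pool's byte limit so the 2-block file now fits, then retry.
dfs.modifyCachePool(poolInfo.setLimit(4 * BLOCK_SIZE));
long id1 = dfs.addCacheDirective(info1);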

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo

      fail("Added a pool with too big of a max expiry.");
    } catch (InvalidRequestException e) {
      assertExceptionContains("too big", e);
    }
    // Test that adding a directive without an expiration uses the pool's max
    CacheDirectiveInfo defaultExpiry = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/blah"))
        .setPool(coolPool.getPoolName())
        .build();
    dfs.addCacheDirective(defaultExpiry);
    RemoteIterator<CacheDirectiveEntry> dirIt =
        dfs.listCacheDirectives(defaultExpiry);
    CacheDirectiveInfo listInfo = dirIt.next().getInfo();
    assertFalse("Should only have one entry in listing", dirIt.hasNext());
    long listExpiration = listInfo.getExpiration().getAbsoluteMillis()
        - new Date().getTime();
    assertTrue("Directive expiry should be approximately the pool's max expiry",
        Math.abs(listExpiration - poolExpiration) < 10*1000);
    // Test that the max is enforced on add for relative and absolute
    CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/lolcat"))
        .setPool(coolPool.getPoolName());
    try {
      dfs.addCacheDirective(builder
          .setExpiration(Expiration.newRelative(poolExpiration+1))
          .build());
      fail("Added a directive that exceeds pool's max relative expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("exceeds the max relative expiration", e);
    }
    try {
      dfs.addCacheDirective(builder
          .setExpiration(Expiration.newAbsolute(
              new Date().getTime() + poolExpiration + (10*1000)))
          .build());
      fail("Added a directive that exceeds pool's max relative expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("exceeds the max relative expiration", e);
    }
    // Test that max is enforced on modify for relative and absolute Expirations
    try {
      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
          .setId(listInfo.getId())
          .setExpiration(Expiration.newRelative(poolExpiration+1))
          .build());
      fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("exceeds the max relative expiration", e);
    }
    try {
      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
          .setId(listInfo.getId())
          .setExpiration(Expiration.newAbsolute(
              new Date().getTime() + poolExpiration + (10*1000)))
          .build());
      fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("exceeds the max relative expiration", e);
    }
    // Test some giant limit values with add
    try {
      dfs.addCacheDirective(builder
          .setExpiration(Expiration.newRelative(
              Long.MAX_VALUE))
          .build());
      fail("Added a directive with a gigantic max value");
    } catch (IllegalArgumentException e) {
      assertExceptionContains("is too far in the future", e);
    }
    try {
      dfs.addCacheDirective(builder
          .setExpiration(Expiration.newAbsolute(
              Long.MAX_VALUE))
          .build());
      fail("Added a directive with a gigantic max value");
    } catch (InvalidRequestException e) {
      assertExceptionContains("is too far in the future", e);
    }
    // Test some giant limit values with modify
    try {
      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
          .setId(listInfo.getId())
          .setExpiration(Expiration.NEVER)
          .build());
      fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("exceeds the max relative expiration", e);
    }
    try {
      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
          .setId(listInfo.getId())
          .setExpiration(Expiration.newAbsolute(
              Long.MAX_VALUE))
          .build());
      fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("is too far in the future", e);
    }
    // Test that the max is enforced on modify correctly when changing pools
    CachePoolInfo destPool = new CachePoolInfo("destPool");
    dfs.addCachePool(destPool.setMaxRelativeExpiryMs(poolExpiration / 2));
    try {
      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
          .setId(listInfo.getId())
          .setPool(destPool.getPoolName())
          .build());
      fail("Modified a directive to a pool with a lower max expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("exceeds the max relative expiration", e);
    }
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
        .setId(listInfo.getId())
        .setPool(destPool.getPoolName())
        .setExpiration(Expiration.newRelative(poolExpiration / 2))
        .build());
    dirIt = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder()
        .setPool(destPool.getPoolName())
        .build());
    listInfo = dirIt.next().getInfo();
    listExpiration = listInfo.getExpiration().getAbsoluteMillis()
        - new Date().getTime();
    assertTrue("Unexpected relative expiry " + listExpiration
        + " expected approximately " + poolExpiration/2,
        Math.abs(poolExpiration/2 - listExpiration) < 10*1000);
    // Test that cache pool and directive expiry can be modified back to never
    dfs.modifyCachePool(destPool
        .setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER));
    poolIt = dfs.listCachePools();
    listPool = poolIt.next().getInfo();
    while (!listPool.getPoolName().equals(destPool.getPoolName())) {
      listPool = poolIt.next().getInfo();
    }
    assertEquals("Expected max relative expiry to match set value",
        CachePoolInfo.RELATIVE_EXPIRY_NEVER,
        listPool.getMaxRelativeExpiryMs().longValue());
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
        .setId(listInfo.getId())
        .setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER))
        .build());
    // Test modifying close to the limit
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
        .setId(listInfo.getId())
        .setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER - 1))
        .build());
  }
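For reference, the Expiration used throughout this test is the nested CacheDirectiveInfo.Expiration class: newRelative(ms) counts from now, newAbsolute(ms) is an epoch timestamp, and Expiration.NEVER is the maximum relative value. A compact sketch that pins a path for one hour (the path and pool names are illustrative assumptions, and dfs is assumed to be in scope):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;

// Cache /reports/today in demoPool; the directive lapses after one hour.
CacheDirectiveInfo oneHour = new CacheDirectiveInfo.Builder()
    .setPath(new Path("/reports/today"))
    .setPool("demoPool")
    .setExpiration(Expiration.newRelative(60L * 60 * 1000))
    .build();
long oneHourId = dfs.addCacheDirective(oneHour);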

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo

      if (directive.getId() != null) {
        throw new IOException("addDirective: you cannot specify an ID " +
            "for this operation.");
      }
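      // The NameNode assigns the ID itself; the fully resolved directive is
      // logged so that edit-log replay reproduces exactly the same state.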
      CacheDirectiveInfo effectiveDirective =
          cacheManager.addDirective(directive, pc, flags);
      getEditLog().logAddCacheDirectiveInfo(effectiveDirective,
          cacheEntry != null);
      result = effectiveDirective.getId();
      effectiveDirectiveStr = effectiveDirective.toString();
      success = true;
    } finally {
      writeUnlock();
      if (success) {
        getEditLog().logSync();

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo

      prog.beginStep(Phase.LOADING_FSIMAGE, step);
      int numDirectives = in.readInt();
      prog.setTotal(Phase.LOADING_FSIMAGE, step, numDirectives);
      Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
      for (int i = 0; i < numDirectives; i++) {
        CacheDirectiveInfo info = FSImageSerialization.readCacheDirectiveInfo(in);
        // Get pool reference by looking it up in the map
        final String poolName = info.getPool();
        CacheDirective directive =
            new CacheDirective(info.getId(), info.getPath().toUri().getPath(),
                info.getReplication(), info.getExpiration().getAbsoluteMillis());
        addCacheDirective(poolName, directive);
        counter.increment();
      }
      prog.endStep(Phase.LOADING_FSIMAGE, step);
    }

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo

    Long id = info.getId();
    if (id == null) {
      throw new InvalidRequestException("Must supply an ID.");
    }
    CacheDirective prevEntry = getById(id);
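    // Merge the caller's changes with the existing directive's settings,
    // then replace the old directive with the merged one.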
    CacheDirectiveInfo newInfo = createFromInfoAndDefaults(info, prevEntry);
    removeInternal(prevEntry);
    addInternal(new CacheDirective(newInfo), getCachePool(newInfo.getPool()));
  }

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo

      CacheDirective prevEntry = getById(id);
      checkWritePermission(pc, prevEntry.getPool());

      // Fill in defaults
      CacheDirectiveInfo infoWithDefaults =
          createFromInfoAndDefaults(info, prevEntry);
      CacheDirectiveInfo.Builder builder =
          new CacheDirectiveInfo.Builder(infoWithDefaults);

      // Do validation
      validatePath(infoWithDefaults);
      validateReplication(infoWithDefaults, (short)-1);
      // Need to test the pool being set here to avoid rejecting a modify for a
      // directive that's already been forced into a pool
      CachePool srcPool = prevEntry.getPool();
      CachePool destPool = getCachePool(validatePoolName(infoWithDefaults));
      if (!srcPool.getPoolName().equals(destPool.getPoolName())) {
        checkWritePermission(pc, destPool);
        if (!flags.contains(CacheFlag.FORCE)) {
          checkLimit(destPool, infoWithDefaults.getPath().toUri().getPath(),
              infoWithDefaults.getReplication());
        }
      }
      // Verify the expiration against the destination pool
      validateExpiryTime(infoWithDefaults, destPool.getMaxRelativeExpiryMs());