Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks
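LocatedBlocks is what the NameNode returns from ClientProtocol#getBlockLocations(src, offset, length): the file length, an under-construction flag, and a list of LocatedBlock entries, each pairing an ExtendedBlock with the DatanodeInfo locations that currently hold its replicas. Before the examples, here is a minimal sketch of the typical access pattern; the class name, the file path, and the cast to DistributedFileSystem are illustrative assumptions, not part of the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class LocatedBlocksSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS cluster; otherwise the cast fails.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    String src = "/tmp/demo.dat";                       // illustrative path
    // Ask the NameNode for the block layout of the whole file.
    LocatedBlocks blocks = dfs.getClient().getNamenode()
        .getBlockLocations(src, 0, Long.MAX_VALUE);
    System.out.println("length=" + blocks.getFileLength()
        + " underConstruction=" + blocks.isUnderConstruction()
        + " blocks=" + blocks.locatedBlockCount());
    for (LocatedBlock lb : blocks.getLocatedBlocks()) {
      System.out.println(lb.getBlock() + " @ offset " + lb.getStartOffset()
          + (lb.isCorrupt() ? " (corrupt)" : ""));
      for (DatanodeInfo dn : lb.getLocations()) {
        System.out.println("  replica on " + dn.getXferAddr());
      }
    }
  }
}

Production code would normally go through FileSystem#getFileBlockLocations instead; the snippets below use the lower-level DFSClient/ClientProtocol path because they are taken from HDFS test code.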


    fout.hflush();
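    // fileId identifies the still-open file in the abandonBlock() calls below;
    // the preceding hflush() ensures the last block has already been allocated at the NameNode.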
    long fileId = ((DFSOutputStream)fout.getWrappedStream()).getFileId();

    // Now abandon the last block
    DFSClient dfsclient = DFSClientAdapter.getDFSClient(fs);
    LocatedBlocks blocks =
      dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
    int originalNumBlocks = blocks.locatedBlockCount();
    LocatedBlock b = blocks.getLastLocatedBlock();
    dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
        dfsclient.clientName);
   
    // call abandonBlock again to make sure the operation is idempotent
    dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
        dfsclient.clientName);

    // And close the file
    fout.close();

    // Restart the NameNode and check that the block has been abandoned
    cluster.restartNameNode();
    blocks = dfsclient.getNamenode().getBlockLocations(src, 0,
        Integer.MAX_VALUE);
    Assert.assertEquals("Blocks " + b + " has not been abandoned.",
        orginalNumBlocks, blocks.locatedBlockCount() + 1);
  }


        if (i == 0) {
          dfsClientReadFile(filePath);
        } else {
          dfsClientReadFileFromPosition(filePath);
        }
        LocatedBlocks blocks = dfs.dfs.getNamenode()
            .getBlockLocations(filePath.toString(), 0, Long.MAX_VALUE);
        replicaCount = blocks.get(0).getLocations().length;
      }
      verifyFirstBlockCorrupted(filePath, false);
      int expectedReplicaCount = repl - corruptBlocReplicas;
      verifyCorruptedBlockCount(filePath, expectedReplicaCount);
      verifyFsckHealth("Target Replicas is 3 but found 1 replica");

      int corruptBlockCount) throws IOException, AccessControlException,
      FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
    DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
    DFSTestUtil.waitReplication(dfs, filePath, repl);
    // Locate the file blocks by asking the NameNode
    final LocatedBlocks locatedblocks = dfs.dfs.getNamenode()
        .getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
    Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
    // The file only has one block
    LocatedBlock lblock = locatedblocks.get(0);
    DatanodeInfo[] datanodeinfos = lblock.getLocations();
    ExtendedBlock block = lblock.getBlock();
    // corrupt some or all of the block replicas
    for (int i = 0; i < corruptBlockCount; i++) {
      DatanodeInfo dninfo = datanodeinfos[i];

   * Verify that the first block of the file is corrupted (for all of its replicas).
   */
  private void verifyFirstBlockCorrupted(Path filePath, boolean isCorrupted)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException {
    final LocatedBlocks locatedBlocks = dfs.dfs.getNamenode()
        .getBlockLocations(filePath.toUri().getPath(), 0, Long.MAX_VALUE);
    final LocatedBlock firstLocatedBlock = locatedBlocks.get(0);
    Assert.assertEquals(isCorrupted, firstLocatedBlock.isCorrupt());
  }

   * location from the NameNode.
   */
  private void verifyCorruptedBlockCount(Path filePath, int expectedReplicas)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException {
    final LocatedBlocks lBlocks = dfs.dfs.getNamenode().getBlockLocations(
        filePath.toUri().getPath(), 0, Long.MAX_VALUE);
    // Only the first block of the file is used in this test
    LocatedBlock firstLocatedBlock = lBlocks.get(0);
    Assert.assertEquals(expectedReplicas,
        firstLocatedBlock.getLocations().length);
  }

        .format(true).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    try {
      DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
      LocatedBlocks lb = cluster.getNameNode().getRpcServer()
          .getBlockLocations("/tmp/x", 0, 16);
      // Create a new block object, because the block inside LocatedBlock at
      // namenode is of type BlockInfo.
      ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
      Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
      final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
      ClientDatanodeProtocol proxy =
          DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
      try {
        proxy.getBlockLocalPathInfo(blk, token);
        Assert.fail("The call should have failed as this user "

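      // Fill the cluster to roughly 30% of its total capacity; with replication set to
      // numOfDatanodes, a file of 'length' bytes consumes about totalUsedSpace in all.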
      long totalUsedSpace = totalCapacity * 3 / 10;
      long length = totalUsedSpace / numOfDatanodes;
      TestBalancer.createFile(cluster, filePath, length,
          (short) numOfDatanodes, 0);
     
      LocatedBlocks lbs = client.getBlockLocations(filePath.toUri().getPath(), 0,
          length);
      Set<ExtendedBlock> before = getBlocksOnRack(lbs.getLocatedBlocks(), RACK0);

      long newCapacity = CAPACITY;
      String newRack = RACK1;
      String newNodeGroup = NODEGROUP2;
      // start up an empty node with the same capacity and on the same rack
      cluster.startDataNodes(conf, 1, true, null, new String[]{newRack},
          new long[] {newCapacity}, new String[]{newNodeGroup});

      totalCapacity += newCapacity;

      // run balancer and validate results
      runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);
     
      lbs = client.getBlockLocations(filePath.toUri().getPath(), 0, length);
      Set<ExtendedBlock> after = getBlocksOnRack(lbs.getLocatedBlocks(), RACK0);
      assertEquals(before, after);
     
    } finally {
      cluster.shutdown();
    }

    // the second block is marked as corrupt
    ExtendedBlock b2 = new ExtendedBlock("bpid", 2, 1, 1);
    LocatedBlock l2 = new LocatedBlock(b2, ds, 0, true);

    List<LocatedBlock> ls = Arrays.asList(l1, l2);
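    // LocatedBlocks constructor arguments here: file length, isUnderConstruction,
    // block list, last block, isLastBlockComplete.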
    LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true);

    BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);

    assertTrue("expected 2 blocks but got " + bs.length,
               bs.length == 2);

    int corruptCount = 0;
    for (BlockLocation b: bs) {
      if (b.isCorrupt()) {
        corruptCount++;
      }
    }

    assertTrue("expected 1 corrupt files but got " + corruptCount,
        corruptCount == 1);

    // test an empty LocatedBlocks
    bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
    assertEquals(0, bs.length);
  }

        System.out.println("Encountered expected exception");
      }

      // verify that no blocks are associated with this file
      // bad block allocations were cleaned up earlier.
      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                  file1.toString(), 0, Long.MAX_VALUE);
      System.out.println("locations = " + locations.locatedBlockCount());
      assertTrue("Error blocks were not cleaned up",
                 locations.locatedBlockCount() == 0);
    } finally {
      cluster.shutdown();
      client.close();
    }
  }

      Path file1 = new Path("/filestatus.dat");
      createFile(dfs, file1, 1);
      System.out.println("testFileCreationError2: "
                         + "Created file filestatus.dat with one replicas.");

      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                  file1.toString(), 0, Long.MAX_VALUE);
      System.out.println("testFileCreationError2: "
          + "The file has " + locations.locatedBlockCount() + " blocks.");

      // add one block to the file
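      // addBlock arguments: src, clientName, previous block (none), excluded nodes (none),
      // file id (GRANDFATHER_INODE_ID, the placeholder used when no real inode id is known),
      // favored nodes (none)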
      LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
          client.clientName, null, null, INodeId.GRANDFATHER_INODE_ID, null);
      System.out.println("testFileCreationError2: "
          + "Added block " + location.getBlock());

      locations = client.getNamenode().getBlockLocations(file1.toString(),
                                                    0, Long.MAX_VALUE);
      int count = locations.locatedBlockCount();
      System.out.println("testFileCreationError2: "
          + "The file now has " + count + " blocks.");
     
      // set the soft and hard limit to be 1 second so that the
      // namenode triggers lease recovery
      cluster.setLeasePeriod(leasePeriod, leasePeriod);

      // wait for the lease to expire
      try {
        Thread.sleep(5 * leasePeriod);
      } catch (InterruptedException e) {
      }

      // verify that the last block was synchronized.
      locations = client.getNamenode().getBlockLocations(file1.toString(),
                                                    0, Long.MAX_VALUE);
      System.out.println("testFileCreationError2: "
          + "locations = " + locations.locatedBlockCount());
      assertEquals(0, locations.locatedBlockCount());
      System.out.println("testFileCreationError2 successful");
    } finally {
      IOUtils.closeStream(dfs);
      cluster.shutdown();
    }
