Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.DistributedFileSystem
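
A quick orientation before the snippets: DistributedFileSystem is the
FileSystem implementation backed by an HDFS namenode, and it exposes
HDFS-specific operations (lease recovery, block locations, the underlying
DFSClient) that a plain FileSystem does not. A minimal sketch of obtaining a
handle, assuming fs.defaultFS points at an HDFS cluster; the path is
illustrative only:

  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  if (fs instanceof DistributedFileSystem) {
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // recoverLease() returns true once the lease on the file has been released
    boolean recovered = dfs.recoverLease(new Path("/tmp/example"));
  }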


  @Test (timeout = 30000)
  public void testRecoverLease() throws IOException {
    HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000);
    CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class);
    Mockito.when(reporter.progress()).thenReturn(true);
    DistributedFileSystem dfs = Mockito.mock(DistributedFileSystem.class);
    // Fail four times and pass on the fifth.
    Mockito.when(dfs.recoverLease(FILE)).
      thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(true);
    assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter));
    Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE);
    // Make sure we waited at least hbase.lease.recovery.dfs.timeout * 3 (the first two
    // invocations will happen pretty fast... then we fall into the longer wait loop).
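
The test above exercises a retry loop around DistributedFileSystem.recoverLease().
A minimal sketch of that pattern, purely illustrative and not the actual
FSHDFSUtils.recoverDFSFileLease implementation (method name, timeout and pause
parameters are hypothetical):

  boolean recoverLeaseWithRetries(DistributedFileSystem dfs, Path path,
      long timeoutMs, long pauseMs) throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      // recoverLease() reports true once the lease is released and the last
      // block is finalized; until then, pause and poll again.
      if (dfs.recoverLease(path)) {
        return true;
      }
      Thread.sleep(pauseMs);
    }
    return false;
  }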


    LOG.info("parity path: " + parityPathStr);
    FileSystem parityFS = ppair.getFileSystem();
    if (!(parityFS instanceof DistributedFileSystem)) {
      throw new IOException("parity file is not on distributed file system");
    }
    DistributedFileSystem parityDFS = (DistributedFileSystem) parityFS;

   
    // now corrupt the block corresponding to the stripe selected
    FileStatus parityFileStatus =
      parityDFS.getFileStatus(new Path(parityPathStr));
    long parityBlockSize = parityFileStatus.getBlockSize();
    long parityFileLength = parityFileStatus.getLen();
    long parityFileLengthInBlocks = (parityFileLength / parityBlockSize) +
      (((parityFileLength % parityBlockSize) == 0) ? 0L : 1L);
    if (parityFileLengthInBlocks <= stripe) {
      throw new IOException("selected stripe " + stripe +
                            " but parity file only has " +
                            parityFileLengthInBlocks + " blocks");
    }
    if (parityBlockSize != BLOCK_SIZE) {
      throw new IOException("file block size is " + BLOCK_SIZE +
                            " but parity file block size is " +
                            parityBlockSize);
    }
    LocatedBlocks parityFileBlocks = parityDFS.getClient().namenode.
      getBlockLocations(parityPathStr, 0, parityFileLength);
    if (blockInStripe >= PARITY_BLOCKS) {
      throw new IOException("blockInStripe is " + blockInStripe +
                            " but must be smaller than " + PARITY_BLOCKS);
    }
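
Once the sanity checks pass, the block to corrupt can be picked out of the
LocatedBlocks result by index. A hedged sketch of that selection; the index
arithmetic assumes parity blocks are laid out as consecutive stripes of
PARITY_BLOCKS blocks each, which is not shown in the excerpt:

  int blockIdx = (int) (stripe * PARITY_BLOCKS + blockInStripe);
  LocatedBlock blockToCorrupt = parityFileBlocks.get(blockIdx);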

  @Override
  public FSDataInputStream open(Path f, int bufferSize) throws IOException {
    // We want to use RAID logic only on instances of DFS.
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem underlyingDfs = (DistributedFileSystem) fs;
      LocatedBlocks lbs =
          underlyingDfs.getLocatedBlocks(f, 0L, Long.MAX_VALUE);
      if (lbs != null) {
        // Use underlying filesystem if the file is under construction.
        if (!lbs.isUnderConstruction()) {
          // Use underlying filesystem if file length is 0.
          final long fileSize = getFileSize(lbs);
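
The override above only takes the RAID-aware path for finalized, non-empty
files; files that are still under construction (or have zero length) are handed
straight to the underlying DistributedFileSystem, presumably because their
parity is not yet complete enough to reconstruct missing blocks from.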

      TestRaidDfs.waitForFileRaided(LOG, fileSys, file2, destPath);
      cnode.stop(); cnode.join();

      FileStatus file1Stat = fileSys.getFileStatus(file1);
      FileStatus file2Stat = fileSys.getFileStatus(file2);
      DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
      LocatedBlocks file1Loc =
        RaidDFSUtil.getBlockLocations(dfs, file1.toUri().getPath(),
                                      0, file1Stat.getLen());
      LocatedBlocks file2Loc =
        RaidDFSUtil.getBlockLocations(dfs, file2.toUri().getPath(),

    FileSystem.setDefaultUri(conf, namenode);
  }

  private DistributedRaidFileSystem getRaidFS() throws IOException {
    DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
    Configuration clientConf = new Configuration(conf);
    clientConf.set("fs.hdfs.impl",
        "org.apache.hadoop.hdfs.DistributedRaidFileSystem");
    clientConf.set("fs.raid.underlyingfs.impl",
        "org.apache.hadoop.hdfs.DistributedFileSystem");
    clientConf.setBoolean("fs.hdfs.impl.disable.cache", true);
    URI dfsUri = dfs.getUri();
    return (DistributedRaidFileSystem)FileSystem.get(dfsUri, clientConf);
  }
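
Note the fs.hdfs.impl.disable.cache setting in getRaidFS(): FileSystem.get()
normally caches instances per scheme and authority, so without it the call
could hand back the already-cached DistributedFileSystem instead of
instantiating the DistributedRaidFileSystem named by the overridden
fs.hdfs.impl.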

     *         build, it will assume cluster name matches.
     * @throws IOException
     */
    static public boolean canUseFastCopy(List<Path> srcs, Path dst,
        Configuration conf) throws IOException {
      DistributedFileSystem dstdfs = DFSUtil.convertToDFS(dst
          .getFileSystem(conf));
      if (dstdfs == null) {
        return false;
      }

      String dstClusterName = dstdfs.getClusterName();
      for (Path src : srcs) {
        DistributedFileSystem srcdfs = DFSUtil.convertToDFS(src
            .getFileSystem(conf));
        if (srcdfs == null) {
          return false;
        } else if (dstClusterName != null) {
          // We assume those clusterName == null case was older
          // version of DFS. We always enable fastcopy for those
          // cases.
          String srcClusterName = srcdfs.getClusterName();
          if (srcClusterName != null && !srcClusterName.equals(dstClusterName)) {
            return false;
          }
        }
      }
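
A hypothetical call site for canUseFastCopy (paths are illustrative): the
FastCopy path is taken only when every source and the destination resolve to
the same DFS cluster; otherwise the caller falls back to an ordinary copy.

  List<Path> srcs = Arrays.asList(new Path("/user/data/part-00000"));
  Path dst = new Path("/user/backup/part-00000");
  if (canUseFastCopy(srcs, dst, conf)) {
    // issue block-level FastCopy requests
  } else {
    // plain byte-for-byte copy
  }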

        //for raidDFS
        if(dstfs instanceof FilterFileSystem) {
            dstfs = ((FilterFileSystem) dstfs).getRawFileSystem();
        }
        if(dstfs instanceof DistributedFileSystem) {
          DistributedFileSystem dstdistfs = (DistributedFileSystem) dstfs;
          //enable copy by chunk only if the concat method is available on the
          //destination distributed file system
          DFSClient dfsClient = dstdistfs.getClient();
          if (dfsClient.isConcatAvailable())
            copyByChunk = true;
        }
        LOG.debug("After check, copy by chunk is set to: " + copyByChunk);
      }
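
The concat check above gates copy-by-chunk: chunked copying splits a large file
into pieces that are copied in parallel and then stitched back together with
the DFS concat call, so when the destination file system does not support
concat the copy presumably has to fall back to writing each file as a single
stream.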

    try {
      cnode = RaidNode.createRaidNode(null, localConf);
      TestRaidDfs.waitForDirRaided(LOG, fileSys, dirPath, destPath);
      cnode.stop(); cnode.join();
     
      DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
      String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
      assertEquals("no corrupt files expected", 0, corruptFiles.length);
      assertEquals("filesFixed() should return 0 before fixing files",
                   0, cnode.blockIntegrityMonitor.getNumFilesFixed());
      this.corruptFiles(dirPath, crcs, rsCorruptFileIdx1, dfs, files,

    try {
      cnode = RaidNode.createRaidNode(null, localConf);
      TestRaidDfs.waitForDirRaided(LOG, fileSys, dirPath, destPath);
      cnode.stop(); cnode.join();
     
      DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
      String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
      assertEquals("no corrupt files expected", 0, corruptFiles.length);
      assertEquals("filesFixed() should return 0 before fixing files",
                   0, cnode.blockIntegrityMonitor.getNumFilesFixed());
     

      cnode.stop(); cnode.join();

      long parityCRC = RaidDFSUtil.getCRC(fileSys, parityFile);

      FileStatus parityStat = fileSys.getFileStatus(parityFile);
      DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
      LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
        dfs, parityFile.toUri().getPath(), 0, parityStat.getLen());
      String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
     
      assertEquals("no corrupt files expected", 0, corruptFiles.length);
