Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.DistributedFileSystem

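The snippets below come from the Hadoop RAID and NameNode test suites. As an orientation, here is a minimal, self-contained sketch of obtaining a DistributedFileSystem and listing a file's block locations through the public FileSystem API; the namenode URI and the path are illustrative, not taken from the snippets.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class BlockLocationsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Illustrative URI; the tests below get theirs from a MiniDFSCluster.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
        DistributedFileSystem dfs = (DistributedFileSystem) fs;

        Path file = new Path("/user/example/data.txt"); // hypothetical path
        FileStatus stat = dfs.getFileStatus(file);
        BlockLocation[] blocks = dfs.getFileBlockLocations(stat, 0, stat.getLen());
        for (BlockLocation b : blocks) {
          System.out.println(b); // hosts, offset, and length of each block
        }
      }
    }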

  // Fragment of a test helper; the enclosing signature is reconstructed from
  // the call shape and may differ slightly in the original source.
  private LocatedBlocks getBlockLocations(Path file) throws IOException {
    FileStatus stat = fileSys.getFileStatus(file);
    return getBlockLocations(file, stat.getLen());
  }

  // Wraps the underlying DistributedFileSystem in a DistributedRaidFileSystem,
  // which can transparently recover corrupt blocks from parity data.
  private DistributedRaidFileSystem getRaidFS() throws IOException {
    DistributedFileSystem dfs = (DistributedFileSystem) fileSys;
    Configuration clientConf = new Configuration(conf);
    clientConf.set("fs.hdfs.impl",
        "org.apache.hadoop.hdfs.DistributedRaidFileSystem");
    clientConf.set("fs.raid.underlyingfs.impl",
        "org.apache.hadoop.hdfs.DistributedFileSystem");
    // Bypass the FileSystem cache so the impl override above takes effect.
    clientConf.setBoolean("fs.hdfs.impl.disable.cache", true);
    URI dfsUri = dfs.getUri();
    return (DistributedRaidFileSystem) FileSystem.get(dfsUri, clientConf);
  }
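A brief usage sketch for the helper above: the returned raid file system behaves like any other FileSystem, and reads through it can recover from corrupt blocks. The path is hypothetical.

      DistributedRaidFileSystem raidFs = getRaidFS();
      FSDataInputStream in = raidFs.open(new Path("/user/test/file1")); // hypothetical path
      // Reads that hit a corrupt block can be served from parity data.
      in.close();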


      Thread.sleep(1000);                  // keep waiting
    }

    // Poll until no block of the file is still under construction,
    // i.e. the file has been fully closed.
    while (true) {
      DistributedFileSystem dfs = (DistributedFileSystem) fileSys;
      LocatedBlocks locations = RaidDFSUtil.getBlockLocations(
        dfs, file.toUri().getPath(), 0, parityStat.getLen());
      if (!locations.isUnderConstruction()) {
        break;
      }
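The loop above polls with no upper bound. A sketch of the same idea packaged as a bounded helper; the method name and the timeout handling are assumptions, not part of the original test.

  private void waitForFileClosed(Path file, long fileLen, long timeoutMs)
      throws IOException, InterruptedException {
    DistributedFileSystem dfs = (DistributedFileSystem) fileSys;
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (true) {
      LocatedBlocks locations = RaidDFSUtil.getBlockLocations(
          dfs, file.toUri().getPath(), 0, fileLen);
      if (!locations.isUnderConstruction()) {
        return; // all blocks complete; the file is closed
      }
      if (System.currentTimeMillis() > deadline) {
        throw new IOException("Timed out waiting for " + file + " to close");
      }
      Thread.sleep(1000); // keep waiting
    }
  }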

      FileStatus srcStat = fileSys.getFileStatus(file1);
      DistributedFileSystem dfs = (DistributedFileSystem) fileSys;
      LocatedBlocks locations = RaidDFSUtil.getBlockLocations(
        dfs, file1.toUri().getPath(), 0, srcStat.getLen());

      // Corrupt blocks in different stripes; the block fixer can repair them.
      int[] corruptBlockIdxs = new int[]{0, 4, 6};
      for (int idx : corruptBlockIdxs) {
        LOG.info("Corrupting block " + locations.get(idx).getBlock());
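A sketch of the verification such tests typically perform around corruption: capture the file's checksum before corrupting it, let the block fixer run, then re-read through the raid file system and compare. getCRC is the helper used in later snippets on this page; expectedCRC is a hypothetical variable captured before the corruption step.

      long expectedCRC = getCRC(fileSys, file1);   // before corrupting blocks
      // ... corrupt blocks and let the block fixer repair them ...
      long actualCRC = getCRC(getRaidFS(), file1); // read through the raid FS
      assertEquals(expectedCRC, actualCRC);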

      short parityRepl = 1;
      encoder.encodeFile(fileSys, file1, fileSys, parityFile1, parityRepl,
        Reporter.NULL);

      // Ensure there are no corrupt files yet.
      DistributedFileSystem dfs = (DistributedFileSystem) fileSys;
      String[] corruptFiles = RaidDFSUtil.getCorruptFiles(conf);
      assertEquals(0, corruptFiles.length);

      // Now corrupt the file.
      long corruptOffset = blockSize * 5;
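RaidDFSUtil.getCorruptFiles is a test-side utility; a roughly equivalent check is possible through the public DistributedFileSystem API, sketched below under the assumption that both report the same set of files.

      RemoteIterator<Path> corrupt = dfs.listCorruptFileBlocks(new Path("/"));
      while (corrupt.hasNext()) {
        LOG.info("File with corrupt blocks: " + corrupt.next());
      }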

      cnode = RaidNode.createRaidNode(null, localConf);
      TestRaidDfs.waitForFileRaided(LOG, fileSys, file1, destPath);
      cnode.stop();
      cnode.join();

      FileStatus srcStat = fileSys.getFileStatus(file1);
      DistributedFileSystem dfs = (DistributedFileSystem) fileSys;
      LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
        dfs, file1.toUri().getPath(), 0, srcStat.getLen());

      String[] corruptFiles = RaidDFSUtil.getCorruptFiles(conf);
      assertEquals(0, corruptFiles.length);

      cnode.stop();
      cnode.join();

      long parityCRC = getCRC(fileSys, parityFile);

      FileStatus parityStat = fileSys.getFileStatus(parityFile);
      DistributedFileSystem dfs = (DistributedFileSystem) fileSys;
      LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
        dfs, parityFile.toUri().getPath(), 0, parityStat.getLen());

      String[] corruptFiles = RaidDFSUtil.getCorruptFiles(conf);
      assertEquals(0, corruptFiles.length);

      Path partFile = new Path(harDirectory, "part-0");
      long partCRC = getCRC(fileSys, partFile);
      FileStatus partStat = fileSys.getFileStatus(partFile);
      DistributedFileSystem dfs = (DistributedFileSystem) fileSys;
      LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
        dfs, partFile.toUri().getPath(), 0, partStat.getLen());
      // 7 parity blocks => 4 har blocks.
      assertEquals(4, locs.getLocatedBlocks().size());
      cnode.stop();
      cnode.join();

   * OP_CLEAR_NS_QUOTA  (12)
   */
  private CheckpointSignature runOperations() throws IOException {
    LOG.info("Creating edits by performing fs operations");
    // No explicit check: if this is not a DistributedFileSystem, the call
    // throws an exception, which is what we want.
    DistributedFileSystem dfs = cluster.getFileSystem();
    DFSTestUtil.runOperations(cluster, dfs, cluster.getConfiguration(0),
        dfs.getDefaultBlockSize(), 0);

    // OP_ROLLING_UPGRADE_START
    cluster.getNamesystem().getEditLog().logStartRollingUpgrade(Time.now());
    // OP_ROLLING_UPGRADE_FINALIZE
    cluster.getNamesystem().getEditLog().logFinalizeRollingUpgrade(Time.now());
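The two calls above write the rolling-upgrade records into the edit log directly. For reference, a sketch of triggering the same operations through the public DistributedFileSystem API (available since Hadoop 2.4; assumes an active cluster):

    DistributedFileSystem dfs = cluster.getFileSystem();
    dfs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.PREPARE);  // OP_ROLLING_UPGRADE_START
    dfs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.FINALIZE); // OP_ROLLING_UPGRADE_FINALIZE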

  @Test
  public void testSaveNamespaceWithRenamedLease() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
        .numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
    OutputStream out = null;
    try {
      fs.mkdirs(new Path("/test-target"));
      out = fs.create(new Path("/test-source/foo")); // left open so the file keeps an active lease
      fs.rename(new Path("/test-source/"), new Path("/test-target/"));

      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      cluster.getNameNodeRpc().saveNamespace();
      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    } finally {
      IOUtils.cleanup(LOG, out, fs);
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
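A small follow-up sketch: the same setSafeMode entry point also reports the current state without changing it, via SAFEMODE_GET.

      boolean inSafeMode = fs.setSafeMode(SafeModeAction.SAFEMODE_GET);
      LOG.info("NameNode in safe mode: " + inSafeMode);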
