Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.MiniDFSCluster.stopDataNode()
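
Before the examples, a quick orientation: stopDataNode(int index) and stopDataNode(String name) shut down a single datanode in the mini cluster and return a DataNodeProperties handle that restartDataNode() can later use to bring the same node back. The following is a minimal sketch of that round trip, not taken from any one example below; the constructor call is the 0.20-era form, and the surrounding test class is assumed to live in the org.apache.hadoop.hdfs package, since DataNodeProperties has been package-private in some versions.

      Configuration conf = new Configuration();
      // 0.20-era constructor: 3 datanodes, format storage, default racks
      MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
      try {
        cluster.waitActive();

        // stop the last datanode; keep its state for a later restart
        int idx = cluster.getDataNodes().size() - 1;
        DataNodeProperties dnProps = cluster.stopDataNode(idx);

        // ... exercise re-replication, pipeline recovery, etc. ...

        // bring the same datanode back and wait for it to re-register
        cluster.restartDataNode(dnProps);
        cluster.waitActive();
      } finally {
        cluster.shutdown();
      }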


      // Make the last datanode look like it failed to heartbeat by
      // stopping it and calling removeDatanode.
      ArrayList<DataNode> datanodes = cluster.getDataNodes();
      int idx = datanodes.size() - 1;
      DataNode dataNode = datanodes.get(idx);
      cluster.stopDataNode(idx);
      ns.removeDatanode(dataNode.dnRegistration);

      // The block should still have sufficient # replicas, across racks.
      // The last node may not have contained a replica, but if it did
      // it should have been replicated within the same rack.


      // Fail the last datanode again, it's also on rack2 so there is
      // only 1 rack for all the replicas
      datanodes = cluster.getDataNodes();
      idx = datanodes.size() - 1;
      dataNode = datanodes.get(idx);
      cluster.stopDataNode(idx);
      ns.removeDatanode(dataNode.dnRegistration);

      // Make sure we have enough live replicas even though we are
      // short one rack, and therefore one replica is still needed
      // (on a new rack).
      DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);
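
The waitForReplication overload used above asserts, in order, on the number of racks the block spans, the number of live replicas, and the number of replicas still needed. Assuming that reading of the parameters, the healthy-cluster counterpart of this check (both racks up, nothing pending) would look like:

      // all replicas live, spread across both racks, none needed
      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);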

      // Make the last (cross rack) datanode look like it failed
      // to heartbeat by stopping it and calling removeDatanode.
      ArrayList<DataNode> datanodes = cluster.getDataNodes();
      assertEquals(3, datanodes.size());
      DataNode dataNode = datanodes.get(2);
      cluster.stopDataNode(2);
      ns.removeDatanode(dataNode.dnRegistration);

      // The block gets re-replicated to another datanode so it has a
      // sufficient # replicas, but not across racks, so there should
      // be 1 rack, and 1 needed replica (even though there are 2 hosts
      // available).

        } catch (InterruptedException ignored) {
        }
      }

      // remove a datanode to force re-establishing pipeline
      cluster.stopDataNode(0);
      // append the rest of the file
      stm.write(rawData, mid, rawData.length - mid);
      stm.close();
      // check if append is successful
      FSDataInputStream in5 = fs.open(fileToAppend);
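
For context, the fragment above sits at the tail of a test that writes the first half of a file, kills a datanode to break the write pipeline, appends the rest, and verifies the result. A hedged reconstruction of that flow (fileToAppend, rawData, mid, and in5 mirror the snippet; the setup values and the final verification are illustrative assumptions):

      Path fileToAppend = new Path("/test/fileToAppend");
      byte[] rawData = new byte[64 * 1024];
      new Random().nextBytes(rawData);
      int mid = rawData.length / 2;

      // write the first half and close the file
      FSDataOutputStream out = fs.create(fileToAppend);
      out.write(rawData, 0, mid);
      out.close();

      // reopen for append, then kill a datanode out from under the client
      FSDataOutputStream stm = fs.append(fileToAppend);
      cluster.stopDataNode(0);

      // append the rest; the client re-establishes the pipeline
      stm.write(rawData, mid, rawData.length - mid);
      stm.close();

      // verify the whole file round-trips intact
      FSDataInputStream in5 = fs.open(fileToAppend);
      byte[] readBack = new byte[rawData.length];
      in5.readFully(readBack);
      in5.close();
      assertTrue(Arrays.equals(rawData, readBack));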

        } catch (InterruptedException ignored) {
        }
      }

      // remove a datanode to force re-establishing pipeline
      cluster.stopDataNode(0);
      // write the rest of the file
      stm.write(rawData, mid, rawData.length - mid);
      stm.close();
      // check if write is successful
      FSDataInputStream in4 = fs.open(fileToWrite);

      DFSTestUtil.waitReplication(fs, fileName, (short)3);
     
      // corrupt the block on datanode 0
      Block block = DFSTestUtil.getFirstBlock(fs, fileName);
      assertTrue(cluster.corruptReplica(block.getBlockName(), 0));
      DataNodeProperties dnProps = cluster.stopDataNode(0);
      // remove block scanner log to trigger block scanning
      File scanLog = new File(System.getProperty("test.build.data"),
          "dfs/data/data1" + MiniDFSCluster.FINALIZED_DIR_NAME +
          "dncp_block_verification.log.curr");
      // wait for one minute for deletion to succeed
      scanLog.delete();
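
The DataNodeProperties captured above is what lets the stopped node be resurrected later. A short sketch of the round trip (restartDataNode and waitActive are real MiniDFSCluster methods; what happens between stop and restart depends on what the test is asserting):

      // ... let the namenode act on the corrupt, now-offline replica ...

      // bring the same datanode back with its original storage and ports
      cluster.restartDataNode(dnProps);
      cluster.waitActive();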

      cluster.waitActive(false);
     
      LOG.info("Bringing down first DN");
      // bring down first datanode
      DatanodeDescriptor datanode = datanodes[0];
      DataNodeProperties dnprop = cluster.stopDataNode(datanode.getName());
      // make sure that NN detects that the datanode is down
      synchronized (namesystem.heartbeats) {
        datanode.setLastUpdate(0); // mark it dead
        namesystem.heartbeatCheck();
      }
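
This stop-then-mark-dead idiom recurs in the next example as well. Under the same pre-0.21-era internals the snippets rely on (a public namesystem.heartbeats collection and DatanodeDescriptor.setLastUpdate), it could be factored into a hypothetical helper:

      // Hypothetical helper: stop a datanode and make the namenode treat
      // it as dead immediately, instead of waiting out the heartbeat
      // expiry interval.
      static DataNodeProperties stopAndMarkDead(MiniDFSCluster cluster,
          FSNamesystem namesystem, DatanodeDescriptor datanode) {
        DataNodeProperties dnprop = cluster.stopDataNode(datanode.getName());
        synchronized (namesystem.heartbeats) {
          datanode.setLastUpdate(0); // make the last heartbeat look ancient
          namesystem.heartbeatCheck();
        }
        return dnprop;
      }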

      }
      assertTrue(nonExcessDN != null);

      LOG.info("Stopping non-excess node: " + nonExcessDN);
      // bring down non excessive datanode
      dnprop = cluster.stopDataNode(nonExcessDN.getName());
      // make sure that NN detects that the datanode is down
      synchronized (namesystem.heartbeats) {
        nonExcessDN.setLastUpdate(0); // mark it dead
        namesystem.heartbeatCheck();
      }

      DFSTestUtil.waitReplication(fs, fileName, (short)3);
     
      // corrupt the block on datanode 0
      Block block = DFSTestUtil.getFirstBlock(fs, fileName);
      TestDatanodeBlockScanner.corruptReplica(block.getBlockName(), 0, cluster);
      DataNodeProperties dnProps = cluster.stopDataNode(0);
      // remove block scanner log to trigger block scanning
      File scanLog = new File(cluster.getBlockDirectory("data1").getParent(), "dncp_block_verification.log.curr");
      // wait for one minute for deletion to succeed
      scanLog.delete();
     

      LOG.info("Failing over to NN 1");
      scenario.run(cluster);

      assertTrue(fs.exists(TEST_PATH));
     
      cluster.stopDataNode(0);

      // write another block and a half
      AppendTestUtil.write(stm, BLOCK_AND_A_HALF, BLOCK_AND_A_HALF);
      stm.hflush();
     
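
This last fragment comes from an HA pipeline-failover test: after failing over to the standby namenode, it kills a datanode mid-write to force pipeline recovery on top of the failover. Assuming the usual conclusion of such a test, the stream would then be closed and the file verified; AppendTestUtil.check here is an assumed helper from the same test utilities as the AppendTestUtil.write call above:

      stm.close();
      // verify length and contents of everything written so far
      // (two writes of a block and a half each)
      AppendTestUtil.check(fs, TEST_PATH, BLOCK_AND_A_HALF * 2);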
