Package org.apache.hadoop.hdfs.server.datanode

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode
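
The snippets below appear to be drawn from Hadoop's own tests and from the datanode's WebHDFS servlet code. Most of the test fragments assume a running MiniDFSCluster bound to variables named cluster, conf, and fs; a minimal sketch of that harness follows (the class name is mine; the MiniDFSCluster.Builder API is real):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniClusterHarness {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(1)
            .build();
        try {
          cluster.waitActive();                     // block until the NN and DNs are up
          FileSystem fs = cluster.getFileSystem();
          // ... exercise the DataNode as the examples below do ...
        } finally {
          cluster.shutdown();
        }
      }
    }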


    // Collect the on-disk file for every block reported by every datanode.
    List<File> files = new ArrayList<File>();
    List<DataNode> datanodes = cluster.getDataNodes();
    String poolId = cluster.getNamesystem().getBlockPoolId();
    List<Map<DatanodeStorage, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
    for(int i = 0; i < blocks.size(); i++) {
      DataNode dn = datanodes.get(i);
      Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
      for(Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
        for(Block b : e.getValue()) {
          files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
        }
      }
    }
    return files;
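This is the per-storage form of MiniDFSCluster.getAllBlockReports(), which returns one Map<DatanodeStorage, BlockListAsLongs> per datanode; the final example on this page shows the older variant of the same method body, where the block reports came back as a plain Iterable<Block>[].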


    // Clear the content type so the container does not emit a default one.
    response.setContentType(null);

    if (UserGroupInformation.isSecurityEnabled()) {
      // Rebuild the delegation token from the "delegation" query parameter
      // and attach it to the caller's UGI so it is picked up for NameNode RPC.
      final DataNode datanode = (DataNode)context.getAttribute("datanode");
      final InetSocketAddress nnRpcAddr = NameNode.getAddress(datanode.getConf());
      final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
      token.decodeFromUrlString(delegation.getValue());
      SecurityUtil.setTokenService(token, nnRpcAddr);
      token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
      ugi.addToken(token);
    }
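The "delegation" parameter carries a Token serialized with Token.encodeToUrlString(), the inverse of the decodeFromUrlString() call above. A minimal round-trip sketch (class and method names here are mine; only the Token API is real):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
    import org.apache.hadoop.security.token.Token;

    public class TokenUrlRoundTrip {
      // Serialize a delegation token into a URL-safe string and parse it back.
      static Token<DelegationTokenIdentifier> roundTrip(
          Token<DelegationTokenIdentifier> original) throws IOException {
        String encoded = original.encodeToUrlString();   // producer side
        Token<DelegationTokenIdentifier> copy = new Token<DelegationTokenIdentifier>();
        copy.decodeFromUrlString(encoded);               // consumer side, as in the snippet above
        return copy;
      }
    }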

    return ugi.doAs(new PrivilegedExceptionAction<Response>() {
      @Override
      public Response run() throws IOException, URISyntaxException {
        final String fullpath = path.getAbsolutePath();
        final DataNode datanode = (DataNode)context.getAttribute("datanode");

        switch(op.getValue()) {
        case CREATE:
        {
          // Copy the datanode's configuration and disable the umask so the
          // client-supplied permission is applied verbatim.
          final Configuration conf = new Configuration(datanode.getConf());
          conf.set(FsPermission.UMASK_LABEL, "000");

          final int b = bufferSize.getValue(conf);
          DFSClient dfsclient = new DFSClient(conf);
          FSDataOutputStream out = null;
          // ... (create the file and stream the request body; truncated)
        }
        // ... (other ops; truncated)
        }
      }
    });
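The truncated tail of handlers like this typically follows Hadoop's stream-copy idiom with org.apache.hadoop.io.IOUtils. A hedged sketch of that idiom, continuing from the variables above (`in` as the HTTP request body and `LOG` are assumptions, and the actual create call stays elided):

    try {
      // 'out' would be the stream created for fullpath (call elided above)
      IOUtils.copyBytes(in, out, b);   // pump the request body into HDFS
      out.close();
      out = null;                      // mark the stream as cleanly closed
    } finally {
      IOUtils.cleanup(LOG, out);       // closes quietly only if the copy failed
      dfsclient.close();
    }

The APPEND handler below ends the same way once dfsclient.append() has produced its output stream.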

    return ugi.doAs(new PrivilegedExceptionAction<Response>() {
      @Override
      public Response run() throws IOException {
        final String fullpath = path.getAbsolutePath();
        final DataNode datanode = (DataNode)context.getAttribute("datanode");

        switch(op.getValue()) {
        case APPEND:
        {
          final Configuration conf = new Configuration(datanode.getConf());
          final int b = bufferSize.getValue(conf);
          DFSClient dfsclient = new DFSClient(conf);
          FSDataOutputStream out = null;
          try {
            // Reopen the existing file for append with the requested buffer size.
            out = dfsclient.append(fullpath, b, null, null);
            // ... (copy the request body into 'out', then clean up; truncated)

    return ugi.doAs(new PrivilegedExceptionAction<Response>() {
      @Override
      public Response run() throws IOException {
        final String fullpath = path.getAbsolutePath();
        final DataNode datanode = (DataNode)context.getAttribute("datanode");
        final Configuration conf = new Configuration(datanode.getConf());
        final InetSocketAddress nnRpcAddr = NameNode.getAddress(conf);

        switch(op.getValue()) {
        case OPEN:
        {
          // ... (open the file and stream its contents back; truncated)

      // Write random data and hflush() it so the last block stays in the
      // replica-being-written (rbw) state instead of being finalized.
      byte[] writeBuf = new byte[fileLen];
      new Random().nextBytes(writeBuf);
      out = fs.create(src);
      out.write(writeBuf);
      out.hflush();
      DataNode dn = cluster.getDataNodes().get(0);
      for (FsVolumeSpi v : dataset(dn).getVolumes()) {  // dataset() is the test's FsDataset helper
        final FsVolumeImpl volume = (FsVolumeImpl)v;
        // Derive the directory that contains the rbw/ subdirectory from the
        // volume's current directory.
        File currentDir = volume.getCurrentDir().getParentFile().getParentFile();
        File rbwDir = new File(currentDir, "rbw");
        for (File file : rbwDir.listFiles()) {
          // ... (inspect each in-flight replica file; truncated)
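For reference, a replica in rbw/ is stored as a pair of files: the data file blk_<id> and its checksum file blk_<id>_<genstamp>.meta. A small self-contained sketch (class name mine) that labels the two, including the null check File.listFiles() needs for a missing directory:

    import java.io.File;

    class RbwLister {
      // Print every in-flight replica file in a datanode rbw/ directory.
      static void listRbw(File rbwDir) {
        File[] entries = rbwDir.listFiles();
        if (entries == null) {
          return;                       // directory missing or unreadable
        }
        for (File f : entries) {
          String kind = f.getName().endsWith(".meta") ? "meta" : "data";
          System.out.println(kind + ": " + f.getName());
        }
      }
    }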

        // Create small single-replica files and wait until each is replicated.
        Path fileName = new Path("/test"+i);
        DFSTestUtil.createFile(fs, fileName, 1, (short)1, 0L);
        DFSTestUtil.waitReplication(fs, fileName, (short)1);
      }
      String bpid = cluster.getNamesystem().getBlockPoolId();
      DataNode dn = cluster.getDataNodes().get(0);
      Iterator<ReplicaInfo> replicasItor =
          dataset(dn).volumeMap.replicas(bpid).iterator();
      ReplicaInfo replica = replicasItor.next();
      // Leave stale unlinkTmp copies behind for both the block file and the
      // meta file, simulating an interrupted unlink operation.
      createUnlinkTmpFile(replica, true, true); // rename block file
      createUnlinkTmpFile(replica, false, true); // rename meta file

      fs = cluster.getFileSystem();
      final FSNamesystem namesystem = cluster.getNamesystem();

      // Use a very long heartbeat interval (300s) so the new datanode's
      // heartbeats do not interfere with the test, then add a fourth datanode.
      conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 300);
      cluster.startDataNodes(conf, 1, true, null, null, null);
      DataNode lastDN = cluster.getDataNodes().get(3);
      DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
          lastDN, namesystem.getBlockPoolId());
      String lastDNid = dnReg.getStorageID();

      final Path fileName = new Path("/foo2");

      out.hsync();
      cluster.startDataNodes(conf, 1, true, null, null, null);
      String bpid = namesystem.getBlockPoolId();
      ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, testPath);
      Block block = blk.getLocalBlock();
      DataNode dn = cluster.getDataNodes().get(0);

      // Locate the partial block and its meta file in the RBW folder of the
      // first datanode so they can be deleted.
      File blockFile = DataNodeTestUtils.getBlockFile(dn, bpid, block);
      File metaFile = DataNodeTestUtils.getMetaFile(dn, bpid, block);
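A plausible continuation, sketched rather than taken from the original test: delete both files and restart the datanode so it re-scans its storage (assertTrue is JUnit's; restartDataNode() and waitActive() are real MiniDFSCluster methods):

    assertTrue(blockFile.delete());   // drop the partial block data
    assertTrue(metaFile.delete());    // drop its checksum file
    cluster.restartDataNode(0);       // force the datanode to re-scan storage
    cluster.waitActive();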

    // Same collection logic against the older MiniDFSCluster API, where
    // getAllBlockReports() returned a plain Iterable<Block> per datanode.
    List<File> files = new ArrayList<File>();
    List<DataNode> datanodes = cluster.getDataNodes();
    String poolId = cluster.getNamesystem().getBlockPoolId();
    Iterable<Block>[] blocks = cluster.getAllBlockReports(poolId);
    for(int i = 0; i < blocks.length; i++) {
      DataNode dn = datanodes.get(i);
      for(Block b : blocks[i]) {
        files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
      }
    }
    return files;
