Package org.apache.hadoop.hdfs.server.datanode

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode
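Most of the snippets below share one setup pattern: the WebHDFS/servlet handler looks up the DataNode instance that was published under the "datanode" ServletContext attribute, then works from a copy of its Configuration before applying per-request settings. A minimal, self-contained sketch of that pattern (the class and method names here are illustrative, not part of Hadoop):

import javax.servlet.ServletContext;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class DataNodeLookupSketch {
  // Fetch the DataNode instance that the embedded HTTP server stored
  // under the "datanode" ServletContext attribute, as the snippets below do.
  static DataNode lookupDataNode(ServletContext context) {
    return (DataNode) context.getAttribute("datanode");
  }

  // Work from a copy of the DataNode's Configuration so per-request
  // overrides (umask, buffer size, ...) do not leak back into the daemon.
  static Configuration requestConf(DataNode datanode) {
    return new Configuration(datanode.getConf());
  }
}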



  // Decode the delegation token string from the request and resolve the
  // URI of the NameNode (nnId) that the token will be bound to.
  @VisibleForTesting
  Token<DelegationTokenIdentifier> deserializeToken(String delegation,
      String nnId) throws IOException {
    final DataNode datanode = (DataNode) context.getAttribute("datanode");
    final Configuration conf = datanode.getConf();
    final Token<DelegationTokenIdentifier> token =
        new Token<DelegationTokenIdentifier>();
    token.decodeFromUrlString(delegation);
    URI nnUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + nnId);
View Full Code Here


      final OverwriteParam overwrite,
      final BufferSizeParam bufferSize,
      final ReplicationParam replication,
      final BlockSizeParam blockSize
      ) throws IOException, URISyntaxException {
    final DataNode datanode = (DataNode)context.getAttribute("datanode");

    switch(op.getValue()) {
    case CREATE:
    {
      // Write through a DFSClient; umask 000 means the permission from the
      // request is applied exactly as given, with no bits masked off.
      final Configuration conf = new Configuration(datanode.getConf());
      conf.set(FsPermission.UMASK_LABEL, "000");

      final int b = bufferSize.getValue(conf);
      DFSClient dfsclient = newDfsClient(nnId, conf);
      FSDataOutputStream out = null;
View Full Code Here

      final String nnId,
      final String fullpath,
      final PostOpParam op,
      final BufferSizeParam bufferSize
      ) throws IOException {
    final DataNode datanode = (DataNode)context.getAttribute("datanode");

    switch(op.getValue()) {
    case APPEND:
    {
      final Configuration conf = new Configuration(datanode.getConf());
      final int b = bufferSize.getValue(conf);
      DFSClient dfsclient = newDfsClient(nnId, conf);
      FSDataOutputStream out = null;
      try {
        out = dfsclient.append(fullpath, b, null, null);
View Full Code Here

      final GetOpParam op,
      final OffsetParam offset,
      final LengthParam length,
      final BufferSizeParam bufferSize
      ) throws IOException {
    final DataNode datanode = (DataNode)context.getAttribute("datanode");
    final Configuration conf = new Configuration(datanode.getConf());

    switch(op.getValue()) {
    case OPEN:
    {
      final int b = bufferSize.getValue(conf);
View Full Code Here

          "but found to take " + diskSpace + " bytes", fileSize, diskSpace);
     
      // Check storage usage
      // can't check capacities for real storage since the OS file system may be changing under us.
      if (simulatedStorage) {
        DataNode dn = cluster.getDataNodes().get(0);
        FsDatasetSpi<?> dataset = DataNodeTestUtils.getFSDataset(dn);
        assertEquals(fileSize, dataset.getDfsUsed());
        assertEquals(SimulatedFSDataset.DEFAULT_CAPACITY-fileSize,
            dataset.getRemaining());
      }
View Full Code Here

          f, 0, Long.MAX_VALUE);
      assertEquals(1, locations.locatedBlockCount());
      LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
      int successcount = 0;
      for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
        DataNode datanode = cluster.getDataNode(datanodeinfo.getIpcPort());
        ExtendedBlock blk = locatedblock.getBlock();
        Block b = DataNodeTestUtils.getFSDataset(datanode).getStoredBlock(
            blk.getBlockPoolId(), blk.getBlockId());
        final File blockfile = DataNodeTestUtils.getFile(datanode,
            blk.getBlockPoolId(), b.getBlockId());
View Full Code Here

    ((DFSOutputStream) out.getWrappedStream()).abort();

    LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(
        file.toString(), 0, count);
    ExtendedBlock block = locations.get(0).getBlock();
    DataNode dn = cluster.getDataNodes().get(0);
    BlockLocalPathInfo localPathInfo = dn.getBlockLocalPathInfo(block, null);
    File metafile = new File(localPathInfo.getMetaPath());
    assertTrue(metafile.exists());

    // reduce the block meta file size
    RandomAccessFile raf = new RandomAccessFile(metafile, "rw");
View Full Code Here

    // Collect the on-disk block files for every block each DataNode reported.
    List<File> files = new ArrayList<File>();
    List<DataNode> datanodes = cluster.getDataNodes();
    String poolId = cluster.getNamesystem().getBlockPoolId();
    Iterable<Block>[] blocks = cluster.getAllBlockReports(poolId);
    for(int i = 0; i < blocks.length; i++) {
      DataNode dn = datanodes.get(i);
      for(Block b : blocks[i]) {
        files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
      }       
    }
    return files;
View Full Code Here

      final String path = ServletUtil.getDecodedPath(request, "/getFileChecksum");
      final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
      xml.declaration();

      final ServletContext context = getServletContext();
      final DataNode datanode = (DataNode) context.getAttribute("datanode");
      final Configuration conf =
        new HdfsConfiguration(datanode.getConf());
      final int socketTimeout = conf.getInt(
          DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
          HdfsServerConstants.READ_TIMEOUT);
      final SocketFactory socketFactory = NetUtils.getSocketFactory(conf,
          ClientProtocol.class);
View Full Code Here

    final Block blk = lb.getBlock();
    assertEquals(len1, lb.getBlockSize());

    DatanodeInfo[] datanodeinfos = lb.getLocations();
    assertEquals(repl, datanodeinfos.length);
    final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
    final FSDataset data = (FSDataset)dn.getFSDataset();
    final RandomAccessFile raf = new RandomAccessFile(data.getBlockFile(blk), "rw");
    AppendTestUtil.LOG.info("dn=" + dn + ", blk=" + blk + " (length=" + blk.getNumBytes() + ")");
    assertEquals(len1, raf.length());
    raf.setLength(0);
    raf.close();
View Full Code Here
