Examples of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage
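
Before the excerpts, a minimal self-contained sketch of constructing DatanodeStorage objects directly. The constructors, the State enum and the StorageType parameter all appear in the excerpts below; the import locations, class name and storage-ID values here are illustrative assumptions (a Hadoop 2.x-style layout), not taken from the excerpts.

// Sketch only: the one-argument constructor defaults to State.NORMAL,
// the three-argument form also fixes the storage media type.
import org.apache.hadoop.fs.StorageType;   // assumed import location (2.6+ layout)
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

public class DatanodeStorageSketch {
  public static void main(String[] args) {
    // Storage identified only by its ID.
    DatanodeStorage byId = new DatanodeStorage("DS-example-1");

    // Storage with an explicit state and media type.
    DatanodeStorage ssd = new DatanodeStorage(
        "DS-example-2", DatanodeStorage.State.NORMAL, StorageType.SSD);

    System.out.println(byId.getStorageID() + " / " + ssd.getStorageType());
  }
}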


// Excerpt: protobuf conversion helpers for DatanodeStorage and its storage state.
      return StorageState.NORMAL;
    }
  }

  public static DatanodeStorage convert(DatanodeStorageProto s) {
    return new DatanodeStorage(s.getStorageID(), PBHelper.convert(s.getState()));
  }


// Excerpt: registering a datanode and sending its first (empty) block report.
          new ExportedBlockKeys(), VersionInfo.getVersion());
      DataNode.setNewStorageID(dnRegistration);
      // Register the datanode with the namenode
      dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
      // First block report, built from an empty block list
      storage = new DatanodeStorage(dnRegistration.getStorageID());
      final StorageBlockReport[] reports = {
          new StorageBlockReport(storage,
              new BlockListAsLongs(null, null).getBlockListAsLongs())
      };
      nameNodeProto.blockReport(dnRegistration,

// Excerpt: building a block report for one block pool and sending it to the namenode.
          bpos.getBlockPoolId());

      // Send block report
      long brSendStartTime = now();
      StorageBlockReport[] report = { new StorageBlockReport(
          new DatanodeStorage(bpRegistration.getStorageID()),
          bReport.getBlockListAsLongs()) };
      cmd = bpNamenode.blockReport(bpRegistration, bpos.getBlockPoolId(), report);

      // Log the block report processing stats from Datanode perspective
      long brSendCost = now() - brSendStartTime;


// Excerpt: a block report from a dead datanode must be rejected.
      // Expected
    }

    // Ensure blockReport from dead datanode is rejected with IOException
    StorageBlockReport[] report = { new StorageBlockReport(
        new DatanodeStorage(reg.getStorageID()),
        new long[] { 0L, 0L, 0L }) };
    try {
      dnp.blockReport(reg, poolId, report);
      Assert.fail("Expected IOException is not thrown");
    } catch (IOException ex) {

// Excerpt: building one StorageBlockReport per storage volume from per-volume block lists.
      StorageBlockReport[] reports =
          new StorageBlockReport[perVolumeBlockLists.size()];

      int i = 0;
      for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
        DatanodeStorage dnStorage = kvPair.getKey();
        BlockListAsLongs blockList = kvPair.getValue();
        totalBlockCount += blockList.getNumberOfBlocks();

        reports[i++] =
            new StorageBlockReport(
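        // (The excerpt is cut off above; judging from the
        // StorageBlockReport(DatanodeStorage, long[]) constructor used in the other
        // excerpts, the loop presumably finishes along the lines of
        // reports[i++] = new StorageBlockReport(dnStorage, blockList.getBlockListAsLongs());)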

// Excerpt: a storage volume exposing itself as a DatanodeStorage in NORMAL state.
  public StorageType getStorageType() {
    return storageType;
  }
 
  DatanodeStorage toDatanodeStorage() {
    return new DatanodeStorage(storageID, DatanodeStorage.State.NORMAL, storageType);
  }

// Excerpt: test helper creating a DatanodeStorageInfo backed by a new DatanodeStorage.
    }
    return storages;
  }

  public static DatanodeStorageInfo createDatanodeStorageInfo(
      String storageID, String ip, String rack) {
    final DatanodeStorage storage = new DatanodeStorage(storageID);
    final DatanodeDescriptor dn = BlockManagerTestUtil.getDatanodeDescriptor(ip, rack, storage);
    return BlockManagerTestUtil.newDatanodeStorageInfo(dn, storage);
  }
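
A call to the helper above might look like the following; the storage ID, IP and rack values are purely illustrative:

  // Hypothetical arguments; any storage ID, datanode IP and rack path work here.
  DatanodeStorageInfo info = createDatanodeStorageInfo("s1", "10.0.0.1", "/rack1");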

// Excerpt: a dataset implementation reporting all blocks for a block pool under a single DatanodeStorage.
  }

  @Override
  public synchronized Map<DatanodeStorage, BlockListAsLongs> getBlockReports(
      String bpid) {
    return Collections.singletonMap(new DatanodeStorage(storage.storageUuid), getBlockReport(bpid));
  }

// Excerpt: building a StorageReport with capacity and usage figures for one storage.
    String getStorageUuid() {
      return storageUuid;
    }

    synchronized StorageReport getStorageReport(String bpid) {
      return new StorageReport(new DatanodeStorage(getStorageUuid()),
          false, getCapacity(), getUsed(), getFree(),
          map.get(bpid).getUsed());
    }