Package org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos

Examples of org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds
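
RegionStoreSequenceIds is a protobuf-generated message (built from HBase's ZooKeeper.proto) that carries a region's last flushed WAL sequence id together with one StoreSequenceId entry per column family store. During distributed log replay it is kept, prefixed with the protobuf magic bytes, in a znode under the recovering-regions path so that log splitting and replay can skip edits that were already flushed. The examples below show the message being fetched from ZooKeeper via SplitLogManager, unpacked into per-store maps, and deserialized from its magic-prefixed byte form.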


        // Resolve the last flushed sequence id for the region whose WAL edits are being split,
        // starting from the in-memory cache keyed by the encoded region name.
        byte[] region = entry.getKey().getEncodedRegionName();
        String key = Bytes.toString(region);
        lastFlushedSequenceId = lastFlushedSequenceIds.get(key);
        if (lastFlushedSequenceId == null) {
          if (this.distributedLogReplay) {
            // Under distributed log replay the id is kept in ZooKeeper, keyed by the failed server.
            RegionStoreSequenceIds ids =
                SplitLogManager.getRegionFlushedSequenceId(this.watcher, failedServerName, key);
            if (ids != null) {
              lastFlushedSequenceId = ids.getLastFlushedSequenceId();
            }
          } else if (sequenceIdChecker != null) {
            // Otherwise ask the sequence id checker (the hosting region server) directly.
            lastFlushedSequenceId = sequenceIdChecker.getLastSequenceId(region);
          }
          if (lastFlushedSequenceId == null) {
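
The lookup order above (local cache first, then the RegionStoreSequenceIds fetched from ZooKeeper, then a default) can be reduced to a small standalone helper. The following is a minimal sketch under assumptions of my own: the SequenceIdResolver class name, the ConcurrentHashMap cache, and the -1 default are illustrative choices, not HBase code.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds;

/** Minimal sketch: cache first, then the message read from ZK, then a default of -1. */
public class SequenceIdResolver {
  // Encoded region name -> last flushed sequence id (plays the role of lastFlushedSequenceIds).
  private final Map<String, Long> cache = new ConcurrentHashMap<String, Long>();

  /** @param ids message fetched from ZK for the recovering region, or null if none was found */
  public long resolve(String encodedRegionName, RegionStoreSequenceIds ids) {
    Long last = cache.get(encodedRegionName);
    if (last == null) {
      last = (ids != null) ? ids.getLastFlushedSequenceId() : -1L;
      cache.put(encodedRegionName, last);
    }
    return last;
  }
}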


        Long cachedLastFlushedSequenceId =
            lastFlushedSequenceIds.get(loc.getRegionInfo().getEncodedName());

        // Retrieve the last flushed sequence id from ZK, because the region's postOpenDeployTasks
        // will have updated the value stored there for the region.
        RegionStoreSequenceIds ids =
            SplitLogManager.getRegionFlushedSequenceId(watcher, failedServerName, loc
                .getRegionInfo().getEncodedName());
        if (ids != null) {
          lastFlushedSequenceId = ids.getLastFlushedSequenceId();
          // Collect the per-column-family (store) max sequence ids carried in the message.
          Map<byte[], Long> storeIds = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
          List<StoreSequenceId> maxSeqIdInStores = ids.getStoreSequenceIdList();
          for (StoreSequenceId id : maxSeqIdInStores) {
            storeIds.put(id.getFamilyName().toByteArray(), id.getSequenceId());
          }
          regionMaxSeqIdInStores.put(loc.getRegionInfo().getEncodedName(), storeIds);
        }
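
For reference, the generated builder API that produces such a message looks like the sketch below. This is a self-contained illustration only; the column family name "cf" and the numeric sequence ids are made-up values.

import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId;
import org.apache.hadoop.hbase.util.Bytes;

import com.google.protobuf.ByteString;

public class RegionStoreSequenceIdsExample {
  public static void main(String[] args) {
    // Build a message carrying the region-level id plus one per-store id, as the snippet reads.
    RegionStoreSequenceIds ids = RegionStoreSequenceIds.newBuilder()
        .setLastFlushedSequenceId(1500L)
        .addStoreSequenceId(StoreSequenceId.newBuilder()
            .setFamilyName(ByteString.copyFrom(Bytes.toBytes("cf")))
            .setSequenceId(1480L)
            .build())
        .build();

    // Unpack it into a family -> sequence id map, mirroring regionMaxSeqIdInStores above.
    Map<byte[], Long> storeIds = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
    for (StoreSequenceId id : ids.getStoreSequenceIdList()) {
      storeIds.put(id.getFamilyName().toByteArray(), id.getSequenceId());
    }
    System.out.println("last flushed=" + ids.getLastFlushedSequenceId()
        + ", stores=" + storeIds.size());
  }
}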

      // Reached when the bytes do not start with the expected PB magic prefix.
      throw new DeserializationException("Unable to parse RegionStoreSequenceIds.");
    }
    RegionStoreSequenceIds.Builder regionSequenceIdsBuilder =
        ZooKeeperProtos.RegionStoreSequenceIds.newBuilder();
    // Skip the PB magic prefix and parse the remaining bytes as a RegionStoreSequenceIds message.
    int pblen = ProtobufUtil.lengthOfPBMagic();
    RegionStoreSequenceIds storeIds = null;
    try {
      storeIds = regionSequenceIdsBuilder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
    } catch (InvalidProtocolBufferException e) {
      throw new DeserializationException(e);
    }
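
The parsing code above is the read half of a magic-prefixed protobuf round trip. A sketch of both halves follows, using the ProtobufUtil prefix helpers; the class name SequenceIdsCodecSketch is mine, and the import location of DeserializationException can differ between HBase versions, so treat it as an assumption.

import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds;

import com.google.protobuf.InvalidProtocolBufferException;

public class SequenceIdsCodecSketch {

  /** Serialize: the message bytes are prefixed with the PB magic before being written to ZK. */
  public static byte[] toByteArray(RegionStoreSequenceIds ids) {
    return ProtobufUtil.prependPBMagic(ids.toByteArray());
  }

  /** Parse: the inverse operation, including the magic-prefix check the snippet above begins with. */
  public static RegionStoreSequenceIds parse(byte[] bytes) throws DeserializationException {
    if (bytes == null || !ProtobufUtil.isPBMagicPrefix(bytes)) {
      throw new DeserializationException("Unable to parse RegionStoreSequenceIds.");
    }
    int pblen = ProtobufUtil.lengthOfPBMagic();
    try {
      return ZooKeeperProtos.RegionStoreSequenceIds.newBuilder()
          .mergeFrom(bytes, pblen, bytes.length - pblen).build();
    } catch (InvalidProtocolBufferException e) {
      throw new DeserializationException(e);
    }
  }
}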

    // If the newly assigned RS fails again (a chained RS failure scenario), the last flushed
    // sequence id namespace changes, because a sequence id is only valid for a particular RS
    // instance and a different newly assigned RS may flush the region.
    // Therefore, in this mode we need to fetch the last sequence ids from ZK, where we keep a
    // history of the last flushed sequence id for each failed RS instance.
    RegionStoreSequenceIds result = null;
    // znode layout: <recoveringRegionsZNode>/<encodedRegionName>/<failedServerName>
    String nodePath = ZKUtil.joinZNode(zkw.recoveringRegionsZNode, encodedRegionName);
    nodePath = ZKUtil.joinZNode(nodePath, serverName);
    try {
      byte[] data = ZKUtil.getData(zkw, nodePath);
      if (data != null) {
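
The znode read here lives at <recoveringRegionsZNode>/<encodedRegionName>/<failedServerName>. A tiny, self-contained sketch of that path construction follows; the base path, region name, and server name literals are illustrative values only.

import org.apache.hadoop.hbase.zookeeper.ZKUtil;

public class RecoveringRegionZNodePath {
  public static void main(String[] args) {
    // Illustrative values; in the snippet the base path comes from zkw.recoveringRegionsZNode.
    String recoveringRegionsZNode = "/hbase/recovering-regions";
    String encodedRegionName = "1588230740";
    String failedServerName = "rs1.example.com,60020,1407222512345";

    // Same construction as above: <base>/<encoded region name>/<failed server name>
    String nodePath = ZKUtil.joinZNode(recoveringRegionsZNode, encodedRegionName);
    nodePath = ZKUtil.joinZNode(nodePath, failedServerName);
    System.out.println(nodePath);
    // -> /hbase/recovering-regions/1588230740/rs1.example.com,60020,1407222512345
  }
}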