Package: com.splout.db.common

Examples of com.splout.db.common.ReplicationEntry


    long deployDate = System.currentTimeMillis(); // Here is where we decide the data of the deployment for all deployed
                                                  // tablespaces

    for(DeployRequest req : deployRequests) {
      for(Object obj : req.getReplicationMap()) {
        ReplicationEntry rEntry = (ReplicationEntry) obj;
        PartitionEntry pEntry = null;
        for(PartitionEntry partEntry : req.getPartitionMap()) {
          if(partEntry.getShard().equals(rEntry.getShard())) {
            pEntry = partEntry;
          }
        }
        if(pEntry == null) {
          String msg = "No Partition metadata for shard: " + rEntry.getShard()
              + " this is very likely to be a software bug.";
          log.error(msg);
          try {
            log.error("Partition map: " + JSONSerDe.ser(req.getPartitionMap()));
            log.error("Replication map: " + JSONSerDe.ser(req.getReplicationMap()));
          } catch (JSONSerDe.JSONSerDeException e) {
            log.error("JSON error", e);
          }
          throw new RuntimeException(msg);
        }
        // Normalize DNode ids -> The convention is that DNodes are identified by host:port . So we need to strip the
        // protocol, if any
        for(int i = 0; i < rEntry.getNodes().size(); i++) {
          String dnodeId = rEntry.getNodes().get(i);
          if(dnodeId.startsWith("tcp://")) {
            dnodeId = dnodeId.substring("tcp://".length(), dnodeId.length());
          }
          rEntry.getNodes().set(i, dnodeId);
        }
        for(String dNode : rEntry.getNodes()) {
          List<DeployAction> actionsSoFar = (List<DeployAction>) MapUtils.getObject(actions, dNode,
              new ArrayList<DeployAction>());
          actions.put(dNode, actionsSoFar);
          DeployAction deployAction = new DeployAction();
          deployAction.setDataURI(req.getData_uri() + "/" + rEntry.getShard() + ".db");
          deployAction.setTablespace(req.getTablespace());
          deployAction.setVersion(version);
          deployAction.setPartition(rEntry.getShard());

          // Add partition metadata to the deploy action for DNodes to save it
          PartitionMetadata metadata = new PartitionMetadata();
          metadata.setMinKey(pEntry.getMin());
          metadata.setMaxKey(pEntry.getMax());
          metadata.setNReplicas(rEntry.getNodes().size());
          metadata.setDeploymentDate(deployDate);
          metadata.setInitStatements(req.getInitStatements());
          metadata.setEngineId(req.getEngine());

          deployAction.setMetadata(metadata);
View Full Code Here


        partitionEntry.setMin((i * 10 + 10) + "");
        partitionEntry.setMax((i * 10 + 20) + "");
        partitionEntry.setShard(i);
        partitions.add(partitionEntry);
        // And the replication
        ReplicationEntry repEntry = new ReplicationEntry();
        repEntry.setShard(i);
        repEntry.setNodes(Arrays.asList(new String[] { "localhost:" + dNodeConfig.getInt(DNodeProperties.PORT) }));
        replicationEntries.add(repEntry);
      }

      Tablespace tablespace1 = new Tablespace(new PartitionMap(partitions), new ReplicationMap(replicationEntries), 1l,
          0l);
View Full Code Here

    DNode dnode2 = TestUtils.getTestDNode(config2, succeed, "dnode-" + this.getClass().getName() + "-2");

    try {
      handler.init(config);

      ReplicationEntry repEntry1 = new ReplicationEntry(0, dnode1.getAddress(), dnode2.getAddress());

      DeployRequest deployRequest1 = new DeployRequest();
      deployRequest1.setTablespace("t1");
      deployRequest1.setPartitionMap(PartitionMap.oneShardOpenedMap().getPartitionEntries());
      deployRequest1.setReplicationMap(Arrays.asList(repEntry1));
View Full Code Here

        int nonEmptyReplicas = 0;

        Iterator<ReplicationEntry> repIter = tablespace.getReplicationMap().getReplicationEntries()
            .iterator();
        while(repIter.hasNext()) {
          ReplicationEntry entry = repIter.next();
          int partition = entry.getShard();
          if(entry.getNodes().contains(dNodeInfo.getAddress())) {
            // Yes!
            // So we have to check if this DNode is still serving this version/partition or not
            if((dNodeInfo.getServingInfo().get(tablespaceName) == null)
                || (dNodeInfo.getServingInfo().get(tablespaceName).get(version) == null)
                || (dNodeInfo.getServingInfo().get(tablespaceName).get(version).get(partition) == null)) {
              // NO! So we have to remove the DNode
              entry.getNodes().remove(dNodeInfo.getAddress());
              if(entry.getNodes().isEmpty()) {
                repIter.remove();
                // Remove also from PartitionMap
                PartitionEntry pEntry = new PartitionEntry();
                pEntry.setShard(entry.getShard());
                tablespace.getPartitionMap().getPartitionEntries().remove(pEntry);
              }
            }
          }
          if(!entry.getNodes().isEmpty()) {
            nonEmptyReplicas++;
          }
        }
        if(nonEmptyReplicas == 0) {
          // Delete TablespaceVersion
          log.info("Removing empty tablespace version (implicit leaving from " + dNodeInfo.getAddress()
              + "): " + tablespaceName + ", " + version);
          iterator.remove();
        }
      }

      // Now iterate over all the tablespaces of this DNode to see new additions or EXPLICIT leavings
      for(Map.Entry<String, Map<Long, Map<Integer, PartitionMetadata>>> tablespaceEntry : dNodeInfo
          .getServingInfo().entrySet()) {
        String tablespaceName = tablespaceEntry.getKey();
        // Iterate over all versions of this tablespace
        for(Map.Entry<Long, Map<Integer, PartitionMetadata>> versionEntry : tablespaceEntry.getValue()
            .entrySet()) {
          Long versionName = versionEntry.getKey();
          TablespaceVersion tablespaceVersion = new TablespaceVersion(tablespaceName, versionName);
          Tablespace currentTablespace = tablespaceVersionMap.get(tablespaceVersion);
          List<PartitionEntry> partitionMap = new ArrayList<PartitionEntry>();
          List<ReplicationEntry> replicationMap = new ArrayList<ReplicationEntry>();
          long deployDate = -1;
          if(currentTablespace != null) {
            // Not first time we see this tablespace. We do a copy of the partition map to be able to modify it without
            // contention.
            partitionMap.addAll(currentTablespace.getPartitionMap().getPartitionEntries());
            replicationMap.addAll(currentTablespace.getReplicationMap().getReplicationEntries());
            deployDate = currentTablespace.getCreationDate();
          }
          // Iterate over all partitions of this tablespace
          for(Map.Entry<Integer, PartitionMetadata> partition : versionEntry.getValue().entrySet()) {
            deployDate = deployDate == -1 ? partition.getValue().getDeploymentDate() : deployDate;
            if(deployDate != -1 && (deployDate != partition.getValue().getDeploymentDate())) {
              throw new TablespaceVersionInfoException(
                  "Inconsistent partition metadata within same node, deploy date was " + deployDate
                      + " versus " + partition.getValue().getDeploymentDate());
            }
            PartitionMetadata metadata = partition.getValue();
            Integer shard = partition.getKey();
            // Create a PartitionEntry according to this PartitionMetadata
            PartitionEntry myEntry = new PartitionEntry();
            myEntry.setMax(metadata.getMaxKey());
            myEntry.setMin(metadata.getMinKey());
            myEntry.setShard(shard);
            PartitionEntry existingPartitionEntry = null;
            // Look for an existing PartitionEntry for the same shard in the PartitionMap
            if(!partitionMap.contains(myEntry)) {
              if(!event.equals(DNodeEvent.LEAVE)) {
                // In this case all conditions are met for adding a new entry to the PartitionMap
                partitionMap.add(myEntry);
                // Note that now the PartitionMap is not necessarily sorted! let's sort it now
                Collections.sort(partitionMap);
              }
            } else {
              // Check consistency of this Partition Metadata
              existingPartitionEntry = partitionMap.get(partitionMap.indexOf(myEntry));
              if(existingPartitionEntry.getMax() == null || myEntry.getMax() == null) {
                if(!(existingPartitionEntry.getMax() == null && myEntry.getMax() == null)) {
                  throw new TablespaceVersionInfoException(
                      "Inconsistent partition metadata between nodes: " + existingPartitionEntry
                          + " versus " + myEntry);
                }
              } else {
                if(!existingPartitionEntry.getMax().equals(myEntry.getMax())) {
                  throw new TablespaceVersionInfoException(
                      "Inconsistent partition metadata between nodes: " + existingPartitionEntry
                          + " versus " + myEntry);
                }
              }
              if(existingPartitionEntry.getMin() == null || myEntry.getMin() == null) {
                if(!(existingPartitionEntry.getMin() == null && myEntry.getMin() == null)) {
                  throw new TablespaceVersionInfoException(
                      "Inconsistent partition metadata between nodes: " + existingPartitionEntry
                          + " versus " + myEntry);
                }
              } else {
                if(!existingPartitionEntry.getMin().equals(myEntry.getMin())) {
                  throw new TablespaceVersionInfoException(
                      "Inconsistent partition metadata between nodes: " + existingPartitionEntry
                          + " versus " + myEntry);
                }
              }
            }
            // Create a ReplicationEntry according to this PartitionMetadata
            // Will only contain this DNode as we don't know about the others yet
            ReplicationEntry reEntry = new ReplicationEntry();
            reEntry.setShard(shard);
            reEntry.setExpectedReplicationFactor(metadata.getNReplicas());
            reEntry.setNodes(new ArrayList<String>());
            // Look for an existing ReplicationEntry for the same shard in the ReplicationMap
            if(replicationMap.contains(reEntry)) {
              ReplicationEntry existingEntry = replicationMap.get(replicationMap.indexOf(reEntry));
              if(event.equals(DNodeEvent.LEAVE)) {
                // Remove it from replication map and partition map
                existingEntry.getNodes().remove(dNodeInfo.getAddress());
                if(existingEntry.getNodes().isEmpty()) {
                  replicationMap.remove(existingEntry);
                  if(existingPartitionEntry != null) {
                    partitionMap.remove(existingPartitionEntry);
                  } else {
                    throw new RuntimeException(
                        "ReplicationEntry for one shard with no associated PartitionEntry. This is very likely to be a software bug.");
                  }
                }
              } else {
                if(!existingEntry.getNodes().contains(dNodeInfo.getAddress())) {
                  // Add it to replication map
                  existingEntry.getNodes().add(dNodeInfo.getAddress());
                } else {
                  // We are adding / updating but the node already exists in the replication map.
                }
              }
            } else if(!event.equals(DNodeEvent.LEAVE)) { // Otherwise just add and sort
View Full Code Here

  public void testAllFailingDNodes() throws JSONSerDeException {
    SploutConfiguration testConfig = SploutConfiguration.getTestConfig();
    QNodeHandlerContext context = new QNodeHandlerContext(testConfig, null);

    List<ReplicationEntry> rEntries = new ArrayList<ReplicationEntry>();
    rEntries.add(new ReplicationEntry(0, "localhost:4444", "localhost:5555", "localhost:6666"));

    Tablespace tablespace = new Tablespace(PartitionMap.oneShardOpenedMap(), new ReplicationMap(rEntries), 0, 0);
    context.getTablespaceVersionsMap().put(new TablespaceVersion("t1", 0l), tablespace);
    context.getCurrentVersionsMap().put("t1", 0l);
View Full Code Here

    };
    DNode dnode1 = TestUtils.getTestDNode(testConfig, okQueryHandler, "dnode-" + this.getClass().getName() + "-1");
    DNode dnode2 = TestUtils.getTestDNode(testConfig, okQueryHandler, "dnode-" + this.getClass().getName() + "-2");
   
    List<ReplicationEntry> rEntries = new ArrayList<ReplicationEntry>();
    rEntries.add(new ReplicationEntry(0, dnode1.getAddress(), dnode2.getAddress()));

    QNodeHandlerContext context = new QNodeHandlerContext(testConfig, null);
   
    Tablespace tablespace = new Tablespace(PartitionMap.oneShardOpenedMap(), new ReplicationMap(rEntries), 0, 0);
    context.getTablespaceVersionsMap().put(new TablespaceVersion("t1", 0l), tablespace);
View Full Code Here

    };
    DNode dnode1 = TestUtils.getTestDNode(testConfig, okQueryHandler, "dnode-" + this.getClass().getName() + "-1");
    DNode dnode2 = TestUtils.getTestDNode(testConfig, okQueryHandler, "dnode-" + this.getClass().getName() + "-2");
   
    List<ReplicationEntry> rEntries = new ArrayList<ReplicationEntry>();
    rEntries.add(new ReplicationEntry(0, dnode1.getAddress(), "fakeaddress:1111", dnode2.getAddress(), "fakeaddress:2222"));

    QNodeHandlerContext context = new QNodeHandlerContext(testConfig, null);
   
    Tablespace tablespace = new Tablespace(PartitionMap.oneShardOpenedMap(), new ReplicationMap(rEntries), 0, 0);
    context.getTablespaceVersionsMap().put(new TablespaceVersion("t1", 0l), tablespace);
View Full Code Here

    };
    DNode dnode1 = TestUtils.getTestDNode(testConfig, okQueryHandler, "dnode-" + this.getClass().getName() + "-1");
    DNode dnode2 = TestUtils.getTestDNode(testConfig, okQueryHandler, "dnode-" + this.getClass().getName() + "-2");
   
    List<ReplicationEntry> rEntries = new ArrayList<ReplicationEntry>();
    rEntries.add(new ReplicationEntry(0, dnode1.getAddress(), "failingaddress:1111", dnode2.getAddress(), "failingaddress:2222"));

    QNodeHandlerContext context = new QNodeHandlerContext(testConfig, null);
   
    Tablespace tablespace = new Tablespace(PartitionMap.oneShardOpenedMap(), new ReplicationMap(rEntries), 0, 0);
    context.getTablespaceVersionsMap().put(new TablespaceVersion("t1", 0l), tablespace);
View Full Code Here

   
    DNode dnode1 = TestUtils.getTestDNode(config1, failingDHandler, "dnode-" + this.getClass().getName() + "-1");
    DNode dnode2 = TestUtils.getTestDNode(config2, dHandler, "dnode-" + this.getClass().getName() + "-2");

    try {
      ReplicationEntry repEntry = new ReplicationEntry(0, dnode1.getAddress(), dnode2.getAddress());
      Tablespace tablespace1 = new Tablespace(PartitionMap.oneShardOpenedMap(), new ReplicationMap(Arrays.asList(repEntry)), 1l, 0l);
      handler.getContext().getTablespaceVersionsMap().put(new TablespaceVersion("tablespace1", 1l), tablespace1);
      handler.getContext().getCurrentVersionsMap().put("tablespace1", 1l);

      QueryStatus qStatus = handler.query("tablespace1", "2", "SELECT 1;", null);
View Full Code Here

    DNode dnode2 = TestUtils.getTestDNode(config2, stuck, "dnode-" + this.getClass().getName() + "-2");

    try {
      handler.init(config);

      ReplicationEntry repEntry1 = new ReplicationEntry(0, dnode1.getAddress(), dnode2.getAddress());

      DeployRequest deployRequest1 = new DeployRequest();
      deployRequest1.setTablespace("partition1");
      deployRequest1.setPartitionMap(PartitionMap.oneShardOpenedMap().getPartitionEntries());
      deployRequest1.setReplicationMap(Arrays.asList(repEntry1));
View Full Code Here

TOP

Related Classes of com.splout.db.common.ReplicationEntry

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.