Package com.splout.db.common

Examples of com.splout.db.common.Tablespace
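
A minimal sketch of the pattern the examples below share: a Tablespace is built from a PartitionMap and a ReplicationMap plus a version number and a creation date, and is then registered in the QNode context under a TablespaceVersion key so that queries can be routed to it. This is an illustrative fragment, not project code: it assumes a QNodeHandlerContext named "context" is already in scope (as in the Querier examples further down); all classes and calls used here appear in the snippets below.

    // Sketch (assumption: one shard, id 0, served by a single local DNode)
    List<ReplicationEntry> rEntries = new ArrayList<ReplicationEntry>();
    rEntries.add(new ReplicationEntry(0, "localhost:4444")); // shard 0 -> one replica

    Tablespace tablespace = new Tablespace(
        PartitionMap.oneShardOpenedMap(),  // partition map with a single open-ended shard
        new ReplicationMap(rEntries),      // which DNodes serve each shard
        1L,                                // version
        System.currentTimeMillis());       // creation date (epoch millis)

    // Register the version so the QNode can route queries to it
    context.getTablespaceVersionsMap().put(new TablespaceVersion("t1", 1L), tablespace);
    context.getCurrentVersionsMap().put("t1", 1L);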


        repEntry.setShard(i);
        repEntry.setNodes(Arrays.asList(new String[] { "localhost:" + dNodeConfig.getInt(DNodeProperties.PORT) }));
        replicationEntries.add(repEntry);
      }

      Tablespace tablespace1 = new Tablespace(new PartitionMap(partitions), new ReplicationMap(replicationEntries), 1L,
          0L);
      handler.getContext().getTablespaceVersionsMap().put(new TablespaceVersion("tablespace1", 1L), tablespace1);
      handler.getContext().getCurrentVersionsMap().put("tablespace1", 1L);

      // The following has to be read as: multi-query range [23-25)
View Full Code Here


          }

          Iterator<SwitchVersionRequest> it = versionsToCheck.iterator();
          while(it.hasNext()) {
            SwitchVersionRequest req = it.next();
            Tablespace t = context.getTablespaceVersionsMap().get(
                new TablespaceVersion(req.getTablespace(), req.getVersion()));
            // Check that this TablespaceVersion has been reported by some node through Hazelcast
            if(t != null && t.getReplicationMap() != null && t.getPartitionMap() != null
                && t.getPartitionMap().getPartitionEntries() != null
                && t.getReplicationMap().getReplicationEntries() != null
                && t.getReplicationMap().getReplicationEntries().size() > 0) {
              if(t.getPartitionMap().getPartitionEntries().size() == t.getReplicationMap()
                  .getReplicationEntries().size()) {
                log.info("Ok, TablespaceVersion [" + req.getTablespace() + ", " + req.getVersion()
                    + "] being handled by enough DNodes as reported by Hazelcast. ("
                    + t.getReplicationMap().getReplicationEntries() + ")");
                it.remove();
              }
            }
          }
        } while(versionsToCheck.size() > 0);

        log.info("All DNodes performed the deploy of version [" + version
            + "]. Publishing tablespaces...");

        // We finish by publishing the versions table with the new versions.
        try {
          switchVersions(switchActions());
        } catch(UnexistingVersion e) {
          throw new RuntimeException(
              "Version not found right after deploying it. This looks like a bug.", e);
        }

        // If some replicas are under-replicated, start a balancing process
        context.maybeBalance();

        log.info("Deploy of version [" + version + "] Finished PROPERLY. :-)");
        context.getCoordinationStructures().logDeployMessage(version,
            "Deploy of version [" + version + "] finished properly.");
        context.getCoordinationStructures().getDeploymentsStatusPanel()
            .put(version, DeployStatus.FINISHED);
      } catch(InterruptedException e) {
        log.error("Error while deploying version [" + version + "]", e);
        abortDeploy(dnodes, e.getMessage(), version);
      } catch(Throwable t) {
        t.printStackTrace();
        throw new RuntimeException(t);
      } finally {
        CoordinationStructures.DEPLOY_IN_PROGRESS.decrementAndGet();
      }
    }
View Full Code Here

      while(iterator.hasNext()) {
        Map.Entry<TablespaceVersion, Tablespace> tablespaceVersion = iterator.next();
        String tablespaceName = tablespaceVersion.getKey().getTablespace();
        Long version = tablespaceVersion.getKey().getVersion();
        // Is this DNode present in this version?
        Tablespace tablespace = tablespaceVersion.getValue();
        // We will rebuild the replication map to check whether it becomes empty after these checks
        int nonEmptyReplicas = 0;

        Iterator<ReplicationEntry> repIter = tablespace.getReplicationMap().getReplicationEntries()
            .iterator();
        while(repIter.hasNext()) {
          ReplicationEntry entry = repIter.next();
          int partition = entry.getShard();
          if(entry.getNodes().contains(dNodeInfo.getAddress())) {
            // Yes: check whether this DNode is still serving this version/partition
            if((dNodeInfo.getServingInfo().get(tablespaceName) == null)
                || (dNodeInfo.getServingInfo().get(tablespaceName).get(version) == null)
                || (dNodeInfo.getServingInfo().get(tablespaceName).get(version).get(partition) == null)) {
              // No: this DNode no longer serves it, so remove it from the entry
              entry.getNodes().remove(dNodeInfo.getAddress());
              if(entry.getNodes().isEmpty()) {
                repIter.remove();
                // Remove also from PartitionMap
                PartitionEntry pEntry = new PartitionEntry();
                pEntry.setShard(entry.getShard());
                tablespace.getPartitionMap().getPartitionEntries().remove(pEntry);
              }
            }
          }
          if(!entry.getNodes().isEmpty()) {
            nonEmptyReplicas++;
          }
        }
        if(nonEmptyReplicas == 0) {
          // Delete TablespaceVersion
          log.info("Removing empty tablespace version (implicit leaving from " + dNodeInfo.getAddress()
              + "): " + tablespaceName + ", " + version);
          iterator.remove();
        }
      }

      // Now iterate over all the tablespaces of this DNode to see new additions or EXPLICIT leavings
      for(Map.Entry<String, Map<Long, Map<Integer, PartitionMetadata>>> tablespaceEntry : dNodeInfo
          .getServingInfo().entrySet()) {
        String tablespaceName = tablespaceEntry.getKey();
        // Iterate over all versions of this tablespace
        for(Map.Entry<Long, Map<Integer, PartitionMetadata>> versionEntry : tablespaceEntry.getValue()
            .entrySet()) {
          Long versionName = versionEntry.getKey();
          TablespaceVersion tablespaceVersion = new TablespaceVersion(tablespaceName, versionName);
          Tablespace currentTablespace = tablespaceVersionMap.get(tablespaceVersion);
          List<PartitionEntry> partitionMap = new ArrayList<PartitionEntry>();
          List<ReplicationEntry> replicationMap = new ArrayList<ReplicationEntry>();
          long deployDate = -1;
          if(currentTablespace != null) {
            // This is not the first time we see this tablespace: copy the partition and replication maps so we can
            // modify them without contention.
            partitionMap.addAll(currentTablespace.getPartitionMap().getPartitionEntries());
            replicationMap.addAll(currentTablespace.getReplicationMap().getReplicationEntries());
            deployDate = currentTablespace.getCreationDate();
          }
          // Iterate over all partitions of this tablespace
          for(Map.Entry<Integer, PartitionMetadata> partition : versionEntry.getValue().entrySet()) {
            deployDate = deployDate == -1 ? partition.getValue().getDeploymentDate() : deployDate;
            if(deployDate != -1 && (deployDate != partition.getValue().getDeploymentDate())) {
              throw new TablespaceVersionInfoException(
                  "Inconsistent partition metadata within same node, deploy date was " + deployDate
                      + " versus " + partition.getValue().getDeploymentDate());
            }
            PartitionMetadata metadata = partition.getValue();
            Integer shard = partition.getKey();
            // Create a PartitionEntry according to this PartitionMetadata
            PartitionEntry myEntry = new PartitionEntry();
            myEntry.setMax(metadata.getMaxKey());
            myEntry.setMin(metadata.getMinKey());
            myEntry.setShard(shard);
            PartitionEntry existingPartitionEntry = null;
            // Look for an existing PartitionEntry for the same shard in the PartitionMap
            if(!partitionMap.contains(myEntry)) {
              if(!event.equals(DNodeEvent.LEAVE)) {
                // In this case all conditions are met for adding a new entry to the PartitionMap
                partitionMap.add(myEntry);
                // The PartitionMap may no longer be sorted after this insertion, so sort it now
                Collections.sort(partitionMap);
              }
            } else {
              // Check consistency of this Partition Metadata
              existingPartitionEntry = partitionMap.get(partitionMap.indexOf(myEntry));
              if(existingPartitionEntry.getMax() == null || myEntry.getMax() == null) {
                if(!(existingPartitionEntry.getMax() == null && myEntry.getMax() == null)) {
                  throw new TablespaceVersionInfoException(
                      "Inconsistent partition metadata between nodes: " + existingPartitionEntry
                          + " versus " + myEntry);
                }
              } else {
                if(!existingPartitionEntry.getMax().equals(myEntry.getMax())) {
                  throw new TablespaceVersionInfoException(
                      "Inconsistent partition metadata between nodes: " + existingPartitionEntry
                          + " versus " + myEntry);
                }
              }
              if(existingPartitionEntry.getMin() == null || myEntry.getMin() == null) {
                if(!(existingPartitionEntry.getMin() == null && myEntry.getMin() == null)) {
                  throw new TablespaceVersionInfoException(
                      "Inconsistent partition metadata between nodes: " + existingPartitionEntry
                          + " versus " + myEntry);
                }
              } else {
                if(!existingPartitionEntry.getMin().equals(myEntry.getMin())) {
                  throw new TablespaceVersionInfoException(
                      "Inconsistent partition metadata between nodes: " + existingPartitionEntry
                          + " versus " + myEntry);
                }
              }
            }
            // Create a ReplicationEntry according to this PartitionMetadata
            // Will only contain this DNode as we don't know about the others yet
            ReplicationEntry reEntry = new ReplicationEntry();
            reEntry.setShard(shard);
            reEntry.setExpectedReplicationFactor(metadata.getNReplicas());
            reEntry.setNodes(new ArrayList<String>());
            // Look for an existing ReplicationEntry for the same shard in the ReplicationMap
            if(replicationMap.contains(reEntry)) {
              ReplicationEntry existingEntry = replicationMap.get(replicationMap.indexOf(reEntry));
              if(event.equals(DNodeEvent.LEAVE)) {
                // Remove it from replication map and partition map
                existingEntry.getNodes().remove(dNodeInfo.getAddress());
                if(existingEntry.getNodes().isEmpty()) {
                  replicationMap.remove(existingEntry);
                  if(existingPartitionEntry != null) {
                    partitionMap.remove(existingPartitionEntry);
                  } else {
                    throw new RuntimeException(
                        "ReplicationEntry for one shard with no associated PartitionEntry. This is very likely to be a software bug.");
                  }
                }
              } else {
                if(!existingEntry.getNodes().contains(dNodeInfo.getAddress())) {
                  // Add it to replication map
                  existingEntry.getNodes().add(dNodeInfo.getAddress());
                } else {
                  // We are adding / updating but the node already exists in the replication map.
                }
              }
            } else if(!event.equals(DNodeEvent.LEAVE)) { // Otherwise just add and sort
              // We check the DNodeEvent here, although it would be very strange for this DNode to be leaving when its
              // ReplicationEntry wasn't present
              reEntry.getNodes().add(dNodeInfo.getAddress());
              replicationMap.add(reEntry);
              Collections.sort(reEntry.getNodes());
              Collections.sort(replicationMap);
            }
          }
          // Delete tablespaceVersion if it is empty now
          if(currentTablespace != null && replicationMap.size() == 0) {
            log.info("Removing empty tablespaceVersion: " + tablespaceVersion
                + " due to explicit leaving from node " + dNodeInfo.getAddress());
            tablespaceVersionMap.remove(tablespaceVersion);
          } else {
            // Update the info in memory
            currentTablespace = new Tablespace(new PartitionMap(partitionMap), new ReplicationMap(
                replicationMap), versionName, deployDate);
            tablespaceVersionMap.put(tablespaceVersion, currentTablespace);
          }
        }
      }
View Full Code Here

    QNodeHandlerContext context = new QNodeHandlerContext(testConfig, null);

    List<ReplicationEntry> rEntries = new ArrayList<ReplicationEntry>();
    rEntries.add(new ReplicationEntry(0, "localhost:4444", "localhost:5555", "localhost:6666"));

    Tablespace tablespace = new Tablespace(PartitionMap.oneShardOpenedMap(), new ReplicationMap(rEntries), 0, 0);
    context.getTablespaceVersionsMap().put(new TablespaceVersion("t1", 0L), tablespace);
    context.getCurrentVersionsMap().put("t1", 0L);

    Querier querier = new Querier(context);
   
View Full Code Here

    List<ReplicationEntry> rEntries = new ArrayList<ReplicationEntry>();
    rEntries.add(new ReplicationEntry(0, dnode1.getAddress(), dnode2.getAddress()));

    QNodeHandlerContext context = new QNodeHandlerContext(testConfig, null);
   
    Tablespace tablespace = new Tablespace(PartitionMap.oneShardOpenedMap(), new ReplicationMap(rEntries), 0, 0);
    context.getTablespaceVersionsMap().put(new TablespaceVersion("t1", 0L), tablespace);
    context.getCurrentVersionsMap().put("t1", 0L);

    Querier querier = new Querier(context);
   
View Full Code Here

    List<ReplicationEntry> rEntries = new ArrayList<ReplicationEntry>();
    rEntries.add(new ReplicationEntry(0, dnode1.getAddress(), "fakeaddress:1111", dnode2.getAddress(), "fakeaddress:2222"));

    QNodeHandlerContext context = new QNodeHandlerContext(testConfig, null);
   
    Tablespace tablespace = new Tablespace(PartitionMap.oneShardOpenedMap(), new ReplicationMap(rEntries), 0, 0);
    context.getTablespaceVersionsMap().put(new TablespaceVersion("t1", 0L), tablespace);
    context.getCurrentVersionsMap().put("t1", 0L);

    Querier querier = new Querier(context);
   
View Full Code Here

    List<ReplicationEntry> rEntries = new ArrayList<ReplicationEntry>();
    rEntries.add(new ReplicationEntry(0, dnode1.getAddress(), "failingaddress:1111", dnode2.getAddress(), "failingaddress:2222"));

    QNodeHandlerContext context = new QNodeHandlerContext(testConfig, null);
   
    Tablespace tablespace = new Tablespace(PartitionMap.oneShardOpenedMap(), new ReplicationMap(rEntries), 0, 0);
    context.getTablespaceVersionsMap().put(new TablespaceVersion("t1", 0L), tablespace);
    context.getCurrentVersionsMap().put("t1", 0L);

    Querier querier = new Querier(context);
   
View Full Code Here

      SortedSet<Tablespace> allVersions = tablespaces.get(tablespace);
      Iterator<Tablespace> it = allVersions.iterator();
      boolean foundVersionBeingServed = false;
      int countVersionsAfter = 0;
      while(it.hasNext()) {
        Tablespace tb = it.next();
        if(versionBeingServed.equals(tb.getVersion())) {
          foundVersionBeingServed = true;
        } else {
          if(foundVersionBeingServed) {
            countVersionsAfter++;
            if(countVersionsAfter >= maxVersionsPerTablespace) {
              // This is the case where we remove the version
              // 1 - This tablespace has a version being served
              // 2 - This version is older than the current tablespace being served
              // 3 - We are already keeping maxVersionsPerTablespace versions
              tablespacesToRemove.add(new com.splout.db.thrift.TablespaceVersion(tablespace, tb
                  .getVersion()));
              log.info("Tablespace [" + tablespace + "] Version [" + tb.getVersion() + "] "
                  + "created at [" + new Date(tb.getCreationDate())
                  + "] REMOVED. We already keep younger versions.");
            } else {
              log.info("Tablespace [" + tablespace + "] Version [" + tb.getVersion() + "] "
                  + "created at [" + new Date(tb.getCreationDate())
                  + "] KEPT.");
            }
          } else {
            log.info("Tablespace [" + tablespace + "] Version [" + tb.getVersion() + "] "
                + "created at [" + new Date(tb.getCreationDate()) + "] either younger than served one or without version being served. Keeping.");
          }
        }
      }

      if(!foundVersionBeingServed) {
View Full Code Here

    DNode dnode1 = TestUtils.getTestDNode(config1, failingDHandler, "dnode-" + this.getClass().getName() + "-1");
    DNode dnode2 = TestUtils.getTestDNode(config2, dHandler, "dnode-" + this.getClass().getName() + "-2");

    try {
      ReplicationEntry repEntry = new ReplicationEntry(0, dnode1.getAddress(), dnode2.getAddress());
      Tablespace tablespace1 = new Tablespace(PartitionMap.oneShardOpenedMap(), new ReplicationMap(Arrays.asList(repEntry)), 1L, 0L);
      handler.getContext().getTablespaceVersionsMap().put(new TablespaceVersion("tablespace1", 1L), tablespace1);
      handler.getContext().getCurrentVersionsMap().put("tablespace1", 1L);

      QueryStatus qStatus = handler.query("tablespace1", "2", "SELECT 1;", null);
      Assert.assertEquals(new Integer(0), qStatus.getShard());
View Full Code Here

      CoordinationStructures coord = new CoordinationStructures(hz);
     
      handler.init(config);

      for(int i = 0; i < 8; i++) {
        handler.getContext().getTablespaceVersionsMap().put(new TablespaceVersion("t1", i), new Tablespace(null, null, i, 0l));
      }
     
      Map<String, Long> versionsBeingServed = new HashMap<String, Long>();
      versionsBeingServed.put("t1", 6l);
      coord.getVersionsBeingServed().put(CoordinationStructures.KEY_FOR_VERSIONS_BEING_SERVED, versionsBeingServed);
View Full Code Here
