Package org.apache.cassandra.dht

Examples of org.apache.cassandra.dht.Token


            // Register the endpoint with gossip and assign it TOKENS_PER_NODE evenly spaced tokens
            Gossiper.instance.initializeNodeUnsafe(endpoint, UUID.randomUUID(), 1);
            List<Token> tokens = new ArrayList<Token>();

            for (int j = 0; j < TOKENS_PER_NODE; j++)
            {
                Token token = new BigIntegerToken(String.valueOf(currentToken));
                tokenMap.put(token, endpoint);
                tokens.add(token);
                currentToken += TOKEN_STEP;
            }
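The ring built in the snippet above is just a sorted map from numeric tokens to endpoints. The following is a minimal, self-contained sketch of the same pattern using java.math.BigInteger and a TreeMap in place of the Cassandra classes (BigIntegerToken, Gossiper, the test's tokenMap); the constants and addresses are illustrative, not taken from the test.

// Illustrative only: a stand-in for the token ring built in the test above,
// using java.math.BigInteger and a TreeMap instead of BigIntegerToken/TokenMetadata.
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

public class RingSketch
{
    static final int TOKENS_PER_NODE = 3;  // mirrors the test constant
    static final long TOKEN_STEP = 10;     // mirrors the test constant

    public static void main(String[] args)
    {
        TreeMap<BigInteger, String> tokenMap = new TreeMap<>();  // token -> endpoint, kept sorted
        long currentToken = TOKEN_STEP;
        for (String endpoint : new String[]{ "127.0.0.1", "127.0.0.2", "127.0.0.3" })
        {
            List<BigInteger> tokens = new ArrayList<>();
            for (int j = 0; j < TOKENS_PER_NODE; j++)
            {
                BigInteger token = BigInteger.valueOf(currentToken);
                tokenMap.put(token, endpoint);
                tokens.add(token);
                currentToken += TOKEN_STEP;
            }
            System.out.println(endpoint + " owns " + tokens);
        }
        System.out.println("ring: " + tokenMap);
    }
}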


            }
            expectedEndpoints.put(keyToken, endpoints);
        }

        // Relocate the first token from the first endpoint to the second endpoint.
        Token relocateToken = new BigIntegerToken(String.valueOf(TOKEN_STEP));
        ss.onChange(
                InetAddress.getByName("127.0.0.2"),
                ApplicationState.STATUS,
                vvFactory.relocating(Collections.singleton(relocateToken)));
        assertTrue(tmd.isRelocating(relocateToken));

    {
        createInitialRing(5);

        // Node handling the relocation (dst), and the token being relocated (src).
        InetAddress relocator = InetAddress.getByName("127.0.0.3");
        Token relocatee = new BigIntegerToken(String.valueOf(TOKEN_STEP));

        // Send RELOCATING and ensure the token is tracked as relocating
        ss.onChange(relocator, ApplicationState.STATUS, vvFactory.relocating(Collections.singleton(relocatee)));
        assertTrue(tmd.isRelocating(relocatee));

            {
                // A negative length encodes a null hash
                int hashLen = dis.readInt();
                byte[] hash = hashLen >= 0 ? new byte[hashLen] : null;
                if (hash != null)
                    dis.readFully(hash);
                Token token = Token.serializer.deserialize(dis);
                Hashable lchild = Hashable.serializer.deserialize(dis, version);
                Hashable rchild = Hashable.serializer.deserialize(dis, version);
                return new Inner(token, lchild, rchild);
            }
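The deserializer above relies on a simple wire convention: a signed length prefix where a negative value means "no hash", followed by the hash bytes (if any), the serialized Token, and the two child subtrees. The self-contained sketch below reproduces just that optional, length-prefixed pattern with plain java.io streams; the long payload stands in for the serialized Token and none of this is Cassandra API.

// Illustrative only: the "length-prefixed optional byte[]" convention used by
// the Inner deserializer above, shown with plain DataInput/DataOutput streams.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class OptionalHashSketch
{
    static void write(DataOutputStream dos, byte[] hash, long payload) throws IOException
    {
        dos.writeInt(hash == null ? -1 : hash.length);  // negative length encodes "no hash"
        if (hash != null)
            dos.write(hash);
        dos.writeLong(payload);                         // stands in for the serialized Token
    }

    static void read(DataInputStream dis) throws IOException
    {
        int hashLen = dis.readInt();
        byte[] hash = hashLen >= 0 ? new byte[hashLen] : null;
        if (hash != null)
            dis.readFully(hash);
        long payload = dis.readLong();
        System.out.println("hash bytes: " + (hash == null ? "none" : String.valueOf(hash.length))
                           + ", payload: " + payload);
    }

    public static void main(String[] args) throws IOException
    {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(bytes);
        write(dos, new byte[]{ 1, 2, 3 }, 42L);
        write(dos, null, 7L);

        DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        read(dis);  // record with a 3-byte hash
        read(dis);  // record written without a hash
    }
}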

    private Hashable initHelper(Token left, Token right, byte depth, byte max)
    {
        if (depth == max)
            // we've reached the leaves
            return new Leaf();
        Token midpoint = partitioner.midpoint(left, right);

        if (midpoint.equals(left) || midpoint.equals(right))
            return new Leaf();

        Hashable lchild = initHelper(left, midpoint, inc(depth), max);
        Hashable rchild = initHelper(midpoint, right, inc(depth), max);
        return new Inner(midpoint, lchild, rchild);
    }
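initHelper halves the range at partitioner.midpoint until either the requested depth is reached or the midpoint collides with an endpoint, at which point a Leaf is produced. The sketch below runs the same recursion over a plain long token space and simply counts the leaves it would create; it is illustrative only, not Cassandra code.

// Illustrative only: the halving recursion of initHelper over a numeric token space.
public class InitSketch
{
    // Count how many leaves a tree of the given maximum depth would have over [left, right).
    static int initHelper(long left, long right, int depth, int max)
    {
        if (depth == max)
            return 1;                              // reached the requested depth: one leaf
        long midpoint = left + (right - left) / 2; // analogous to partitioner.midpoint(left, right)
        if (midpoint == left || midpoint == right)
            return 1;                              // range too small to split any further
        return initHelper(left, midpoint, depth + 1, max)
             + initHelper(midpoint, right, depth + 1, max);
    }

    public static void main(String[] args)
    {
        System.out.println(initHelper(0, 1 << 20, 0, 5)); // a wide range yields 2^5 = 32 leaves
        System.out.println(initHelper(0, 3, 0, 5));       // a tiny range stops early: 3 leaves
    }
}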

    /**
     * Takes two trees and a range for which they have hashes, but are inconsistent.
     * @return FULLY_INCONSISTENT if active is inconsistent, PARTIALLY_INCONSISTENT if only a subrange is inconsistent.
     */
    static int differenceHelper(MerkleTree ltree, MerkleTree rtree, List<TreeRange> diff, TreeRange active)
    {
        Token midpoint = ltree.partitioner().midpoint(active.left, active.right);
        TreeRange left = new TreeRange(null, active.left, midpoint, inc(active.depth), null);
        TreeRange right = new TreeRange(null, midpoint, active.right, inc(active.depth), null);
        byte[] lhash;
        byte[] rhash;

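differenceHelper applies the same halving idea when two trees disagree: compare the hashes for the active range and, if they differ, split at the midpoint and recurse into each half until the mismatching sub-ranges are isolated. The sketch below is a self-contained simplification of that pattern; the RangeHasher interface and the half-open [left, right) ranges are assumptions of the sketch (Cassandra ranges are start-exclusive), and none of it is Cassandra API.

// Illustrative only: locate mismatching sub-ranges by comparing hashes and
// recursing on the midpoint, as differenceHelper does for Merkle trees.
import java.util.ArrayList;
import java.util.List;

public class DifferenceSketch
{
    /** Hypothetical per-range hash: each "tree" is just a function from [left, right) to a hash. */
    interface RangeHasher { long hash(long left, long right); }

    /** Collect sub-ranges of [left, right) on which the two hashers disagree. */
    static void difference(RangeHasher a, RangeHasher b, long left, long right, int depth, List<long[]> diff)
    {
        if (a.hash(left, right) == b.hash(left, right))
            return;                                // consistent: nothing to report
        long midpoint = left + (right - left) / 2; // analogous to partitioner.midpoint(...)
        if (depth == 0 || midpoint == left || midpoint == right)
        {
            diff.add(new long[]{ left, right });   // cannot split further: report the whole range
            return;
        }
        difference(a, b, left, midpoint, depth - 1, diff);
        difference(a, b, midpoint, right, depth - 1, diff);
    }

    public static void main(String[] args)
    {
        // Two toy "trees" whose contents differ only for values >= 96.
        RangeHasher a = (l, r) -> { long h = 0; for (long v = l; v < r; v++) h = 31 * h + v; return h; };
        RangeHasher b = (l, r) -> { long h = 0; for (long v = l; v < r; v++) h = 31 * h + (v >= 96 ? v + 1 : v); return h; };
        List<long[]> diff = new ArrayList<>();
        difference(a, b, 0, 128, 5, diff);
        for (long[] range : diff)
            System.out.println("inconsistent: [" + range[0] + ", " + range[1] + ")");
    }
}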

        if (depth >= hashdepth)
            throw new StopRecursion.TooDeep();

        if (hashable instanceof Leaf)
        {
            Token midpoint = partitioner.midpoint(pleft, pright);

            // We should not create a nonsensical range where start and end are the same token (nonsensical because ranges are
            // start-exclusive). Note that we shouldn't hit this unless the full range is very small or we are fairly deep
            if (midpoint.equals(pleft) || midpoint.equals(pright))
                throw new StopRecursion.TooDeep();

            // split
            size++;
            return new Inner(midpoint, new Leaf(), new Leaf());

     * @param searchPosition the position the natural endpoints are requested for
     * @return a copy of the natural endpoints for the given token
     */
    public ArrayList<InetAddress> getNaturalEndpoints(RingPosition searchPosition)
    {
        Token searchToken = searchPosition.getToken();
        Token keyToken = TokenMetadata.firstToken(tokenMetadata.sortedTokens(), searchToken);
        ArrayList<InetAddress> endpoints = getCachedEndpoints(keyToken);
        if (endpoints == null)
        {
            TokenMetadata tokenMetadataClone = tokenMetadata.cloneOnlyTokenMap();
            keyToken = TokenMetadata.firstToken(tokenMetadataClone.sortedTokens(), searchToken);
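TokenMetadata.firstToken, used above to map the search token to its owning ring token, amounts to "the first token greater than or equal to the search token, wrapping to the smallest token when the search falls past the end of the ring". A self-contained sketch of that lookup over a sorted list of BigInteger values (illustrative only, not the Cassandra implementation):

// Illustrative only: "first token >= searchToken, wrapping around the ring",
// the lookup that TokenMetadata.firstToken performs on sortedTokens().
import java.math.BigInteger;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class FirstTokenSketch
{
    static BigInteger firstToken(List<BigInteger> sortedTokens, BigInteger searchToken)
    {
        int i = Collections.binarySearch(sortedTokens, searchToken);
        if (i < 0)
            i = -i - 1;                 // insertion point: index of the first token > searchToken
        if (i >= sortedTokens.size())
            i = 0;                      // past the last token: wrap to the start of the ring
        return sortedTokens.get(i);
    }

    public static void main(String[] args)
    {
        List<BigInteger> ring = Arrays.asList(
                BigInteger.valueOf(10), BigInteger.valueOf(20), BigInteger.valueOf(30));
        System.out.println(firstToken(ring, BigInteger.valueOf(15))); // 20
        System.out.println(firstToken(ring, BigInteger.valueOf(20))); // 20 (exact match)
        System.out.println(firstToken(ring, BigInteger.valueOf(35))); // 10 (wrapped around)
    }
}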

        if (tokens.isEmpty())
            return endpoints;

        Iterator<Token> iter = TokenMetadata.ringIterator(tokens, token, false);
        Token primaryToken = iter.next();
        endpoints.add(metadata.getEndpoint(primaryToken));

        boolean bDataCenter = false;
        boolean bOtherRack = false;
        while (endpoints.size() < replicas && iter.hasNext())
        {
            // First try to find one in a different data center
            Token t = iter.next();
            if (!snitch.getDatacenter(metadata.getEndpoint(primaryToken)).equals(snitch.getDatacenter(metadata.getEndpoint(t))))
            {
                // If we have already found something in a different datacenter, no need to find another
                if (!bDataCenter)
                {
                    endpoints.add(metadata.getEndpoint(t));
                    bDataCenter = true;
                }
                continue;
            }
            // Now try to find one on a different rack in the same data center
            if (!snitch.getRack(metadata.getEndpoint(primaryToken)).equals(snitch.getRack(metadata.getEndpoint(t))) &&
                snitch.getDatacenter(metadata.getEndpoint(primaryToken)).equals(snitch.getDatacenter(metadata.getEndpoint(t))))
            {
                // If we have already found something in a different rack, no need to find another
                if (!bOtherRack)
                {
                    endpoints.add(metadata.getEndpoint(t));
                    bOtherRack = true;
                }
            }

        }

        // If we already found N nodes we are good and this loop is skipped. Otherwise just
        // loop through the ring and add endpoints until we have N nodes.
        if (endpoints.size() < replicas)
        {
            iter = TokenMetadata.ringIterator(tokens, token, false);
            while (endpoints.size() < replicas && iter.hasNext())
            {
                Token t = iter.next();
                if (!endpoints.contains(metadata.getEndpoint(t)))
                    endpoints.add(metadata.getEndpoint(t));
            }
        }
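Both replica-selection loops above walk the ring with TokenMetadata.ringIterator, which starts at the token owning the search position and then wraps around the sorted tokens. The sketch below imitates that traversal over a sorted list of longs; it illustrates the iteration order only and is not the Cassandra implementation.

// Illustrative only: iterate the ring starting at the first token >= start,
// wrapping around once, the way the loops above consume TokenMetadata.ringIterator.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;

public class RingIteratorSketch
{
    static Iterator<Long> ringIterator(List<Long> sortedTokens, long start)
    {
        int i = Collections.binarySearch(sortedTokens, start);
        int startIndex = i < 0 ? (-i - 1) % sortedTokens.size() : i;  // wrap if past the end
        List<Long> walk = new ArrayList<>();
        for (int step = 0; step < sortedTokens.size(); step++)
            walk.add(sortedTokens.get((startIndex + step) % sortedTokens.size()));  // one full loop
        return walk.iterator();
    }

    public static void main(String[] args)
    {
        List<Long> ring = Arrays.asList(10L, 20L, 30L, 40L);
        Iterator<Long> iter = ringIterator(ring, 25L);
        Long primaryToken = iter.next();                    // 30: the token that owns position 25
        System.out.println("primary: " + primaryToken);
        while (iter.hasNext())
            System.out.println("next: " + iter.next());     // 40, 10, 20
    }
}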

                put(dc.getKey(), new LinkedHashSet<InetAddress>());
        }};
        Iterator<Token> tokenIter = TokenMetadata.ringIterator(tokenMetadata.sortedTokens(), searchToken, false);
        while (tokenIter.hasNext() && !hasSufficientReplicas(dcReplicas, allEndpoints))
        {
            Token next = tokenIter.next();
            InetAddress ep = tokenMetadata.getEndpoint(next);
            String dc = snitch.getDatacenter(ep);
            // have we already found all replicas for this dc?
            if (!datacenters.containsKey(dc) || hasSufficientReplicas(dc, dcReplicas, allEndpoints))
                continue;
