Package org.apache.cassandra.db

Examples of org.apache.cassandra.db.Table
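
Below are usage snippets collected from the Cassandra code base and its tests. As an orientation, here is a minimal sketch, not taken from any of them, of the access pattern most of the snippets share; the keyspace and column family names ("Keyspace1", "Standard1") and the wrapper class are placeholders, and checked exceptions are simply declared rather than handled.

    import java.util.concurrent.Future;

    import org.apache.cassandra.db.ColumnFamilyStore;
    import org.apache.cassandra.db.Table;

    public class TableUsageSketch
    {
        // Minimal sketch with placeholder keyspace/column family names.
        static void flushAndInspect() throws Exception
        {
            Table table = Table.open("Keyspace1");                           // open (or get) the keyspace instance
            for (Future<?> f : table.flush())                                // flush every memtable in the keyspace
                f.get();                                                     // wait for each flush to complete
            ColumnFamilyStore cfs = table.getColumnFamilyStore("Standard1"); // handle for per-column-family operations
            System.out.println(cfs.getSSTables());                           // e.g. inspect the resulting SSTables
        }
    }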


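Commit log replay: for each mutation read back from the log, the keyspace is opened with Table.open(), column families that were already flushed (according to the commit log header) are removed, and the remainder is re-applied on the mutation stage; once all rows are queued, the code waits for the mutation stage to drain and then flushes every recovered table: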
                if (logger.isDebugEnabled())
                    logger.debug(String.format("replaying mutation for %s.%s: %s",
                                                rm.getTable(),
                                                rm.key(),
                                                "{" + StringUtils.join(rm.getColumnFamilies(), ", ") + "}"));
                final Table table = Table.open(rm.getTable());
                tablesRecovered.add(table);
                final Collection<ColumnFamily> columnFamilies = new ArrayList<ColumnFamily>(rm.getColumnFamilies());
                final long entryLocation = reader.getFilePointer();
                Runnable runnable = new WrappedRunnable()
                {
                    public void runMayThrow() throws IOException
                    {
                        /* remove column families that have already been flushed before applying the rest */
                        for (ColumnFamily columnFamily : columnFamilies)
                        {
                            int id = table.getColumnFamilyId(columnFamily.name());
                            if (!clHeader.isDirty(id) || entryLocation < clHeader.getPosition(id))
                            {
                                rm.removeColumnFamily(columnFamily);
                            }
                        }
                        if (!rm.isEmpty())
                        {
                            Table.open(rm.getTable()).apply(rm, null, false);
                        }
                    }
                };
                StageManager.getStage(StageManager.MUTATION_STAGE).execute(runnable);
                rows++;
            }
            reader.close();
        }

        // wait for all the writes to finish on the mutation stage
        while (StageManager.getStage(StageManager.MUTATION_STAGE).getCompletedTaskCount() < rows)
        {
            try
            {
                Thread.sleep(10);
            }
            catch (InterruptedException e)
            {
                throw new AssertionError(e);
            }
        }

        // flush replayed tables
        List<Future<?>> futures = new ArrayList<Future<?>>();
        for (Table table : tablesRecovered)
        {
            futures.addAll(table.flush());
        }
        // wait for flushes to finish
        for (Future<?> future : futures)
        {
            try
View Full Code Here


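Transferring a keyspace's data for a set of ranges: the table's memtables are flushed, an anticompaction splits out the SSTable data for the requested ranges, and the resulting files are streamed to the target node: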
         * (2) anticompaction -- split out the keys in the range specified
         * (3) transfer the data.
        */
        try
        {
            Table table = Table.open(tableName);
            updateStatus("Flushing memtables for " + tableName + "...");
            for (Future f : table.flush())
            {
                try
                {
                    f.get();
                }
                catch (InterruptedException e)
                {
                    throw new RuntimeException(e);
                }
                catch (ExecutionException e)
                {
                    throw new RuntimeException(e);
                }
            }
            updateStatus("Performing anticompaction ...");
            /* Get the list of files that need to be streamed */
            transferSSTables(target, table.forceAntiCompaction(ranges, target), tableName); // SSTR GC deletes the file when done
        }
        catch (IOException e)
        {
            throw new IOError(e);
        }
View Full Code Here

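A verb handler for range queries: the table named in the RangeCommand is opened, its ColumnFamilyStore returns the matching key range, and the RangeReply is sent back to the sender of the message: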
    public void doVerb(Message message)
    {
        try
        {
            RangeCommand command = RangeCommand.read(message);
            Table table = Table.open(command.table);

            RangeReply rangeReply = table.getColumnFamilyStore(command.columnFamily).getKeyRange(command.startWith, command.stopAt, command.maxResults);
            Message response = rangeReply.getReply(message);
            if (logger.isDebugEnabled())
                logger.debug("Sending " + rangeReply + " to " + message.getMessageId() + "@" + message.getFrom());
            MessagingService.instance().sendOneWay(response, message.getFrom());
        }
View Full Code Here

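Queueing a read-only compaction in response to a remote request: the keyspace and column family come from the deserialized CFPair (left = keyspace, right = column family):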
            {
                CFPair request = this.deserialize(buffer);

                // trigger readonly-compaction
                logger.debug("Queueing readonly compaction for request from " + message.getFrom() + " for " + request);
                Table table = Table.open(request.left);
                CompactionManager.instance().submitReadonly(table.getColumnFamilyStore(request.right),
                                                            message.getFrom());
            }
            catch (Exception e)
            {
                logger.warn(LogUtil.throwableToString(e));
View Full Code Here

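Initialization: every keyspace returned by Table.getAllTableNames() is opened and started, after which the commit log is replayed through the RecoveryManager: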
        // initialize stuff
        for (String table : Table.getAllTableNames())
        {
            if (logger.isDebugEnabled())
                logger.debug("opening keyspace " + table);
            Table tbl = Table.open(table);
            tbl.onStart();
        }

        // replay the log if necessary
        RecoveryManager recoveryMgr = RecoveryManager.instance();
        recoveryMgr.doRecovery();
View Full Code Here

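The same flush, anticompact, and transfer sequence as above, applied to every keyspace returned by DatabaseDescriptor.getTables():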
        List<String> tables = DatabaseDescriptor.getTables();
        for (String tName : tables)
        {
            try
            {
                Table table = Table.open(tName);
                if (logger.isDebugEnabled())
                    logger.debug("Flushing memtables ...");
                for (Future f : table.flush())
                {
                    try
                    {
                        f.get();
                    }
                    catch (InterruptedException e)
                    {
                        throw new RuntimeException(e);
                    }
                    catch (ExecutionException e)
                    {
                        throw new RuntimeException(e);
                    }
                }
                if (logger.isDebugEnabled())
                    logger.debug("Performing anticompaction ...");
                /* Get the list of files that need to be streamed */
                transferSSTables(target, table.forceAntiCompaction(ranges, target), tName); // SSTR GC deletes the file when done
            }
            catch (IOException e)
            {
                throw new IOError(e);
            }
View Full Code Here

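Generating temporary SSTable file names: each distinct entry is split into keyspace and column family, and the matching ColumnFamilyStore provides a temporary SSTable file name: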
            for (String distinctEntry : distinctEntries)
            {
                String[] pieces = FBUtilities.strip(distinctEntry, "-");
                String tableName = pieces[0];
                Table table = Table.open(tableName);

                ColumnFamilyStore cfStore = table.getColumnFamilyStore(pieces[1]);
                if (logger.isDebugEnabled())
                    logger.debug("Generating file name for " + distinctEntry + " ...");
                fileNames.put(distinctEntry, cfStore.getTempSSTableFileName());
            }
View Full Code Here

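A later revision of commit log replay: the RowMutation is rebuilt by column family id so that already-flushed or dropped column families are skipped, replay tasks are submitted to the mutation stage with a cap on outstanding futures, and the recovered tables are flushed once all mutations have been applied: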
                    if (logger.isDebugEnabled())
                        logger.debug(String.format("replaying mutation for %s.%s: %s",
                                                    rm.getTable(),
                                                    ByteBufferUtil.bytesToHex(rm.key()),
                                                    "{" + StringUtils.join(rm.getColumnFamilies(), ", ") + "}"));
                    final Table table = Table.open(rm.getTable());
                    tablesRecovered.add(table);
                    final Collection<ColumnFamily> columnFamilies = new ArrayList<ColumnFamily>(rm.getColumnFamilies());
                    final long entryLocation = reader.getFilePointer();
                    final CommitLogHeader finalHeader = clHeader;
                    final RowMutation frm = rm;
                    Runnable runnable = new WrappedRunnable()
                    {
                        public void runMayThrow() throws IOException
                        {
                            RowMutation newRm = new RowMutation(frm.getTable(), frm.key());

                            // Rebuild the row mutation, omitting column families that a) have already been flushed
                            // or b) belong to a cf that was dropped. Keep in mind that cf.name() is suspect; do
                            // everything based on the cfid instead.
                            for (ColumnFamily columnFamily : columnFamilies)
                            {
                                if (CFMetaData.getCF(columnFamily.id()) == null)
                                    // null means the cf has been dropped
                                    continue;

                                if (finalHeader == null || (finalHeader.isDirty(columnFamily.id()) && entryLocation > finalHeader.getPosition(columnFamily.id())))
                                    newRm.add(columnFamily);
                            }
                            if (!newRm.isEmpty())
                            {
                                Table.open(newRm.getTable()).apply(newRm, null, false);
                            }
                        }
                    };
                    futures.add(StageManager.getStage(Stage.MUTATION).submit(runnable));
                    if (futures.size() > MAX_OUTSTANDING_REPLAY_COUNT)
                    {
                        FBUtilities.waitOnFutures(futures);
                        futures.clear();
                    }
                }
            }
            finally
            {
                FileUtils.closeQuietly(reader);
                logger.info("Finished reading " + file);
            }
        }
       
        for (Map.Entry<Integer, AtomicInteger> entry : invalidMutations.entrySet())
            logger.info(String.format("Skipped %d mutations from unknown (probably removed) CF with id %d", entry.getValue().intValue(), entry.getKey()));

        // wait for all the writes to finish on the mutation stage
        FBUtilities.waitOnFutures(futures);
        logger.debug("Finished waiting on mutations from recovery");

        // flush replayed tables
        futures.clear();
        for (Table table : tablesRecovered)
            futures.addAll(table.flush());
        FBUtilities.waitOnFutures(futures);
    }
View Full Code Here

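Creating the local PendingFile for a remote SSTable: the remote Descriptor supplies the keyspace and column family names, which locate the ColumnFamilyStore used to build the local flush path: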
    {
        /* Create a local sstable for each remote sstable */
        Descriptor remotedesc = remote.desc;

        // new local sstable
        Table table = Table.open(remotedesc.ksname);
        ColumnFamilyStore cfStore = table.getColumnFamilyStore(remotedesc.cfname);
        Descriptor localdesc = Descriptor.fromFilename(cfStore.getFlushPath(remote.desc.version));

        return new PendingFile(localdesc, remote);
    }
View Full Code Here

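A test helper that builds an SSTable: one hundred RowMutations are applied to Standard1 in Keyspace1, the column family is flushed with forceBlockingFlush(), and the first resulting SSTable is returned: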
        in.close();
    }
   
    private static SSTable makeSSTable()
    {
        Table t = Table.open("Keyspace1");
        for (int i = 0; i < 100; i++)
        {
            RowMutation rm = new RowMutation(t.name, ByteBufferUtil.bytes(Long.toString(System.nanoTime())));
            rm.add(new QueryPath("Standard1", null, ByteBufferUtil.bytes("cola")), ByteBufferUtil.bytes("value"), 0);
            try
            {
                rm.apply();
            }
            catch (IOException ex)
            {
                throw new RuntimeException(ex);
            }
        }
        try
        {
            t.getColumnFamilyStore("Standard1").forceBlockingFlush();
            return t.getColumnFamilyStore("Standard1").getSSTables().iterator().next();
        }
        catch (Exception any)
        {
            throw new RuntimeException(any);
        }
View Full Code Here


