Package org.apache.cassandra.io

Examples of org.apache.cassandra.io.SSTable$KeyPosition


                 * is present in the BloomFilter. If not, continue to the next file.
                */
                boolean bVal = SSTable.isKeyInFile(key, file);
                if ( !bVal )
                    continue;
                SSTable ssTable = new SSTable(file);
                ssTable.touch(key, fData);
            }
        }
        finally
        {
            lock_.readLock().unlock();
        }
    }
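
The fragment above shows the usual Bloom-filter guard: SSTable.isKeyInFile() consults an in-memory filter, and only a "maybe" answer is worth the disk seek, because a negative answer proves the key is absent from that file. Below is a minimal, self-contained sketch of the same pattern; KeyFilter, DataFile, BloomGuardedReader and touchAll are hypothetical stand-ins, not Cassandra API.

import java.util.List;

// false from mightContain() means the key is definitely not in the file;
// true means "possibly present", so the disk read may still be wasted.
interface KeyFilter { boolean mightContain(String key); }

// Stand-in for the expensive disk-side work done per data file.
interface DataFile { void touch(String key); }

class BloomGuardedReader
{
    static void touchAll(String key, List<KeyFilter> filters, List<DataFile> files)
    {
        for (int i = 0; i < files.size(); i++)
        {
            if (!filters.get(i).mightContain(key))
                continue;            // definite miss: skip the disk entirely
            files.get(i).touch(key); // possible hit (or false positive): read the file
        }
    }
}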


    private ColumnFamily fetchColumnFamily(String key, String cf, IFilter filter, String ssTableFile) throws IOException
    {
        SSTable ssTable = new SSTable(ssTableFile);
        long start = System.currentTimeMillis();
        DataInputBuffer bufIn = filter.next(key, cf, ssTable);
        logger_.debug("DISK ssTable.next TIME: " + (System.currentTimeMillis() - start) + " ms.");
        if (bufIn.getLength() == 0)

          }
          PriorityQueue<FileStruct> pq = initializePriorityQueue(files, ranges, ColumnFamilyStore.bufSize_);
          if (pq.size() > 0)
          {
              mergedFileName = getTempFileName();
              SSTable ssTableRange = null;
              String lastkey = null;
              List<FileStruct> lfs = new ArrayList<FileStruct>();
              DataOutputBuffer bufOut = new DataOutputBuffer();
              int expectedBloomFilterSize = SSTable.getApproximateKeyCount(files);
              expectedBloomFilterSize = (expectedBloomFilterSize > 0) ? expectedBloomFilterSize : SSTable.indexInterval();
              logger_.debug("Expected bloom filter size : " + expectedBloomFilterSize);
              /* Create the bloom filter for the compacted file. */
              BloomFilter compactedRangeBloomFilter = new BloomFilter(expectedBloomFilterSize, 15);
              List<ColumnFamily> columnFamilies = new ArrayList<ColumnFamily>();

              while (pq.size() > 0 || lfs.size() > 0)
              {
                  FileStruct fs = null;
                  if (pq.size() > 0)
                  {
                      fs = pq.poll();
                  }
                  if (fs != null
                          && (lastkey == null || lastkey.equals(fs.getKey())))
                  {
                      // The keys are the same, so add this entry to the lfs list
                      lastkey = fs.getKey();
                      lfs.add(fs);
                  }
                  else
                  {
                      Collections.sort(lfs, new FileStructComparator());
                      ColumnFamily columnFamily;
                      bufOut.reset();
                      if (lfs.size() > 1)
                      {
                          for (FileStruct filestruct : lfs)
                          {
                              try
                              {
                                  /* read the length although we don't need it */
                                  filestruct.getBufIn().readInt();
                                  // Skip the bloom filter and the index
                                  IndexHelper.skipBloomFilterAndIndex(filestruct.getBufIn());
                                  // Hold at most two column families at a time and resolve
                                  // them eagerly to keep the memory footprint small
                                  if (columnFamilies.size() > 1)
                                  {
                                      // Merge the two resident column families
                                      merge(columnFamilies);
                                  }
                                  // deserialize into column families
                                  columnFamilies.add(ColumnFamily.serializer().deserialize(filestruct.getBufIn()));
                              }
                              catch (Exception ex)
                              {
                                  logger_.warn(LogUtil.throwableToString(ex));
                              }
                          }
                          // After merging, append the resolved row to the SSTable
                          columnFamily = resolveAndRemoveDeleted(columnFamilies);
                          columnFamilies.clear();
                          if (columnFamily != null)
                          {
                              /* serialize the cf with column indexes */
                              ColumnFamily.serializerWithIndexes().serialize(columnFamily, bufOut);
                          }
                      }
                      else
                      {
                          FileStruct filestruct = lfs.get(0);
                          try
                          {
                              /* read the length although we don't need it */
                              int size = filestruct.getBufIn().readInt();
                              bufOut.write(filestruct.getBufIn(), size);
                          }
                          catch (Exception ex)
                          {
                              logger_.warn(LogUtil.throwableToString(ex));
                              filestruct.close();
                              continue;
                          }
                      }
                      if (Range.isKeyInRanges(ranges, p.undecorateKey(lastkey)))
                      {
                          if (ssTableRange == null)
                          {
                              if (target != null)
                                  rangeFileLocation = rangeFileLocation + System.getProperty("file.separator") + "bootstrap";
                              FileUtils.createDirectory(rangeFileLocation);
                              ssTableRange = new SSTable(rangeFileLocation, mergedFileName);
                          }
                          try
                          {
                              ssTableRange.append(lastkey, bufOut);
                              compactedRangeBloomFilter.add(lastkey);
                          }
                          catch (Exception ex)
                          {
                              logger_.warn(LogUtil.throwableToString(ex));
                          }
                      }
                      totalkeysWritten++;
                      for (FileStruct filestruct : lfs)
                      {
                          try
                          {
                              filestruct.advance();
                              if (filestruct.isExhausted())
                              {
                                  continue;
                              }
                              /* keep looping until we find a key in the range */
                              while (!Range.isKeyInRanges(ranges, p.undecorateKey(filestruct.getKey())))
                              {
                                  filestruct.advance();
                                  if (filestruct.isExhausted())
                                  {
                                      break;
                                  }
                                  /* Check whether we need to continue; once we are done with the
                                     ranges, empty the queue, close all file handles and exit. */
                                  //if( !isLoop && StorageService.hash(filestruct.key).compareTo(maxRange.right()) > 0 && !filestruct.key.equals(""))
                                  //{
                                  //    filestruct.reader.close();
                                  //    filestruct = null;
                                  //    break;
                                  //}
                              }
                              if (!filestruct.isExhausted())
                              {
                                  pq.add(filestruct);
                              }
                              totalkeysRead++;
                          }
                          catch (Exception ex)
                          {
                              // Ignore the exception: the file may be corrupted, but we have
                              // read as far as possible from it and it will be deleted after
                              // compaction.
                              logger_.warn(LogUtil.throwableToString(ex));
                              filestruct.close();
                          }
                      }
                      lfs.clear();
                      lastkey = null;
                      if (fs != null)
                      {
                          // Add back the fs since we processed the rest of
                          // filestructs
                          pq.add(fs);
                      }
                  }
              }
              if (ssTableRange != null)
              {
                  if (fileList == null)
                      fileList = new ArrayList<String>();
                  ssTableRange.closeRename(compactedRangeBloomFilter, fileList);
                  if (compactedBloomFilters != null)
                      compactedBloomFilters.add(compactedRangeBloomFilter);
              }
          }
        }
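
One detail worth pulling out of the loop above: rather than deserializing every on-disk version of a row and merging them all at the end, the code calls merge(columnFamilies) as soon as a third entry would join the list, so at most two column families are ever resident. A minimal sketch of that bounded-memory fold, with PairwiseMerge and Row as hypothetical stand-ins for the ColumnFamily machinery:

import java.util.ArrayList;
import java.util.List;

class PairwiseMerge
{
    // Stand-in for ColumnFamily: merging two versions yields one combined row.
    interface Row { Row merge(Row other); }

    static Row mergeAll(Iterable<Row> versions)
    {
        List<Row> working = new ArrayList<Row>();
        for (Row r : versions)
        {
            // Collapse the two resident rows before admitting a third,
            // so the working set never grows with the number of versions.
            if (working.size() > 1)
            {
                Row merged = working.get(0).merge(working.get(1));
                working.clear();
                working.add(merged);
            }
            working.add(r);
        }
        if (working.isEmpty())
            return null;
        return working.size() == 1 ? working.get(0) : working.get(0).merge(working.get(1));
    }
}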

        }
        PriorityQueue<FileStruct> pq = initializePriorityQueue(files, null, minBufferSize);
        if (pq.size() > 0)
        {
            String mergedFileName = getTempFileName( files );
            SSTable ssTable = null;
            String lastkey = null;
            List<FileStruct> lfs = new ArrayList<FileStruct>();
            DataOutputBuffer bufOut = new DataOutputBuffer();
            int expectedBloomFilterSize = SSTable.getApproximateKeyCount(files);
            expectedBloomFilterSize = (expectedBloomFilterSize > 0) ? expectedBloomFilterSize : SSTable.indexInterval();
            logger_.debug("Expected bloom filter size : " + expectedBloomFilterSize);
            /* Create the bloom filter for the compacted file. */
            BloomFilter compactedBloomFilter = new BloomFilter(expectedBloomFilterSize, 15);
            List<ColumnFamily> columnFamilies = new ArrayList<ColumnFamily>();

            while (pq.size() > 0 || lfs.size() > 0)
            {
                FileStruct fs = null;
                if (pq.size() > 0)
                {
                    fs = pq.poll();
                }
                if (fs != null
                        && (lastkey == null || lastkey.equals(fs.getKey())))
                {
                    // The keys are the same, so add this entry to the lfs list
                    lastkey = fs.getKey();
                    lfs.add(fs);
                }
                else
                {
                    Collections.sort(lfs, new FileStructComparator());
                    ColumnFamily columnFamily;
                    bufOut.reset();
                    if(lfs.size() > 1)
                    {
                        for (FileStruct filestruct : lfs)
                        {
                            try
                            {
                                /* read the length although we don't need it */
                                filestruct.getBufIn().readInt();
                                // Skip the Index
                                IndexHelper.skipBloomFilterAndIndex(filestruct.getBufIn());
                                // Hold at most two column families at a time and resolve
                                // them eagerly to keep the memory footprint small
                                if(columnFamilies.size() > 1)
                                {
                                    merge(columnFamilies);
                                }
                                // deserialize into column families
                                columnFamilies.add(ColumnFamily.serializer().deserialize(filestruct.getBufIn()));
                            }
                            catch ( Exception ex)
                            {
                                logger_.warn("error in filecompaction", ex);
                            }
                        }
                        // After merging, append the resolved row to the SSTable
                        columnFamily = resolveAndRemoveDeleted(columnFamilies);
                        columnFamilies.clear();
                        if( columnFamily != null )
                        {
                            /* serialize the cf with column indexes */
                            ColumnFamily.serializerWithIndexes().serialize(columnFamily, bufOut);
                        }
                    }
                    else
                    {
                        FileStruct filestruct = lfs.get(0);
                        try
                        {
                            /* read the length although we don't need it */
                            int size = filestruct.getBufIn().readInt();
                            bufOut.write(filestruct.getBufIn(), size);
                        }
                        catch ( Exception ex)
                        {
                            logger_.warn("error in file compaction", ex);
                            filestruct.close();
                            continue;
                        }
                    }

                    if ( ssTable == null )
                    {
                        ssTable = new SSTable(compactionFileLocation, mergedFileName);
                    }
                    ssTable.append(lastkey, bufOut);

                    /* Fill the bloom filter with the key */
                    doFill(compactedBloomFilter, lastkey);
                    totalkeysWritten++;
                    for (FileStruct filestruct : lfs)
                    {
                        try
                        {
                            filestruct.advance();
                            if (filestruct.isExhausted())
                            {
                                continue;
                            }
                            pq.add(filestruct);
                            totalkeysRead++;
                        }
                        catch ( Throwable ex )
                        {
                            // Ignore the exception as it might be a corrupted file
                            // in any case we have read as far as possible from it
                            // and it will be deleted after compaction.
                            filestruct.close();
                        }
                    }
                    lfs.clear();
                    lastkey = null;
                    if (fs != null)
                    {
                        /* Add back the fs since we processed the rest of filestructs */
                        pq.add(fs);
                    }
                }
            }
            if ( ssTable != null )
            {
                ssTable.closeRename(compactedBloomFilter);
                newfile = ssTable.getDataFileLocation();
            }
            lock_.writeLock().lock();
            try
            {
                for (String file : files)

        /* Figure out the keys in the index file to relocate the node */
        List<String> ssTables = Table.open(table).getAllSSTablesOnDisk();
        /* Load the indexes into memory */
        for ( String df : ssTables )
        {
          SSTable ssTable = new SSTable(df);
          ssTable.close();
        }
        /* We should have only one file since we just compacted. */       
        List<String> indexedKeys = SSTable.getIndexedKeys();       
        storageService_.relocate(indexedKeys.toArray( new String[0]) );
       

            return;
        }

        String directory = DatabaseDescriptor.getDataFileLocation();
        String filename = cfStore.getNextFileName();
        SSTable ssTable = new SSTable(directory, filename);

        // sort keys in the order they would be in when decorated
        final IPartitioner partitioner = StorageService.getPartitioner();
        final Comparator<String> dc = partitioner.getDecoratedKeyComparator();
        ArrayList<String> orderedKeys = new ArrayList<String>(columnFamilies_.keySet());
        Collections.sort(orderedKeys, new Comparator<String>()
        {
            public int compare(String o1, String o2)
            {
                return dc.compare(partitioner.decorateKey(o1), partitioner.decorateKey(o2));
            }
        });
        DataOutputBuffer buffer = new DataOutputBuffer();
        /* Use this BloomFilter to decide if a key exists in a SSTable */
        BloomFilter bf = new BloomFilter(columnFamilies_.size(), 15);
        for (String key : orderedKeys)
        {
            buffer.reset();
            ColumnFamily columnFamily = columnFamilies_.get(key);
            if ( columnFamily != null )
            {
                /* serialize the cf with column indexes */
                ColumnFamily.serializerWithIndexes().serialize( columnFamily, buffer );
                /* Now write the key and value to disk */
                ssTable.append(partitioner.decorateKey(key), buffer);
                bf.add(key);
                columnFamily.clear();
            }
        }
        ssTable.close(bf);
        cfStore.onMemtableFlush(cLogCtx);
        cfStore.storeLocation( ssTable.getDataFileLocation(), bf );
        buffer.close();

        columnFamilies_.clear();
    }
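
The anonymous comparator above exists because SSTable.append() must be fed keys in the order the partitioner stores them, i.e. the order of the decorated keys, which need not match the natural order of the raw strings. A small self-contained sketch of how the two orders can diverge; decorate() here is a deliberately artificial stand-in (it just reverses the key), not a real partitioner:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

class DecoratedSortSketch
{
    // Hypothetical "decoration": reversing the key is enough to show that
    // decorated order and natural String order can disagree.
    static String decorate(String key)
    {
        return new StringBuilder(key).reverse().toString();
    }

    public static void main(String[] args)
    {
        List<String> keys = new ArrayList<String>(Arrays.asList("ab", "ba", "aa"));
        // Sort by the decorated form, exactly as the flush code sorts by
        // partitioner.decorateKey(...) before appending to the SSTable.
        keys.sort(Comparator.comparing(DecoratedSortSketch::decorate));
        System.out.println(keys); // [aa, ba, ab] -- not the natural order [aa, ab, ba]
    }
}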

                 * If the file is a Data File we need to load the indices associated
                 * with this file. We also need to cache the file name in the SSTables
                 * list of the associated Column Family. Also merge the CBF into the
                 * sampler.
                */               
                SSTable ssTable = new SSTable(streamContext.getTargetFile() );
                ssTable.close();
                logger_.debug("Merging the counting bloom filter in the sampler ...");               
                String[] pieces = FBUtilities.strip(fileName, "-");
                Table.open(pieces[0]).getColumnFamilyStore(pieces[1]).addToList(streamContext.getTargetFile());
            }
           

        /*
         * Use the SSTable to write the contents of the TreeMap
         * to disk.
        */
        SSTable ssTable = new SSTable(directory, filename);
        List<String> keys = new ArrayList<String>( columnFamilies_.keySet() );
        Collections.sort(keys);       
        /* Use this BloomFilter to decide if a key exists in a SSTable */
        BloomFilter bf = new BloomFilter(keys.size(), 8);
        for ( String key : keys )
        {          
            byte[] bytes = columnFamilies_.get(key);
            if ( bytes.length > 0 )
            {             
                /* Now write the key and value to disk */
                ssTable.append(key, bytes);
                bf.add(key);
            }
        }
        ssTable.close(bf);
        cfStore.storeLocation( ssTable.getDataFileLocation(), bf );
        columnFamilies_.clear();      
    }
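
Both flush paths size their BloomFilter from the expected key count but pass a different second argument (8 here, 15 in the newer paths above). The listing does not say whether that argument is a hash count or a bits-per-key figure, so the sketch below simply evaluates the textbook false-positive estimate (1 - e^(-kn/m))^k for a filter with m bits, n keys and k hash functions; all concrete numbers are illustrative assumptions, not Cassandra's actual sizing.

class BloomMath
{
    // Classic false-positive estimate for a Bloom filter:
    // p ~= (1 - e^(-k*n/m))^k, with m bits, n inserted keys, k hash functions.
    static double falsePositiveRate(long m, long n, int k)
    {
        return Math.pow(1.0 - Math.exp(-(double) k * n / m), k);
    }

    public static void main(String[] args)
    {
        long n = 1000;     // keys, as in the test fragment below
        long m = 15 * n;   // assumed ~15 bits per key (illustrative only)
        System.out.printf("k=8:  %.5f%n", falsePositiveRate(m, n, 8));
        System.out.printf("k=15: %.5f%n", falsePositiveRate(m, n, 15));
    }
}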

public class SSTableTest
{
    private static void rawSSTableWrite() throws Throwable
    {
        SSTable ssTable = new SSTable("C:\\Engagements\\Cassandra", "Table-Test-1");
        DataOutputBuffer bufOut = new DataOutputBuffer();
        BloomFilter bf = new BloomFilter(1000, 8);
        byte[] bytes = new byte[64*1024];
        Random random = new Random();
        for ( int i = 100; i < 1000; ++i )
        {
            String key = Integer.toString(i);
            ColumnFamily cf = new ColumnFamily("Test", "Standard");
            bufOut.reset();          
            // random.nextBytes(bytes);
            cf.addColumn("C", "Avinash Lakshman is a good man".getBytes(), i);
            ColumnFamily.serializerWithIndexes().serialize(cf, bufOut);
            ssTable.append(key, bufOut);           
            bf.add(key);
        }
        ssTable.close(bf);
    }


    private static void readSSTable() throws Throwable
    {
        SSTable ssTable = new SSTable("C:\\Engagements\\Cassandra\\Table-Test-1-Data.db")
        for ( int i = 100; i < 1000; ++i )
        {
            String key = Integer.toString(i);           
            DataInputBuffer bufIn = ssTable.next(key, "Test:C");
            ColumnFamily cf = ColumnFamily.serializer().deserialize(bufIn);
            if ( cf != null )
            {           
                System.out.println("KEY:" + key);
                System.out.println(cf.name());
