Package org.apache.cassandra.utils

Examples of org.apache.cassandra.utils.BloomFilter
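
In these sources, BloomFilter backs two membership tests: a per-sstable filter over row keys, letting reads skip sstables that cannot contain a key, and a per-row filter over column names, letting slices skip columns that are not there. The excerpts below show both. As a minimal sketch of the shared pattern, assuming the pre-1.0 API visible in the excerpts (a constructor taking an expected element count and a sizing parameter, plus add/isPresent membership methods; exact signatures vary across Cassandra versions, so treat this as illustrative rather than definitive):

    import org.apache.cassandra.utils.BloomFilter;

    public class BloomFilterExample
    {
        public static void main(String[] args)
        {
            // Size the filter up front for the number of elements we expect to add.
            // The second argument is a sizing parameter (a hash count in some
            // versions, buckets per element in others); the excerpts below pass
            // 15 for row-key filters and 4 for column-name filters.
            BloomFilter bf = new BloomFilter(1000, 15);

            bf.add("row-key-1");
            bf.add("row-key-2");

            // No false negatives: anything that was added is reported present.
            assert bf.isPresent("row-key-1");

            // False positives are possible: a hit only means "might be present",
            // so the caller still verifies against the sstable itself.
            if (bf.isPresent("row-key-3"))
                System.out.println("possible hit; check the data file");
            else
                System.out.println("definitely absent; skip the disk read");
        }
    }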


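The first excerpt appears to come from compaction: the output file's key filter is sized from SSTable.getApproximateKeyCount(files), falling back to SSTable.indexInterval() when no estimate is available, and every key appended to the compacted sstable is also added to the filter.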
              DataOutputBuffer bufOut = new DataOutputBuffer();
              int expectedBloomFilterSize = SSTable.getApproximateKeyCount(files);
              expectedBloomFilterSize = (expectedBloomFilterSize > 0) ? expectedBloomFilterSize : SSTable.indexInterval();
              logger_.debug("Expected bloom filter size : " + expectedBloomFilterSize);
              /* Create the bloom filter for the compacted file. */
              BloomFilter compactedRangeBloomFilter = new BloomFilter(expectedBloomFilterSize, 15);
              List<ColumnFamily> columnFamilies = new ArrayList<ColumnFamily>();

              while (pq.size() > 0 || lfs.size() > 0)
              {
                  FileStruct fs = null;
                  if (pq.size() > 0)
                  {
                      fs = pq.poll();
                  }
                  if (fs != null
                          && (lastkey == null || lastkey.equals(fs.getKey())))
                  {
                        // the keys match, so add this file to the lfs list
                        lastkey = fs.getKey();
                        lfs.add(fs);
                  }
                  else
                  {
                        Collections.sort(lfs, new FileStructComparator());
                        ColumnFamily columnFamily;
                        bufOut.reset();
                        if (lfs.size() > 1)
                        {
                            for (FileStruct filestruct : lfs)
                            {
                                try
                                {
                                    /* read the length although we don't need it */
                                    filestruct.getBufIn().readInt();
                                    /* skip the bloom filter and the column index */
                                    IndexHelper.skipBloomFilterAndIndex(filestruct.getBufIn());
                                    /* merge as soon as we hold two column families, to keep the memory footprint small */
                                    if (columnFamilies.size() > 1)
                                    {
                                        merge(columnFamilies);
                                    }
                                    /* deserialize into column families */
                                    columnFamilies.add(ColumnFamily.serializer().deserialize(filestruct.getBufIn()));
                                }
                                catch (Exception ex)
                                {
                                    logger_.warn(LogUtil.throwableToString(ex));
                                }
                            }
                            /* everything is merged; append the resolved row to the sstable */
                            columnFamily = resolveAndRemoveDeleted(columnFamilies);
                            columnFamilies.clear();
                            if (columnFamily != null)
                            {
                                /* serialize the cf with column indexes */
                                ColumnFamily.serializerWithIndexes().serialize(columnFamily, bufOut);
                            }
                        }
                        else
                        {
                            FileStruct filestruct = lfs.get(0);
                            try
                            {
                                /* read the length although we don't need it */
                                int size = filestruct.getBufIn().readInt();
                                bufOut.write(filestruct.getBufIn(), size);
                            }
                            catch (Exception ex)
                            {
                                logger_.warn(LogUtil.throwableToString(ex));
                                filestruct.close();
                                continue;
                            }
                        }
                        if (Range.isKeyInRanges(ranges, p.undecorateKey(lastkey)))
                        {
                            if (ssTableRange == null)
                            {
                                if (target != null)
                                    rangeFileLocation = rangeFileLocation + System.getProperty("file.separator") + "bootstrap";
                                FileUtils.createDirectory(rangeFileLocation);
                                ssTableRange = new SSTable(rangeFileLocation, mergedFileName);
                            }
                            try
                            {
                                ssTableRange.append(lastkey, bufOut);
                                compactedRangeBloomFilter.add(lastkey);
                            }
                            catch (Exception ex)
                            {
                                logger_.warn(LogUtil.throwableToString(ex));
                            }



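This excerpt appears to come from the memtable flush path: the filter is sized to the number of cached rows, each key is added as its row is appended, and the finished filter is handed to ssTable.close(bf) to be persisted with the data: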
                return dc.compare(partitioner.decorateKey(o1), partitioner.decorateKey(o2));
            }
        });
        DataOutputBuffer buffer = new DataOutputBuffer();
        /* Use this BloomFilter to decide if a key exists in a SSTable */
        BloomFilter bf = new BloomFilter(columnFamilies_.size(), 15);
        for (String key : orderedKeys)
        {
            buffer.reset();
            ColumnFamily columnFamily = columnFamilies_.get(key);
            if ( columnFamily != null )
            {
                /* serialize the cf with column indexes */
                ColumnFamily.serializerWithIndexes().serialize( columnFamily, buffer );
                /* Now write the key and value to disk */
                ssTable.append(partitioner.decorateKey(key), buffer);
                bf.add(key);
                columnFamily.clear();
            }
        }
        ssTable.close(bf);
        cfStore.onMemtableFlush(cLogCtx);

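On the write side, ColumnFamily serialization creates a filter over the row's column names and writes it out ahead of the columns, so readers can probe for names without deserializing the row: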
   * @throws IOException
   */
    public static void serialize(ColumnFamily columnFamily, DataOutput dos)
    {
        Collection<IColumn> columns = columnFamily.getSortedColumns();
        BloomFilter bf = createColumnBloomFilter(columns);                   
        /* Write out the bloom filter. */
        DataOutputBuffer bufOut = new DataOutputBuffer();
        try
        {
            BloomFilter.serializer().serialize(bf, bufOut);

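The helper that builds the column filter counts columns (including subcolumns of super columns) and adds every name; note the sizing parameter of 4, versus the 15 used for row keys. The excerpt starts mid-method, so the signature and the columnCount initialization below are reconstructed from the call site in the previous excerpt: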
    private static BloomFilter createColumnBloomFilter(Collection<IColumn> columns)
    {
        int columnCount = 0;
        for (IColumn column : columns)
        {
            columnCount += column.getObjectCount();
        }

        BloomFilter bf = BloomFilter.getFilter(columnCount, 4);
        for (IColumn column : columns)
        {
            bf.add(column.name());
            /* If this is SuperColumn type Column Family we need to get the subColumns too. */
            if (column instanceof SuperColumn)
            {
                Collection<IColumn> subColumns = column.getSubColumns();
                for (IColumn subColumn : subColumns)
                {
                    bf.add(subColumn.name());
                }
            }
        }
        return bf;
    }

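The read path mirrors this: after seeking to a row, the column filter is read back with IndexHelper.defreezeBloomFilter and any requested column names it rules out are dropped before any column data is touched: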
            DecoratedKey keyInDisk = ssTable.getPartitioner().convertFromDiskFormat(file.readUTF());
            assert keyInDisk.equals(decoratedKey) : keyInDisk;
            file.readInt(); // data size

            /* Read the bloom filter summarizing the columns */
            BloomFilter bf = IndexHelper.defreezeBloomFilter(file);
            List<byte[]> filteredColumnNames = new ArrayList<byte[]>(columnNames.size());
            for (byte[] name : columnNames)
            {
                if (bf.isPresent(name))
                {
                    filteredColumnNames.add(name);
                }
            }
            if (filteredColumnNames.isEmpty())

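A variant of the same helper, apparently from a different revision, constructs the filter directly instead of going through the BloomFilter.getFilter factory (the first two lines are reconstructed the same way as above):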
    private static BloomFilter createColumnBloomFilter(Collection<IColumn> columns)
    {
        int columnCount = 0;
        for (IColumn column : columns)
        {
            columnCount += column.getObjectCount();
        }

        BloomFilter bf = new BloomFilter(columnCount, 4);
        for (IColumn column : columns)
        {
            bf.add(column.name());
            /* If this is SuperColumn type Column Family we need to get the subColumns too. */
            if (column instanceof SuperColumn)
            {
                Collection<IColumn> subColumns = column.getSubColumns();
                for (IColumn subColumn : subColumns)
                {
                    bf.add(subColumn.name());
                }
            }
        }
        return bf;
    }

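Finally, SSTableWriter pre-sizes the per-sstable key filter from the key count supplied to its constructor: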
    public SSTableWriter(String filename, long keyCount, IPartitioner partitioner) throws IOException
    {
        super(filename, partitioner);
        dataFile = new BufferedRandomAccessFile(path, "rw", (int)(DatabaseDescriptor.getFlushDataBufferSizeInMB() * 1024 * 1024));
        indexFile = new BufferedRandomAccessFile(indexFilename(), "rw", (int)(DatabaseDescriptor.getFlushIndexBufferSizeInMB() * 1024 * 1024));
        bf = new BloomFilter((int)keyCount, 15); // TODO fix long -> int cast
    }
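
Across these excerpts the pattern is consistent: the filter is sized up front from an estimate (approximate key count, cached row count, column count), since a bloom filter's bit array is fixed at construction; row-key filters pass 15 as the sizing parameter and column-name filters pass 4.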