Package org.apache.flink.core.memory

Examples of org.apache.flink.core.memory.MemorySegment


      this.numBuckets = newNumBuckets;
      // initialize all new buckets
      boolean oldSegment = (startOffset != 0);
      final int startSegment = oldSegment ? (oldNumSegments-1) : oldNumSegments;
      for (int i = startSegment, bucket = oldNumBuckets; i < newNumSegments && bucket < this.numBuckets; i++) {
        MemorySegment seg;
        int bucketOffset = 0;
        if(oldSegment) { // the first couple of new buckets may be located on an old segment
          seg = this.buckets[i];
          for (int k = (oldNumBuckets % bucketsPerSegment) ; k < bucketsPerSegment && bucket < this.numBuckets; k++, bucket++) {
            bucketOffset = k * HASH_BUCKET_SIZE; 
            // initialize the header fields
            seg.put(bucketOffset + HEADER_PARTITION_OFFSET, assignPartition(bucket, (byte)numPartitions));
            seg.putInt(bucketOffset + HEADER_COUNT_OFFSET, 0);
            seg.putLong(bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET);
          }
        } else {
          seg = getNextBuffer();
          // go over all buckets in the segment
          for (int k = 0; k < bucketsPerSegment && bucket < this.numBuckets; k++, bucket++) {
            bucketOffset = k * HASH_BUCKET_SIZE; 
            // initialize the header fields
            seg.put(bucketOffset + HEADER_PARTITION_OFFSET, assignPartition(bucket, (byte)numPartitions));
            seg.putInt(bucketOffset + HEADER_COUNT_OFFSET, 0);
            seg.putLong(bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET);
          }
        }       
        this.buckets[i] = seg;
        oldSegment = false; // we write on at most one old segment
      }
      int hashOffset = 0;
      int hash = 0;
      int pointerOffset = 0;
      long pointer = 0;
      IntArrayList hashList = new IntArrayList(NUM_ENTRIES_PER_BUCKET);
      LongArrayList pointerList = new LongArrayList(NUM_ENTRIES_PER_BUCKET);
      IntArrayList overflowHashes = new IntArrayList(64);
      LongArrayList overflowPointers = new LongArrayList(64);
      // go over all buckets and split them between old and new buckets
      for(int i = 0; i < numPartitions; i++) {
        InMemoryPartition<T> partition = this.partitions.get(i);
        final MemorySegment[] overflowSegments = partition.overflowSegments;
        int posHashCode = 0;
        for (int j = 0, bucket = i; j < this.buckets.length && bucket < oldNumBuckets; j++) {
          MemorySegment segment = this.buckets[j];
          // go over all buckets in the segment belonging to the partition
          for (int k = bucket % bucketsPerSegment; k < bucketsPerSegment && bucket < oldNumBuckets; k += numPartitions, bucket += numPartitions) {
            int bucketOffset = k * HASH_BUCKET_SIZE;
            if((int)segment.get(bucketOffset + HEADER_PARTITION_OFFSET) != i) {
              throw new IOException("Accessed wrong bucket! wanted: " + i + " got: " + segment.get(bucketOffset + HEADER_PARTITION_OFFSET));
            }
            // loop over all segments that are involved in the bucket (original bucket plus overflow buckets)
            int countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
            int numInSegment = 0;
            pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
            hashOffset = bucketOffset + BUCKET_HEADER_LENGTH;
            while (true) {
              while (numInSegment < countInSegment) {
                hash = segment.getInt(hashOffset);
                if((hash % this.numBuckets) != bucket && (hash % this.numBuckets) != (bucket+oldNumBuckets)) {
                  throw new IOException("wanted: " + bucket + " or " + (bucket + oldNumBuckets) + " got: " + hash%this.numBuckets);
                }
                pointer = segment.getLong(pointerOffset);
                hashList.add(hash);
                pointerList.add(pointer);
                pointerOffset += POINTER_LEN;
                hashOffset += HASH_CODE_LEN;
                numInSegment++;
              }
              // this segment is done. check if there is another chained bucket
              final long forwardPointer = segment.getLong(bucketOffset + HEADER_FORWARD_OFFSET);
              if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
                break;
              }
              final int overflowSegNum = (int) (forwardPointer >>> 32);
              segment = overflowSegments[overflowSegNum];
              bucketOffset = (int)(forwardPointer & 0xffffffff);
              countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
              pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
              hashOffset = bucketOffset + BUCKET_HEADER_LENGTH;
              numInSegment = 0;
            }
            segment = this.buckets[j];
            bucketOffset = k * HASH_BUCKET_SIZE;
            // reset bucket for re-insertion
            segment.putInt(bucketOffset + HEADER_COUNT_OFFSET, 0);
            segment.putLong(bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET);
            // refill table
            if(hashList.size() != pointerList.size()) {
              throw new IOException("Pointer and hash counts do not match. hashes: " + hashList.size() + " pointer: " + pointerList.size());
            }
            int newSegmentIndex = (bucket + oldNumBuckets) / bucketsPerSegment;
            MemorySegment newSegment = this.buckets[newSegmentIndex];
            // we need to avoid overflows in the first run
            int oldBucketCount = 0;
            int newBucketCount = 0;
            while(!hashList.isEmpty()) {
              hash = hashList.removeInt(hashList.size()-1);
              pointer = pointerList.removeLong(pointerList.size()-1);
              posHashCode = hash % this.numBuckets;
              if(posHashCode == bucket && oldBucketCount < NUM_ENTRIES_PER_BUCKET) {
                bucketOffset = (bucket % bucketsPerSegment) * HASH_BUCKET_SIZE;
                insertBucketEntryFromStart(partition, segment, bucketOffset, hash, pointer);
                oldBucketCount++;
              } else if(posHashCode == (bucket + oldNumBuckets) && newBucketCount < NUM_ENTRIES_PER_BUCKET) {
                bucketOffset = ((bucket + oldNumBuckets) % bucketsPerSegment) * HASH_BUCKET_SIZE;
                insertBucketEntryFromStart(partition, newSegment, bucketOffset, hash, pointer);
                newBucketCount++;
              } else if(posHashCode == (bucket + oldNumBuckets) || posHashCode == bucket) {
                overflowHashes.add(hash);
                overflowPointers.add(pointer);
              } else {
                throw new IOException("Accessed wrong bucket. Target: " + bucket + " or " + (bucket + oldNumBuckets) + " Hit: " + posHashCode);
              }
            }
            hashList.clear();
            pointerList.clear();
          }
        }
        // reset partition's overflow buckets and reclaim their memory
        this.availableMemory.addAll(partition.resetOverflowBuckets());
        // clear overflow lists
        int bucketArrayPos = 0;
        int bucketInSegmentPos = 0;
        MemorySegment bucket = null;
        while(!overflowHashes.isEmpty()) {
          hash = overflowHashes.removeInt(overflowHashes.size()-1);
          pointer = overflowPointers.removeLong(overflowPointers.size()-1);
          posHashCode = hash % this.numBuckets;
          bucketArrayPos = posHashCode >>> this.bucketsPerSegmentBits;
View Full Code Here


    long pointer = 0L;
    int pointerOffset = 0;
    int bucketOffset = 0;
    final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;
    for (int i = 0, bucket = partitionNumber; i < this.buckets.length && bucket < this.numBuckets; i++) {
      MemorySegment segment = this.buckets[i];
      // go over all buckets in the segment belonging to the partition
      for (int k = bucket % bucketsPerSegment; k < bucketsPerSegment && bucket < this.numBuckets; k += numPartitions, bucket += numPartitions) {
        bucketOffset = k * HASH_BUCKET_SIZE;
        if((int)segment.get(bucketOffset + HEADER_PARTITION_OFFSET) != partitionNumber) {
          throw new IOException("Accessed wrong bucket! wanted: " + partitionNumber + " got: " + segment.get(bucketOffset + HEADER_PARTITION_OFFSET));
        }
        // loop over all segments that are involved in the bucket (original bucket plus overflow buckets)
        int countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
        int numInSegment = 0;
        pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
        while (true) {
          while (numInSegment < countInSegment) {
            pointer = segment.getLong(pointerOffset);
            tempHolder = partition.readRecordAt(pointer, tempHolder);
            pointer = this.compactionMemory.appendRecord(tempHolder);
            segment.putLong(pointerOffset, pointer);
            pointerOffset += POINTER_LEN;
            numInSegment++;
          }
          // this segment is done. check if there is another chained bucket
          final long forwardPointer = segment.getLong(bucketOffset + HEADER_FORWARD_OFFSET);
          if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
            break;
          }
          final int overflowSegNum = (int) (forwardPointer >>> 32);
          segment = overflowSegments[overflowSegNum];
          bucketOffset = (int)(forwardPointer & 0xffffffff);
          countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
          pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
          numInSegment = 0;
        }
        segment = this.buckets[i];
      }
View Full Code Here

     */
    private boolean fillCache() throws IOException {
      if(currentBucketIndex >= table.numBuckets) {
        return false;
      }
      MemorySegment bucket = table.buckets[currentSegmentIndex];
      // get the basic characteristics of the bucket
      final int partitionNumber = bucket.get(currentBucketOffset + HEADER_PARTITION_OFFSET);
      final InMemoryPartition<T> partition = table.partitions.get(partitionNumber);
      final MemorySegment[] overflowSegments = partition.overflowSegments;
     
      int countInSegment = bucket.getInt(currentBucketOffset + HEADER_COUNT_OFFSET);
      int numInSegment = 0;
      int posInSegment = currentBucketOffset + BUCKET_POINTER_START_OFFSET;
      int bucketOffset = currentBucketOffset;

      // loop over all segments that are involved in the bucket (original bucket plus overflow buckets)
      while (true) {
        while (numInSegment < countInSegment) {
          long pointer = bucket.getLong(posInSegment);
          posInSegment += POINTER_LEN;
          numInSegment++;
          T target = table.buildSideSerializer.createInstance();
          try {
            target = partition.readRecordAt(pointer, target);
            cache.add(target);
          } catch (IOException e) {
              throw new RuntimeException("Error deserializing record from the Hash Table: " + e.getMessage(), e);
          }
        }
        // this segment is done. check if there is another chained bucket
        final long forwardPointer = bucket.getLong(bucketOffset + HEADER_FORWARD_OFFSET);
        if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
          break;
        }
        final int overflowSegNum = (int) (forwardPointer >>> 32);
        bucket = overflowSegments[overflowSegNum];
        bucketOffset = (int)(forwardPointer & 0xffffffff);
        countInSegment = bucket.getInt(bucketOffset + HEADER_COUNT_OFFSET);
        posInSegment = bucketOffset + BUCKET_POINTER_START_OFFSET;
        numInSegment = 0;
      }
      currentBucketIndex++;
      if(currentBucketIndex % bucketsPerSegment == 0) {
View Full Code Here

      final int searchHashCode = hash(this.probeTypeComparator.hash(probeSideRecord));
     
      final int posHashCode = searchHashCode % numBuckets;
     
      // get the bucket for the given hash code
      MemorySegment bucket = buckets[posHashCode >> bucketsPerSegmentBits];
      int bucketInSegmentOffset = (posHashCode & bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
     
      // get the basic characteristics of the bucket
      final int partitionNumber = bucket.get(bucketInSegmentOffset + HEADER_PARTITION_OFFSET);
      final InMemoryPartition<T> p = partitions.get(partitionNumber);
      final MemorySegment[] overflowSegments = p.overflowSegments;
     
      this.pairComparator.setReference(probeSideRecord);
     
      int countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
      int numInSegment = 0;
      int posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;

      // loop over all segments that are involved in the bucket (original bucket plus overflow buckets)
      while (true) {
       
        while (numInSegment < countInSegment) {
         
          final int thisCode = bucket.getInt(posInSegment);
          posInSegment += HASH_CODE_LEN;
           
          // check if the hash code matches
          if (thisCode == searchHashCode) {
            // get the pointer to the pair
            final int pointerOffset = bucketInSegmentOffset + BUCKET_POINTER_START_OFFSET + (numInSegment * POINTER_LEN);
            final long pointer = bucket.getLong(pointerOffset);
            numInSegment++;
           
            // deserialize the key to check whether it is really equal, or whether we had only a hash collision
            try {
              targetForMatch = p.readRecordAt(pointer, targetForMatch);
             
              if (this.pairComparator.equalToReference(targetForMatch)) {
                this.partition = p;
                this.bucket = bucket;
                this.pointerOffsetInBucket = pointerOffset;
                return targetForMatch;
              }
            }
            catch (IOException e) {
              throw new RuntimeException("Error deserializing record from the hashtable: " + e.getMessage(), e);
            }
          }
          else {
            numInSegment++;
          }
        }
       
        // this segment is done. check if there is another chained bucket
        final long forwardPointer = bucket.getLong(bucketInSegmentOffset + HEADER_FORWARD_OFFSET);
        if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
          return null;
        }
       
        final int overflowSegNum = (int) (forwardPointer >>> 32);
        bucket = overflowSegments[overflowSegNum];
        bucketInSegmentOffset = (int) (forwardPointer & 0xffffffff);
        countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
        posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;
        numInSegment = 0;
      }
    }
View Full Code Here

   *
   * @param numberOfSegments allocation count
   */
  public void allocateSegments(int numberOfSegments) {
    while(getBlockCount() < numberOfSegments) {
      MemorySegment next = this.availableMemory.nextSegment();
      if(next != null) {
        this.partitionPages.add(next);
      } else {
        return;
      }
View Full Code Here

    }
   

    @Override
    protected MemorySegment nextSegment(MemorySegment current, int bytesUsed) throws IOException {
      MemorySegment next = this.memSource.nextSegment();
      if(next == null) {
        throw new EOFException();
      }
      this.pages.add(next);
     
View Full Code Here

        readEnds.remove(i);
      }
    }

    // add the current memorySegment and reset this writer
    final MemorySegment current = getCurrentSegment();
    current.putInt(0, getCurrentPositionInSegment());
    fullBuffers.addLast(current);

    // create the reader
    final ReadEnd readEnd;
    if (numBuffersSpilled == 0 && emptyBuffers.size() >= minBuffersForWriteEnd) {
      // read completely from in-memory segments
      readEnd = new ReadEnd(fullBuffers.removeFirst(), emptyBuffers, fullBuffers, null, null, segmentSize, 0);
    } else {
      int toSpill = Math.min(minBuffersForSpilledReadEnd + minBuffersForWriteEnd - emptyBuffers.size(),
        fullBuffers.size());

      // reader reads also segments on disk
      // grab some empty buffers to re-read the first segment
      if (toSpill > 0) {
        // need to spill to make a buffers available
        if (currentWriter == null) {
          currentWriter = ioManager.createBlockChannelWriter(channelEnumerator.next(), emptyBuffers);
        }

        for (int i = 0; i < toSpill; i++) {
          currentWriter.writeBlock(fullBuffers.removeFirst());
        }
        numBuffersSpilled += toSpill;
      }

      // now close the writer and create the reader
      currentWriter.close();
      final BlockChannelReader reader = ioManager.createBlockChannelReader(currentWriter.getChannelID());

      // gather some memory segments to circulate while reading back the data
      final ArrayList<MemorySegment> readSegments = new ArrayList<MemorySegment>();
      try {
        while (readSegments.size() < minBuffersForSpilledReadEnd) {
          readSegments.add(emptyBuffers.take());
        }

        // read the first segment
        MemorySegment firstSeg = readSegments.remove(readSegments.size() - 1);
        reader.readBlock(firstSeg);
        firstSeg = reader.getReturnQueue().take();

        // create the read end reading one less buffer, because the first buffer is already read back
        readEnd = new ReadEnd(firstSeg, emptyBuffers, fullBuffers, reader, readSegments, segmentSize,
View Full Code Here

      return false;
    }

    private void forceDispose(List<MemorySegment> freeMemTarget) throws InterruptedException {
      // add the current segment
      final MemorySegment current = getCurrentSegment();
      clear();
      if (current != null) {
        freeMemTarget.add(current);
      }
View Full Code Here

    final int memRequiredMb = (numBuffers * bufferSize) / mb;

    for (int i = 0; i < numBuffers; i++) {
      try {
        byte[] buf = new byte[bufferSize];
        buffers.add(new MemorySegment(buf));
      } catch (OutOfMemoryError err) {
        int memAllocatedMb = ((i + 1) * bufferSize) / mb;

        String msg = String.format("Tried to allocate %d buffers of size %d bytes each (total: %d MB) " +
            "and ran out of memory after %d buffers (%d MB).",
View Full Code Here

    final int posHashCode = hashCode % this.numBuckets;
   
    // get the bucket for the given hash code
    final int bucketArrayPos = posHashCode >>> this.bucketsPerSegmentBits;
    final int bucketInSegmentPos = (posHashCode & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
    final MemorySegment bucket = this.buckets[bucketArrayPos];
   
    // get the basic characteristics of the bucket
    final int partitionNumber = bucket.get(bucketInSegmentPos + HEADER_PARTITION_OFFSET);
    InMemoryPartition<T> partition = this.partitions.get(partitionNumber);
   
   
    long pointer;
    try {
View Full Code Here

TOP

Related Classes of org.apache.flink.core.memory.MemorySegment

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle Inc. Contact software#gmail.com.