Package java.util.zip

Examples of java.util.zip.Adler32.update()
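
Adler32 implements the java.util.zip.Checksum interface: you feed it bytes with update() (a whole array, a slice via update(byte[], int, int), or a single byte) and read the running 32-bit value, widened to a long, with getValue(). Before the excerpts below, here is a minimal self-contained sketch; the class name and input string are purely illustrative:

import java.nio.charset.StandardCharsets;
import java.util.zip.Adler32;

public class Adler32Example {
    public static void main(String[] args) {
        byte[] data = "any payload will do".getBytes(StandardCharsets.UTF_8);

        Adler32 checksum = new Adler32();
        checksum.update(data);                 // or update(data, offset, length) for a slice
        System.out.printf("adler32 = 0x%08x%n", checksum.getValue());

        checksum.reset();                      // clears the state so the instance can be reused
    }
}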


                // Rewind the header buffer and skip to the length field of the batch control record.
                buff.reset();
                buff.skip(5+Journal.BATCH_CONTROL_RECORD_MAGIC.length);
                buff.writeInt(sequence.getLength()-Journal.BATCH_CONTROL_RECORD_SIZE);
                if( journal.isChecksum() ) {
                  // Optionally record an Adler-32 checksum of the batch payload (the bytes after the control record).
                  Checksum checksum = new Adler32();
                  checksum.update(sequence.getData(), sequence.getOffset()+Journal.BATCH_CONTROL_RECORD_SIZE, sequence.getLength()-Journal.BATCH_CONTROL_RECORD_SIZE);
                  buff.writeLong(checksum.getValue());
                }

                // Now do the 1 big write.
                file.seek(wb.offset);


                // factor in this case since we know
                // our write batches are going to be much larger.
                Checksum checksum = new Adler32();
                for (PageWrite w : batch) {
                    try {
                        checksum.update(w.diskBound, 0, pageSize);
                    } catch (Throwable t) {
                        throw IOExceptionSupport.create(
                                "Cannot create recovery file. Reason: " + t, t);
                    }
                }

                byte []data = new byte[pageSize];
                if( recoveryFile.read(data, 0, pageSize) != pageSize ) {
                    // Invalid recovery record: could not fully read the data, probably due to a partial write to the recovery buffer.
                    return nextTxId;
                }
                checksum.update(data, 0, pageSize);
                batch.put(offset, data);
            }
        } catch (Exception e) {
            // If an error occurred, it was because the redo buffer was not fully written out,
            // so don't redo it, as the pages should still be consistent.
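
The excerpt stops before the verification step, but the usual follow-up is to compare the accumulated value with the checksum that was stored when the recovery buffer was written, and to skip the redo on a mismatch. A rough sketch of that check, where expectedChecksum is a hypothetical local holding the stored value (nextTxId is the same variable used above):

                if (checksum.getValue() != expectedChecksum) {
                    // The recovery buffer was not completely written out, so do not replay it.
                    return nextTxId;
                }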

  @Benchmark byte adler32Checksum(int reps) throws Exception {
    byte result = 0x01;
    for (int i = 0; i < reps; i++) {
      Adler32 checksum = new Adler32();
      checksum.update(testBytes);
      // XOR the checksum into the result so the computation is not optimized away.
      result ^= checksum.getValue();
    }
    return result;
  }
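
The benchmark above allocates a fresh Adler32 on every iteration. Because the class also exposes reset(), a variant that reuses a single instance is equally valid and skips the per-iteration allocation; the following is a sketch in the same style, not part of the original benchmark class (the method name is made up, and testBytes is assumed to be the same field):

  @Benchmark byte adler32ChecksumReused(int reps) throws Exception {
    byte result = 0x01;
    Adler32 checksum = new Adler32();   // one instance shared across iterations
    for (int i = 0; i < reps; i++) {
      checksum.reset();                 // discard the previous iteration's state
      checksum.update(testBytes);
      result ^= checksum.getValue();
    }
    return result;
  }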

           // Using Adler-32 instead of CRC-32 because it's much faster and its
           // weakness for short messages of a few hundred bytes is not a factor in this case since we know
           // our write batches are going to be much larger.
           Checksum checksum = new Adler32();
           for (PageWrite w : batch) {
               checksum.update(w.diskBound, 0, pageSize);
           }
          
           // Can we shrink the recovery buffer??
           if( recoveryPageCount > recoveryFileMaxPageCount ) {
               int t = Math.max(recoveryFileMinPageCount, batch.size());
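
The comment above spells out the usual trade-off: Adler-32 is cheaper to compute than CRC-32 but is a weaker error detector on very short inputs. Both classes sit behind the same java.util.zip.Checksum interface, so swapping one for the other is a one-line change. A small self-contained comparison sketch (the buffer size and random contents are arbitrary):

import java.util.Random;
import java.util.zip.Adler32;
import java.util.zip.CRC32;
import java.util.zip.Checksum;

public class ChecksumComparison {
    public static void main(String[] args) {
        byte[] page = new byte[4 * 1024];
        new Random(42).nextBytes(page);     // arbitrary test data

        Checksum adler = new Adler32();
        adler.update(page, 0, page.length);

        Checksum crc = new CRC32();         // same interface, so the call sites are identical
        crc.update(page, 0, page.length);

        System.out.println("adler32=" + adler.getValue() + " crc32=" + crc.getValue());
    }
}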

         */
        // Sort first so the checksum does not depend on the order the URIs were supplied in.
        Collections.sort(uris);

        // Fold each URI's ASCII form into a single running Adler-32 value.
        final Adler32 checksum = new Adler32();
        for (URI uri : uris) {
            checksum.update(uri.toASCIIString().getBytes());
        }
        return checksum.getValue();
    }
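
The name of the enclosing method is cut off by the excerpt; calling it might look like the following, where fingerprint() is a hypothetical stand-in for that method. Note that getValue() returns the 32-bit Adler-32 value widened into a long:

        List<URI> uris = new ArrayList<>(Arrays.asList(
                URI.create("http://example.org/a"),
                URI.create("http://example.org/b")));
        long fp = fingerprint(uris);        // hypothetical name for the method above
        System.out.printf("uri fingerprint = 0x%08x%n", fp);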
