Package org.apache.kahadb.util

Examples of org.apache.kahadb.util.ByteSequence
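
The fragments below are drawn from ActiveMQ / KahaDB sources and tests. Before them, a minimal sketch of the class itself may help: ByteSequence is a thin (data, offset, length) view over a byte[]. Every call used here (getData(), getOffset(), getLength(), indexOf(), compact()) appears in the fragments that follow; the behaviour noted in the comments is inferred from those fragments rather than stated by the class's documentation:

    import org.apache.kahadb.util.ByteSequence;

    public class ByteSequenceSketch {
        public static void main(String[] args) {
            byte[] raw = "hello kahadb journal".getBytes();

            // View a six-byte slice of the buffer without copying it.
            ByteSequence seq = new ByteSequence(raw, 6, 6); // views "kahadb"

            // indexOf scans for a needle inside the sequence, as the batch
            // record scanner further down does with its magic header.
            ByteSequence all = new ByteSequence(raw);
            ByteSequence needle = new ByteSequence("kahadb".getBytes());
            int pos = all.indexOf(needle, 0); // 6

            // compact() (inferred from the header-building fragment below)
            // trims the backing array to the visible window, so getData()
            // afterwards returns exactly the slice's bytes.
            seq.compact();
            System.out.println(pos + " -> " + new String(seq.getData())); // 6 -> kahadb
        }
    }

The first fragment, apparently from a KahaDB job-scheduler store test, builds ByteSequence payloads from strings and obtains a JobScheduler to feed them to: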


    final int NUMBER = 1000;
    store.setDirectory(directory);
    store.start();
    List<ByteSequence> list = new ArrayList<ByteSequence>();
    for (int i = 0; i < NUMBER; i++) {
        // Each payload is just the bytes of "testjob<i>".
        ByteSequence buff = new ByteSequence(("testjob" + i).getBytes());
        list.add(buff);
    }
    JobScheduler js = store.getJobScheduler("test");
    int count = 0;
    long startTime = 10 * 60 * 1000; // ten minutes, in milliseconds
    long period = startTime;
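The next fragment, apparently from ActiveMQ's scheduler broker, reads the AMQ_SCHEDULED_REPEAT message property and wraps the marshalled message bytes in a ByteSequence before handing the job to the internal scheduler (packet, cronEntry, delay and period are set earlier in the method, outside this fragment):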


            // An AMQ_SCHEDULED_REPEAT property, if present, supplies the repeat count.
            Object repeatValue = msg.getProperty(ScheduledMessage.AMQ_SCHEDULED_REPEAT);
            if (repeatValue != null) {
                repeat = (Integer) TypeConversionSupport.convert(repeatValue, Integer.class);
            }
            // Wrap the marshalled message bytes in a ByteSequence and schedule the job.
            getInternalScheduler().schedule(msg.getMessageId().toString(),
                    new ByteSequence(packet.data, packet.offset, packet.length), cronEntry, delay, period, repeat);

        } else {
            super.send(producerExchange, messageSend);
        }
    }
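A journal-backed store can return a payload by its journal Location; the ByteSequence is whatever the journal read produces: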


    synchronized ByteSequence getPayload(Location location) throws IllegalStateException, IOException {
        // The journal read already yields the payload as a ByteSequence.
        return this.journal.read(location);
    }
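Here a batch control record header is marshalled into a DataByteArrayOutputStream and converted to a ByteSequence; compact() ensures the returned array is exactly header-sized: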

        try {
            // Marshal the fixed header: 4-byte size, 1-byte type, then the magic bytes.
            DataByteArrayOutputStream os = new DataByteArrayOutputStream();
            os.writeInt(BATCH_CONTROL_RECORD_SIZE);
            os.writeByte(BATCH_CONTROL_RECORD_TYPE);
            os.write(BATCH_CONTROL_RECORD_MAGIC);
            ByteSequence sequence = os.toByteSequence();
            // compact() trims the backing array so getData() is exactly header-sized.
            sequence.compact();
            return sequence.getData();
        } catch (IOException e) {
            throw new RuntimeException("Could not create batch control record header.", e);
        }
    }
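Scanning a data file for the next batch control record: the file is read in 4 KB chunks and ByteSequence.indexOf locates the header. Successive chunks overlap by the header length, so a header straddling a chunk boundary is still found: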


    private int findNextBatchRecord(DataFileAccessor reader, int offset) throws IOException {
        ByteSequence header = new ByteSequence(BATCH_CONTROL_RECORD_HEADER);
        byte[] data = new byte[4 * 1024];
        ByteSequence bs = new ByteSequence(data, 0, reader.read(offset, data));

        int pos = 0;
        while (true) {
            pos = bs.indexOf(header, pos);
            if (pos >= 0) {
                return offset + pos;
            } else {
                // Need to load the next data chunk in...
                if (bs.length != data.length) {
                    // A short read means we reached EOF without finding a header.
                    return -1;
                }
                // Advance by less than a full chunk so the chunks overlap by the
                // header length; a header straddling the boundary is still found.
                offset += bs.length - BATCH_CONTROL_RECORD_HEADER.length;
                bs = new ByteSequence(data, 0, reader.read(offset, data));
                pos = 0;
            }
        }
    }
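Reading a record through the accessor pool; the finally block guarantees the accessor is returned to the pool: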


    public synchronized ByteSequence read(Location location) throws IOException, IllegalStateException {
        DataFile dataFile = getDataFile(location);
        DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
        ByteSequence rc = null;
        try {
            rc = reader.readRecord(location);
        } finally {
            // Always return the accessor to the pool, even if the read failed.
            accessorPool.closeDataFileAccessor(reader);
        }
        return rc;
    }
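The batch-write path of a journal appender. The fragment opens inside the loop that marshals the queued writes into buff; the code then rewinds the buffer to fill in the batch size and an optional Adler32 checksum, and finally issues one big file write (and, if configured, replicates the batch):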

                    buff.writeByte(write.location.getType());
                    buff.write(write.data.getData(), write.data.getOffset(), write.data.getLength());
                    write = write.getNext();
                }

                ByteSequence sequence = buff.toByteSequence();

                // Now we can fill in the batch control record properly: rewind the
                // buffer, skip the 4-byte size, 1-byte type and the magic, then
                // write the size of the batch that follows the control record.
                buff.reset();
                buff.skip(5 + Journal.BATCH_CONTROL_RECORD_MAGIC.length);
                buff.writeInt(sequence.getLength() - Journal.BATCH_CONTROL_RECORD_SIZE);
                if (journal.isChecksum()) {
                    // The checksum covers everything after the batch control record.
                    Checksum checksum = new Adler32();
                    checksum.update(sequence.getData(), sequence.getOffset() + Journal.BATCH_CONTROL_RECORD_SIZE,
                            sequence.getLength() - Journal.BATCH_CONTROL_RECORD_SIZE);
                    buff.writeLong(checksum.getValue());
                }

                // Now do the one big write.
                file.seek(wb.offset);
                if (maxStat > 0) {
                    if (statIdx < maxStat) {
                        stats[statIdx++] = sequence.getLength();
                    } else {
                        long all = 0;
                        while (statIdx > 0) {
                            all += stats[--statIdx];
                        }
                        logger.info("Ave writeSize: " + all / maxStat);
                    }
                }
                file.write(sequence.getData(), sequence.getOffset(), sequence.getLength());

                ReplicationTarget replicationTarget = journal.getReplicationTarget();
                if (replicationTarget != null) {
                    replicationTarget.replicate(wb.writes.getHead().location, sequence, forceToDisk);
                }
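Reading a record payload directly from the file: this fragment skips the record header and wraps the remaining bytes in a new ByteSequence: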

                file.seek(location.getOffset() + Journal.RECORD_HEAD_SPACE);
            }

            // Skip the record header and wrap the remaining payload bytes.
            byte[] data = new byte[location.getSize() - Journal.RECORD_HEAD_SPACE];
            file.readFully(data);
            return new ByteSequence(data, 0, data.length);

        } catch (RuntimeException e) {
            throw new IOException("Invalid location: " + location + ": " + e);
        }
    }

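Finally, a PList test: ByteSequence payloads are appended with addLast and then read back, verifying that insertion order is preserved: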

    public void testAddLast() throws Exception {
        final int COUNT = 1000;
        Map<String, ByteSequence> map = new LinkedHashMap<String, ByteSequence>();
        for (int i = 0; i < COUNT; i++) {
            String test = "test" + i;
            ByteSequence bs = new ByteSequence(test.getBytes());
            map.put(test, bs);
            plist.addLast(test, bs);
        }
        assertEquals(COUNT, plist.size());
        int count = 0;
        for (ByteSequence bs : map.values()) {
            // The LinkedHashMap preserves insertion order, so entry "count"
            // of the PList should round-trip to the same string.
            String origStr = new String(bs.getData(), bs.getOffset(), bs.getLength());
            PListEntry entry = plist.get(count);
            String plistString = new String(entry.getByteSequence().getData(), entry.getByteSequence().getOffset(),
                    entry.getByteSequence().getLength());
            assertEquals(origStr, plistString);
            count++;
        }
    }