Package: org.mapdb

Code examples demonstrating usage of the org.mapdb.DB class.


/**
 * Demonstrates an SQL-style auto-incremented unique key built on Atomic.Long.
 * The counter variable is atomically incremented and its value persists across JVM shutdown.
 */
public class SQL_Auto_Incremental_Unique_Key {
    public static void main(String[] args) {
        // Temporary-file-backed database; the file is removed when no longer needed.
        DB db = DBMaker.newTempFileDB().make();

        // open or create a new map
        Map<Long, String> map = db.getTreeMap("map");

        // open existing or create new Atomic record with the given name;
        // if no record with that name exists, a new recid is created with value `0`
        Atomic.Long keyinc = db.getAtomicLong("map_keyinc");


        // Allocate a new unique key to use in the map.
        // Atomic.Long uses a `compare-and-swap` operation to atomically store the incremented value.
        // Key values can be used only for a single insert.
        // NOTE(review): excerpt truncated here by the page scraper ("View Full Code Here");
        // the allocation/insert statements are cut off.
View Full Code Here




    // Example: off-heap cache with time- and size-based expiration.
    public static void main(String[] args) {
        // init off-heap store with a 2GB size limit
        DB db = DBMaker
                .newMemoryDirectDB()    //use off-heap memory, on-heap is `.newMemoryDB()`
                .sizeLimit(2)           //limit store size to 2GB
                .transactionDisable()   //better performance
                .make();

        // create map; entries expire if not accessed (get, iterate) for 10 seconds,
        // or 30 seconds after 'put'.
        // There is also a maximal size limit to prevent OutOfMemoryException.
        HTreeMap map = db
                .createHashMap("cache")
                .expireMaxSize(1000000)
                .expireAfterWrite(30, TimeUnit.SECONDS)
                .expireAfterAccess(10, TimeUnit.SECONDS)
                .make();
        // NOTE(review): excerpt truncated here by the page scraper — the rest of
        // the method (including its closing brace) is cut off.
View Full Code Here

        // NOTE(review): this excerpt is truncated by the page scraper — the
        // enclosing method header is missing above this line.

        // TxMaker manages multiple concurrent transactions over a single store.
        TxMaker txMaker = DBMaker
                .newMemoryDB()
                .makeTxMaker();

        // Now open the first transaction and get a map from it
        DB tx1 = txMaker.makeTx();

        // create a map from the first transaction and fill it with data
        Map map1 = tx1.getTreeMap("testMap");
        for(int i=0;i<1e4;i++){
            map1.put(i,"aaa"+i);
        }

        // commit the first transaction
        tx1.commit();

        // !! IMPORTANT !!
        // !! A DB transaction can be used only once;
        // !! it throws an 'already closed' exception after it was committed/rolled back
        // !! IMPORTANT !!
        //map1.put(1111,"dqdqwd"); // this would fail

        // open a second transaction
        DB tx2 = txMaker.makeTx();
        Map map2 = tx2.getTreeMap("testMap");

        // open a third transaction
        DB tx3 = txMaker.makeTx();
        Map map3 = tx3.getTreeMap("testMap");

        // put some data into the second transaction, observe the third map's size
        System.out.println("map3 size before insert: "+map3.size());
        map2.put(-10, "exists");
        System.out.println("map3 size after insert: "+map3.size());

        // put some data into the third transaction, observe the second map's size
        System.out.println("map2 size before insert: "+map2.size());
        map3.put(100000, "exists");
        System.out.println("map2 size after insert: "+map2.size());

        // So far there was no conflict, since the modified values lie far apart in the tree.
        // `map2` has new key -10, so inserting -11 into map3 should update the same node.
        map3.put(-11, "exists");
        // `map2` and `map3` now hold conflicting data
        tx3.commit();
        System.out.println("Insert -11 into map3 was fine");

        // tx3 was committed, but tx2 now has conflicting data, so its commit will fail
        try{
            tx2.commit();
            throw new Error("Should not be here");
        }catch(TxRollbackException e){
            System.out.println("Tx2 commit failed thanks to conflict, tx2 was rolled back");
        }

        // create yet another transaction and observe the result
        DB tx4 = txMaker.makeTx();
        Map map4 = tx4.getTreeMap("testMap");
        System.out.println("Map size after commits: "+map4.size());
        System.out.println("Value inserted into tx2 and successfully commited: "+map4.get(-10));
        System.out.println("Value inserted into tx3 before rollback: "+map4.get(100000));
        System.out.println("Value inserted into tx3 which triggered rollback: "+map4.get(-11));

        // close the transaction without modifying anything
        tx4.close();

        // close the entire database
        txMaker.close();
    }
View Full Code Here

    // Example: enabling compression, either store-wide or per-collection.
    public static void main(String[] args) {
        /*
         * First case: just enable storage-wide compression for all records.
         */
        DB db = DBMaker.newMemoryDB()
                .compressionEnable() //this setting enables compression
                .make();
        //and now create and use map as usual
        Map map = db.getTreeMap("test");
        map.put("some","stuff");



        /*
         * Other option is to use compression only for a specific part. For example if
         * you have large values, you may want to compress them. It may make sense
         * not to compress BTree Nodes and Keys.
         */
        DB db2 = DBMaker.newMemoryDB().make(); //no store-wide compression this time

        //construct value serializer, use default serializer
        Serializer valueSerializer = db2.getDefaultSerializer();
        //but wrap it, to compress its output
        valueSerializer = new Serializer.CompressionWrapper(valueSerializer);

        //now construct map, with additional options
        Map map2 = db2.createTreeMap("test")
                .valuesOutsideNodesEnable() // store values outside of BTree Nodes. Faster reads if values are large.
                .valueSerializer(valueSerializer) //set our value serializer.
                .make();

        map2.put("some","stuff");
        // NOTE(review): excerpt truncated here by the page scraper — the method's
        // closing brace is cut off.
View Full Code Here

    public static void main(String[] args) throws IOException {

        // Open db in temp directory
        File f = File.createTempFile("mapdb","temp");
    DB db = DBMaker.newFileDB(f)
        .make();
   
    // Open or create table
    Map<String,Person> dbMap = db.getTreeMap("personAndCity");
   
    // Add data
    Person bilbo = new Person("Bilbo","The Shire");
    Person sauron = new Person("Sauron","Mordor");
    Person radagast = new Person("Radagast","Crazy Farm");
   
    dbMap.put("west",bilbo);
    dbMap.put("south",sauron);
    dbMap.put("mid",radagast);

    // Commit and close
    db.commit();
    db.close();


        //
        // Second option for using cystom values is to use your own serializer.
        // This usually leads to better performance as MapDB does not have to
        // analyze the class structure.
        //

        class CustomSerializer implements Serializer<Person>, Serializable{

            @Override
            public void serialize(DataOutput out, Person value) throws IOException {
                out.writeUTF(value.getName());
                out.writeUTF(value.getCity());
            }

            @Override
            public Person deserialize(DataInput in, int available) throws IOException {
                return new Person(in.readUTF(), in.readUTF());
            }

            @Override
            public int fixedSize() {
                return -1;
            }

        }

        Serializer<Person> serializer = new CustomSerializer();

        DB db2 = DBMaker.newTempFileDB().make();

        Map<String,Person> map2 = db2.createHashMap("map").valueSerializer(serializer).make();

        map2.put("North", new Person("Yet another dwarf","Somewhere"));

        db2.commit();
        db2.close();


  }
View Full Code Here

    public static void main(String[] args) throws IOException {

        //Configure and open database using builder pattern.
        //All options are available with code auto-completion.
        File dbFile = File.createTempFile("mapdb","db");
        DB db = DBMaker.newFileDB(dbFile)
                .closeOnJvmShutdown()
                .encryptionEnable("password")
                .make();

        //open an collection, TreeMap has better performance then HashMap
        ConcurrentNavigableMap<Integer,String> map = db.getTreeMap("collectionName");

        map.put(1,"one");
        map.put(2,"two");
        //map.keySet() is now [1,2] even before commit

        db.commit()//persist changes into disk

        map.put(3,"three");
        //map.keySet() is now [1,2,3]
        db.rollback(); //revert recent changes
        //map.keySet() is now [1,2]

        db.close();

    }
View Full Code Here

* (excerpt — the beginning of this Javadoc is cut off) The BTree requires its
* nodes to be immutable, so a multimap built by mutating a List value in place
* is wrong; use the composite-set approach shown below instead.
*/
public class MultiMap {

    public static void main(String[] args) {
        DB db = DBMaker.newMemoryDB().make();

        // this is wrong, do not do it !!!
        //  Map<String,List<Long>> map

        //correct way is to use composite set, where 'map key' is primary key and 'map value' is secondary value
        NavigableSet<Object[]> multiMap = db.getTreeSet("test");

        //optionally you can use set with Delta Encoding. This may save lot of space
        multiMap = db.createTreeSet("test2")
                .serializer(BTreeKeySerializer.ARRAY2)
                .make();

        multiMap.add(new Object[]{"aa",1});
        multiMap.add(new Object[]{"aa",2});
        multiMap.add(new Object[]{"aa",3});
        multiMap.add(new Object[]{"bb",1});

        //find all values for a key
        for(Object[] l: Fun.filter(multiMap, "aa")){
            System.out.println("value for key 'aa': "+l[1]);
        }

        //check if pair exists

        boolean found = multiMap.contains(new Object[]{"bb",1});
        System.out.println("Found: " + found);

        db.close();

    }
View Full Code Here

*/
public class Lazily_Loaded_Records {

    public static void main(String[] args) {

        DB db = DBMaker.newMemoryDB().make();
        //
        // TreeMap has build in support for lazily loaded values.
        // In that case each value are not stored inside node,
        // but in separate record.
        //
        // use DB.createTreeMap to create TreeMap with non-default parameters


        Map map = db.createTreeMap("name").valuesOutsideNodesEnable().make();
        map.put("key","this string is loaded lazily with 'map.get(key)' ");


        //
        // Other option for lazily loaded record is to use Atomic.Var.
        // In this case you have singleton record with name.
        // As bonus you can update reference in thread-safe atomic manner.
        //
        Atomic.Var<String> record =
                db.createAtomicVar("lazyRecord", "aaa", db.getDefaultSerializer());

        record.set("some value");
        System.out.println(record.get());


        // Last option is to use low level Engine storage directly.
        // Each stored record gets assigned unique recid (record id),
        // which is latter used to get or update record.
        // Your code should store only recid as reference to object.
        // All MapDB collections are written this way.

        //insert new record
        long recid = db.getEngine().put("something", Serializer.STRING_NOSIZE);

        //load record
        String lazyString = db.getEngine().get(recid, Serializer.STRING_NOSIZE);

        //update record
        db.getEngine().update(recid, "new value", Serializer.STRING_NOSIZE);


        //I hope this example helped!
        db.close();

    }
View Full Code Here

TOP

Related Classes of org.mapdb.DB

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by Oracle Inc. Contact coftware#gmail.com.