Package org.apache.derby.iapi.store.access.conglomerate

Examples of org.apache.derby.iapi.store.access.conglomerate.TransactionManager
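All of the examples below follow the same basic shape: a TransactionManager (the access layer's internal view of a transaction) hands out an internal or nested transaction, latched page work is done under that transaction, and the transaction is then committed and destroyed so its locks and latches are released independently of the user transaction. The following is a minimal sketch of that pattern, not Derby source; doLatchedPageWork() is a hypothetical placeholder for the real work.

    // Minimal sketch (not Derby source) of the internal-transaction pattern
    // used throughout the examples on this page.  doLatchedPageWork() is a
    // hypothetical placeholder.
    private void doWorkInInternalXact(TransactionManager user_xact_mgr)
        throws StandardException
    {
        // The internal transaction commits and aborts independently of the
        // user transaction that spawned it.
        TransactionManager internal_xact =
            user_xact_mgr.getInternalTransaction();

        try
        {
            doLatchedPageWork(internal_xact, internal_xact.getRawStoreXact());
        }
        finally
        {
            // A no-sync commit is enough: if no later log record is synced,
            // losing this work is harmless.  The commit releases locks and
            // any latches still held.
            internal_xact.commitNoSync(Transaction.RELEASE_LOCKS);
            internal_xact.destroy();
        }
    }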


    DataValueDescriptor[]   scratch_template,
    DataValueDescriptor[]   rowToInsert,
    int                     flag)
        throws StandardException
    {
        TransactionManager split_xact       = null;
        OpenBTree          split_open_btree = null;
        ControlRow         root             = null;

        // Get an internal transaction to be used for the split.
        split_xact = this.init_open_user_scans.getInternalTransaction();

        // open the btree again so that actions on it take place in the
        // split_xact and don't get any locks in this transaction.

        if (SanityManager.DEBUG)
        {
            if ((getOpenMode() & ContainerHandle.MODE_FORUPDATE) !=
                    ContainerHandle.MODE_FORUPDATE)
            {
                SanityManager.THROWASSERT(
                    "Container not opened with update should not cause split");
            }
        }


        boolean do_split = true;
        if (attempt_to_reclaim_deleted_rows)
        {
            // Get lock on base table.

            ConglomerateController base_cc = null;

            try
            {
                base_cc =
                    this.getConglomerate().lockTable(
                        split_xact,
                        (ContainerHandle.MODE_FORUPDATE |
                         ContainerHandle.MODE_LOCK_NOWAIT),
                        TransactionController.MODE_RECORD,
                        TransactionController.ISOLATION_REPEATABLE_READ);
            }
            catch (StandardException se)
            {
                // On any error, just don't try to reclaim deleted rows.  The
                // expected error is that we can't get the lock, which the
                // current interface throws as a containerNotFound exception.
            }

            if (base_cc != null)
            {
                // we got the IX lock on the base table, so we can try to
                // reclaim space.

                // We can only reclaim space by opening the btree in row lock
                // mode.  Reclaiming rows under a table level lock is hard, as
                // we can't determine whether the deleted rows we encounter
                // were deleted by our parent caller and are not yet committed.
                // Those rows are left to be reclaimed offline.
                split_open_btree = new OpenBTree();
                split_open_btree.init(
                    this.init_open_user_scans,
                    split_xact,
                    null,                           // open the container.
                    split_xact.getRawStoreXact(),
                    false,
                    (ContainerHandle.MODE_FORUPDATE |
                     ContainerHandle.MODE_LOCK_NOWAIT),
                    TransactionManager.MODE_RECORD,
                    this.getConglomerate().getBtreeLockingPolicy(
                        split_xact.getRawStoreXact(),
                        TransactionController.MODE_RECORD,
                        LockingPolicy.MODE_RECORD,
                        TransactionController.ISOLATION_REPEATABLE_READ,
                        (ConglomerateController) base_cc,
                        split_open_btree),
                    this.getConglomerate(),
                    (LogicalUndo) null,
                    (DynamicCompiledOpenConglomInfo) null);

                // don't split if we reclaim any rows.
                do_split = !reclaim_deleted_rows(split_open_btree, leaf_pageno);

                split_open_btree.close();
            }
        }

        long new_leaf_pageno = leaf_pageno;
        if (do_split)
        {
            split_open_btree = new OpenBTree();
            split_open_btree.init(
                this.init_open_user_scans,
                split_xact,
                null,                           // open the container.
                split_xact.getRawStoreXact(),
                false,
                getOpenMode(),                  // use same mode this controller
                                                // was opened with
                TransactionManager.MODE_NONE,
                this.getConglomerate().getBtreeLockingPolicy(
                    split_xact.getRawStoreXact(),
                    this.init_lock_level,
                    LockingPolicy.MODE_RECORD,
                    TransactionController.ISOLATION_REPEATABLE_READ,
                    (ConglomerateController) null, // no base row locks during split
                    split_open_btree),
                this.getConglomerate(),
                (LogicalUndo) null,
                (DynamicCompiledOpenConglomInfo) null);


            // Get the root page back, and perform a split following the
            // to-be-inserted key.  The split releases the root page latch.
            root = ControlRow.get(split_open_btree, BTree.ROOTPAGEID);

            if (SanityManager.DEBUG)
                SanityManager.ASSERT(root.page.isLatched());

            new_leaf_pageno =
                root.splitFor(
                    split_open_btree, scratch_template,
                    null, rowToInsert, flag);

            split_open_btree.close();
        }

        split_xact.commit();

        split_xact.destroy();

        return(new_leaf_pageno);
    }
View Full Code Here


     * @exception  StandardException  Standard exception policy.
     **/
    public int performWork(ContextManager contextMgr)
        throws StandardException
    {
        TransactionManager  tc             = (TransactionManager)
            this.access_factory.getAndNameTransaction(
                contextMgr, AccessFactoryGlobals.SYS_TRANS_NAME);

        TransactionManager  internal_xact  = tc.getInternalTransaction();

        // only requeue if work was not completed in this try.
        boolean             requeue_work = false;

        HeapController      heapcontroller;

        if (SanityManager.DEBUG)
        {
            if (SanityManager.DEBUG_ON("verbose_heap_post_commit"))
                SanityManager.DEBUG_PRINT(
                    "HeapPostCommit", "starting internal xact\n");
        }

        try
        {
            // This call will attempt to open the heap table locked with
            // table level IX mode, preparing to do record level locked space
            // reclamation. 
            //
            // The call will either succeed immediately, or throw an exception
            // which could mean the container does not exist or that the lock
            // could not be granted immediately.

            // Reversed the fix for 4255:
            // Page reclamation is done asynchronously by the rawstore daemon,
            // so it is not good to WAIT FOR LOCKS, as that can freeze the
            // daemon.  If we can not get the lock, this reclamation request
            // will be requeued.

            heapcontroller = (HeapController)
                heap.open(
                    internal_xact,
                    internal_xact.getRawStoreXact(),
                    false,
                    ContainerHandle.MODE_FORUPDATE |
                    ContainerHandle.MODE_LOCK_NOWAIT,
                    TransactionController.MODE_RECORD,
                    internal_xact.getRawStoreXact().newLockingPolicy(
                        LockingPolicy.MODE_RECORD,
                        TransactionController.ISOLATION_REPEATABLE_READ, true),
                    heap,
                    (DynamicCompiledOpenConglomInfo) null);

            // We got a table intent lock; all deleted rows we encounter can
            // be reclaimed once an "X" row lock is obtained on them.

            // Process all the rows on the page while holding the latch.
            purgeCommittedDeletes(heapcontroller, this.page_number);

        }
        catch (StandardException se)
        {
            // The exception might have occurred either because the container
            // got dropped or because the lock was not granted.
            // It is possible by the time this post commit work gets scheduled
            // that the container has been dropped and that the open container
            // call will return null - in this case just return assuming no
            // work to be done.

            // If this exception is because the lock could not be obtained,
            // the work is requeued.
            if (se.isLockTimeoutOrDeadlock())
            {
                requeue_work = true;
            }

            // Do not close the controller because that will unlatch the
            // page.  Let the commit and destroy release the latch and
            // close the controller.
            // heapcontroller.close();
        }
           
        // It is ok to not sync this post work.  If no subsequent log record
        // is sync'd to disk then it is ok that this transaction not make
        // it to the database.  If any subsequent transaction is sync'd to
        // the log file, then this transaction will be sync'd as part of that
        // work.

        internal_xact.commitNoSync(Transaction.RELEASE_LOCKS);
        internal_xact.destroy();


        if (SanityManager.DEBUG)
        {
            if (SanityManager.DEBUG_ON("verbose_heap_post_commit"))
View Full Code Here

     */
    protected void queueDeletePostCommitWork(
    RowPosition pos)
        throws StandardException
    {
        TransactionManager xact_mgr = open_conglom.getXactMgr();

        xact_mgr.addPostCommitWork(
            new HeapPostCommit(
                xact_mgr.getAccessManager(),
                (Heap) open_conglom.getConglomerate(),
                pos.current_page.getPageNumber()));
    }
View Full Code Here
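queueDeletePostCommitWork() above shows the queueing side: TransactionManager.addPostCommitWork() is handed a work object whose performWork() later runs in its own transaction, as the HeapPostCommit and BTreePostCommit examples on this page show. Below is a rough sketch of such a work unit, not Derby source. It assumes the work object implements the Serviceable contract (performWork() plus the serviceASAP()/serviceImmediately() hints), that it holds the AccessFactory as HeapPostCommit's constructor arguments suggest, and it uses reclaimPage() as a hypothetical placeholder for the page-level work.

    // Rough sketch (not Derby source) of a post-commit work unit of the kind
    // passed to TransactionManager.addPostCommitWork().  reclaimPage() is a
    // hypothetical placeholder, and serviceASAP()/serviceImmediately() are
    // assumed to be part of the Serviceable contract.
    class ExamplePostCommit implements Serviceable
    {
        private final AccessFactory access_factory;
        private final long          page_number;

        ExamplePostCommit(AccessFactory access_factory, long page_number)
        {
            this.access_factory = access_factory;
            this.page_number    = page_number;
        }

        public int performWork(ContextManager contextMgr)
            throws StandardException
        {
            TransactionManager tc = (TransactionManager)
                access_factory.getAndNameTransaction(
                    contextMgr, AccessFactoryGlobals.SYS_TRANS_NAME);

            TransactionManager internal_xact = tc.getInternalTransaction();

            boolean requeue_work = false;
            try
            {
                // Hypothetical helper: open the container NOWAIT in the
                // internal transaction and purge committed deletes.
                reclaimPage(internal_xact, page_number);
            }
            catch (StandardException se)
            {
                // The NOWAIT lock was not granted; try again later.
                if (se.isLockTimeoutOrDeadlock())
                    requeue_work = true;
            }

            internal_xact.commitNoSync(Transaction.RELEASE_LOCKS);
            internal_xact.destroy();

            return (requeue_work ? Serviceable.REQUEUE : Serviceable.DONE);
        }

        private void reclaimPage(TransactionManager internal_xact, long pageno)
            throws StandardException
        {
            // Hypothetical placeholder for the work shown in the performWork()
            // examples on this page.
        }

        // Assumed Serviceable hints: run from the background daemon, not
        // in-line with the queueing transaction.
        public boolean serviceASAP()        { return false; }
        public boolean serviceImmediately() { return false; }
    }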

    Transaction                     rawtran)
        throws StandardException
    {
        OpenConglomerate        open_for_ddl_lock   = null;
        HeapController          heapcontroller      = null;
        TransactionManager      nested_xact         = null;

        try
        {
            open_for_ddl_lock = new OpenHeap();

            // Open the table in intent exclusive mode in the top level
            // transaction; this stops any ddl from happening until the purge
            // of the whole table is finished.

            if (open_for_ddl_lock.init(
                    (ContainerHandle) null,
                    this,
                    this.format_ids,
                    this.collation_ids,
                    xact_manager,
                    rawtran,
                    false,
                    TransactionController.OPENMODE_FORUPDATE,
                    TransactionController.MODE_RECORD,
                    null,
                    null) == null)
            {
                throw StandardException.newException(
                        SQLState.HEAP_CONTAINER_NOT_FOUND,
                        new Long(id.getContainerId()));
            }

            // perform all the "real" work in a non-readonly nested user
            // transaction, so that as work is completed on each page resources
            // can be released.  Must be careful as all locks obtained in nested
            // transaction will conflict with parent transaction - so this call
            // must be made only if parent transaction can have no conflicting
            // locks on the table, otherwise the purge will fail with a self
            // deadlock.
            nested_xact = (TransactionManager)
                xact_manager.startNestedUserTransaction(false, true);

            // now open the table in a nested user transaction so that each
            // page worth of work can be committed after it is done.

            OpenConglomerate open_conglom = new OpenHeap();

            if (open_conglom.init(
                (ContainerHandle) null,
                this,
                this.format_ids,
                this.collation_ids,
                nested_xact,
                nested_xact.getRawStoreXact(),
                true,
                TransactionController.OPENMODE_FORUPDATE,
                TransactionController.MODE_RECORD,
                nested_xact.getRawStoreXact().newLockingPolicy(
                    LockingPolicy.MODE_RECORD,
                        TransactionController.ISOLATION_REPEATABLE_READ, true),
                null) == null)
            {
                throw StandardException.newException(
                        SQLState.HEAP_CONTAINER_NOT_FOUND,
                        new Long(id.getContainerId()).toString());
            }

            heapcontroller = new HeapController();

            heapcontroller.init(open_conglom);

            Page page   = open_conglom.getContainer().getFirstPage();

            boolean purgingDone = false;

            while (page != null)
            {
                long pageno = page.getPageNumber();
                purgingDone = heapcontroller.purgeCommittedDeletes(page);

                if (purgingDone)
                {
                    page = null;

                    // Commit the xact to free resources ASAP; the commit will
                    // unlatch the page if it has not already been unlatched
                    // by a remove.
                    open_conglom.getXactMgr().commitNoSync(
                                TransactionController.RELEASE_LOCKS);

                    // the commit closes the underlying container, so let
                    // the heapcontroller know this has happened.  Usually
                    // the transaction takes care of this, but this controller
                    // is internal, so the transaction does not know about it.
                    heapcontroller.closeForEndTransaction(false);
                   
                    // the commit will close the underlying container, so
                    // reopen it.
                    open_conglom.reopen();
                }
                else
                {
                    page.unlatch();
                    page = null;
                }

                page = open_conglom.getContainer().getNextPage(pageno);
            }
        }
        finally
        {
            if (open_for_ddl_lock != null)
                open_for_ddl_lock.close();
            if (heapcontroller != null)
                heapcontroller.close();
            if (nested_xact != null)
            {
                nested_xact.commitNoSync(TransactionController.RELEASE_LOCKS);
                nested_xact.destroy();
            }
        }

        return;
    }
View Full Code Here
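The purge above uses a nested user transaction (rather than an internal transaction) so that each page's worth of work can be committed, and its locks released, as soon as it is done, without committing the caller. A condensed sketch of that chunked-commit pattern follows; it is not Derby source, it assumes the parent transaction holds no conflicting locks, and processOneChunk() is a hypothetical placeholder that returns false when no work remains.

    // Condensed sketch (not Derby source) of the chunked-commit pattern used
    // by the purge above.  processOneChunk() is a hypothetical placeholder.
    private void purgeInChunks(TransactionManager xact_manager)
        throws StandardException
    {
        // Non-readonly nested user transaction.  Its locks conflict with the
        // parent's, so the parent must hold no conflicting locks on the table.
        TransactionManager nested_xact = (TransactionManager)
            xact_manager.startNestedUserTransaction(false, true);

        try
        {
            while (processOneChunk(nested_xact))
            {
                // Release the finished chunk's locks and resources right away.
                nested_xact.commitNoSync(TransactionController.RELEASE_LOCKS);
            }
        }
        finally
        {
            nested_xact.commitNoSync(TransactionController.RELEASE_LOCKS);
            nested_xact.destroy();
        }
    }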


    DataValueDescriptor[]   scratch_template,
    DataValueDescriptor[]   rowToInsert,
    int                     flag)
        throws StandardException
    {
        TransactionManager split_xact       = null;
        OpenBTree          split_open_btree = null;
        ControlRow         root             = null;

        // Get an internal transaction to be used for the split.
        split_xact = this.init_open_user_scans.getInternalTransaction();

        // open the btree again so that actions on it take place in the
        // split_xact and don't get any locks in this transaction.

        if (SanityManager.DEBUG)
        {
            if ((getOpenMode() & ContainerHandle.MODE_FORUPDATE) !=
                    ContainerHandle.MODE_FORUPDATE)
            {
                SanityManager.THROWASSERT(
                    "Container not opened with update should not cause split");
            }
        }


        boolean do_split = true;
        if (attempt_to_reclaim_deleted_rows)
        {
            // Get lock on base table.

            ConglomerateController base_cc = null;

            try
            {
                base_cc =
                    this.getConglomerate().lockTable(
                        split_xact,
                        (ContainerHandle.MODE_FORUPDATE |
                         ContainerHandle.MODE_LOCK_NOWAIT),
                        TransactionController.MODE_RECORD,
                        TransactionController.ISOLATION_REPEATABLE_READ);
            }
            catch (StandardException se)
            {
                // On any error, just don't try to reclaim deleted rows.  The
                // expected error is that we can't get the lock, which the
                // current interface throws as a containerNotFound exception.
            }

            if (base_cc != null)
            {
                // we got the IX lock on the base table, so we can try to
                // reclaim space.

                // We can only reclaim space by opening the btree in row lock
                // mode.  Reclaiming rows under a table level lock is hard, as
                // we can't determine whether the deleted rows we encounter
                // were deleted by our parent caller and are not yet committed.
                // Those rows are left to be reclaimed offline.
                split_open_btree = new OpenBTree();
                split_open_btree.init(
                    this.init_open_user_scans,
                    split_xact,
                    null,                           // open the container.
                    split_xact.getRawStoreXact(),
                    false,
                    (ContainerHandle.MODE_FORUPDATE |
                     ContainerHandle.MODE_LOCK_NOWAIT),
                    TransactionManager.MODE_RECORD,
                    this.getConglomerate().getBtreeLockingPolicy(
                        split_xact.getRawStoreXact(),
                        TransactionController.MODE_RECORD,
                        LockingPolicy.MODE_RECORD,
                        TransactionController.ISOLATION_REPEATABLE_READ,
                        (ConglomerateController) base_cc,
                        split_open_btree),
                    this.getConglomerate(),
                    (LogicalUndo) null,
                    (DynamicCompiledOpenConglomInfo) null);

                // don't split if we reclaim any rows.
                do_split = !reclaim_deleted_rows(split_open_btree, leaf_pageno);

                // On return, if !do_split then the latch on leaf_pageno is
                // held and will be released by committing or aborting the
                // transaction.  If a purge has been done, no other action on
                // the page (i.e. a split) should be attempted before the
                // purges are committed.

                split_open_btree.close();
            }
        }

        long new_leaf_pageno = leaf_pageno;
        if (do_split)
        {
            // no space was reclaimed from deleted rows, so do split to allow
            // space for a subsequent insert.

            split_open_btree = new OpenBTree();
            split_open_btree.init(
                this.init_open_user_scans,
                split_xact,
                null,                           // open the container.
                split_xact.getRawStoreXact(),
                false,
                getOpenMode(),                  // use same mode this controller
                                                // was opened with
                TransactionManager.MODE_NONE,
                this.getConglomerate().getBtreeLockingPolicy(
                    split_xact.getRawStoreXact(),
                    this.init_lock_level,
                    LockingPolicy.MODE_RECORD,
                    TransactionController.ISOLATION_REPEATABLE_READ,
                    (ConglomerateController) null, // no base row locks during split
                    split_open_btree),
                this.getConglomerate(),
                (LogicalUndo) null,
                (DynamicCompiledOpenConglomInfo) null);


            // Get the root page back, and perform a split following the
            // to-be-inserted key.  The split releases the root page latch.
            root = ControlRow.get(split_open_btree, BTree.ROOTPAGEID);

            if (SanityManager.DEBUG)
                SanityManager.ASSERT(root.page.isLatched());

            new_leaf_pageno =
                root.splitFor(
                    split_open_btree, scratch_template,
                    null, rowToInsert, flag);

            split_open_btree.close();
        }

        split_xact.commit();

        split_xact.destroy();

        return(new_leaf_pageno);
    }
View Full Code Here

     * @exception  StandardException  Standard exception policy.
     **/
    public int performWork(ContextManager contextMgr)
        throws StandardException
    {
        TransactionManager  tc             = (TransactionManager)
            this.access_factory.getAndNameTransaction(
                contextMgr, AccessFactoryGlobals.SYS_TRANS_NAME);

        TransactionManager  internal_xact  = tc.getInternalTransaction();

        // only requeue if work was not completed in this try.
        boolean             requeue_work = false;

        HeapController      heapcontroller;

        if (SanityManager.DEBUG)
        {
            if (SanityManager.DEBUG_ON("verbose_heap_post_commit"))
                SanityManager.DEBUG_PRINT(
                    "HeapPostCommit", "starting internal xact\n");
        }

        try
        {
            // This call will attempt to open the heap table locked with
            // table level IX mode, preparing to do record level locked space
            // reclamation. 
            //
            // The call will either succeed immediately, or throw an exception
            // which could mean the container does not exist or that the lock
            // could not be granted immediately.

            // Reversed the fix for 4255:
            // Page reclamation is done asynchronously by the rawstore daemon,
            // so it is not good to WAIT FOR LOCKS, as that can freeze the
            // daemon.  If we can not get the lock, this reclamation request
            // will be requeued.

            heapcontroller = (HeapController)
                heap.open(
                    internal_xact,
                    internal_xact.getRawStoreXact(),
                    false,
                    ContainerHandle.MODE_FORUPDATE |
                    ContainerHandle.MODE_LOCK_NOWAIT,
                    TransactionController.MODE_RECORD,
                    internal_xact.getRawStoreXact().newLockingPolicy(
                        LockingPolicy.MODE_RECORD,
                        TransactionController.ISOLATION_REPEATABLE_READ, true),
                    heap,
                    (DynamicCompiledOpenConglomInfo) null);

            // We got a table intent lock; all deleted rows we encounter can
            // be reclaimed once an "X" row lock is obtained on them.

            // Process all the rows on the page while holding the latch.
            purgeCommittedDeletes(heapcontroller, this.page_number);

        }
        catch (StandardException se)
        {
            // The exception might have occurred either because the container
            // got dropped or because the lock was not granted.
            // It is possible by the time this post commit work gets scheduled
            // that the container has been dropped and that the open container
            // call will return null - in this case just return assuming no
            // work to be done.

            // If this exception is because the lock could not be obtained,
            // the work is requeued.
            if (se.getMessageId().equals(SQLState.LOCK_TIMEOUT) ||
                se.getMessageId().equals(SQLState.DEADLOCK))
            {
                requeue_work = true;
            }

            // Do not close the controller because that will unlatch the
            // page.  Let the commit and destroy release the latch and
            // close the controller.
            // heapcontroller.close();
        }
           
        // It is ok to not sync this post work.  If no subsequent log record
        // is sync'd to disk then it is ok that this transaction not make
        // it to the database.  If any subsequent transaction is sync'd to
        // the log file, then this transaction will be sync'd as part of that
        // work.

        internal_xact.commitNoSync(Transaction.RELEASE_LOCKS);
        internal_xact.destroy();


        if (SanityManager.DEBUG)
        {
            if (SanityManager.DEBUG_ON("verbose_heap_post_commit"))
View Full Code Here

    {

        // requeue if work was not completed in this try because of locks
        boolean             requeue_work = false;

        TransactionManager tc             = (TransactionManager)
            this.access_factory.getAndNameTransaction(
                contextMgr, AccessFactoryGlobals.SYS_TRANS_NAME);

        TransactionManager  internal_xact  = tc.getInternalTransaction();

        if (SanityManager.DEBUG)
        {
            if (SanityManager.DEBUG_ON("verbose_btree_post_commit"))
                System.out.println("starting internal xact\n");
        }

        OpenBTree           open_btree = null;

        try
        {
            // Get lock on base table.
           
            // First attempt to get a table lock on the btree.  This lock is
            // requested NOWAIT to not impede normal operation on the table.
            // If the lock were to wait then the current lock manager livelock
            // algorithm would block all subsequent lock requests on this
            // btree even if they are compatible with the current holder of
            // the lock.
            //
            // If this lock is granted then:
            // 1) deleted rows on the page can automatically be purged, as
            //    they must be committed; otherwise the lock would not have
            //    been granted.
            // 2) if all rows from page are reclaimed then a structure shrink
            //    which requires table level lock can be executed.
            //
            open_btree =
                openIndex(
                    internal_xact,
                    TransactionController.MODE_TABLE,
                    LockingPolicy.MODE_CONTAINER);

            DataValueDescriptor[] shrink_key =
                purgeCommittedDeletes(open_btree, this.page_number);

            if (shrink_key != null)
                doShrink(open_btree, shrink_key);
        }
        catch (StandardException se)
        {
            // Two kinds of errors are expected here: either the container was
            // not found, or the lock could not be obtained (LOCK_TIMEOUT or
            // DEADLOCK).
            //
            // It is possible by the time this post commit work gets scheduled
            // that the container has been dropped and that the open container
            // call will return null - in this case just return assuming no
            // work to be done.

            if (se.getMessageId().equals(SQLState.LOCK_TIMEOUT) ||
                se.getMessageId().equals(SQLState.DEADLOCK))
            {
                // Could not get exclusive table lock, so try row level
                // reclaim of just the rows on this page.  No merge is
                // attempted.

                try
                {
                    open_btree =
                        openIndex(
                            internal_xact,
                            TransactionController.MODE_RECORD,
                            LockingPolicy.MODE_RECORD);

                    purgeRowLevelCommittedDeletes(open_btree);

                }
                catch (StandardException se2)
                {
                    if (se2.getMessageId().equals(SQLState.LOCK_TIMEOUT) ||
                        se2.getMessageId().equals(SQLState.DEADLOCK))
                    {
                        // Could not get the intent exclusive table lock, so
                        // requeue and hope the other user gives up the table
                        // level lock soon.  This should not be the normal
                        // case.
                        requeue_work = true;
                    }
                }
            }
        }
        finally
        {
            if (open_btree != null)
                open_btree.close();

            // We are counting on this commit to release the latches
            // associated with the row level purge, which have been left held
            // to prevent others from getting to the purged pages before the
            // commit.  If a latch is released early, other transactions could
            // insert on the page, which could prevent undo of the purges in
            // case of a crash before the commit gets to disk.
            internal_xact.commit();
            internal_xact.destroy();
        }

        return(requeue_work ? Serviceable.REQUEUE : Serviceable.DONE);
    }
View Full Code Here
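Stripped of the btree details, the control flow above is a two-tier NOWAIT escalation: try the cheap table-level cleanup first, fall back to row-level cleanup of just this page if the table lock is unavailable, and requeue the work only if even that is blocked. Below is a bare-bones sketch of that shape, not Derby source, with tableLevelWork()/rowLevelWork() as hypothetical placeholders that throw a lock timeout or deadlock StandardException when their NOWAIT lock request fails.

    // Bare-bones sketch (not Derby source) of the NOWAIT escalation above.
    // tableLevelWork()/rowLevelWork() are hypothetical placeholders.
    private int reclaimWithFallback()
        throws StandardException
    {
        boolean requeue_work = false;

        try
        {
            tableLevelWork();                 // best case: purge and shrink
        }
        catch (StandardException se)
        {
            if (se.isLockTimeoutOrDeadlock())
            {
                try
                {
                    rowLevelWork();           // purge just this page's rows
                }
                catch (StandardException se2)
                {
                    if (se2.isLockTimeoutOrDeadlock())
                        requeue_work = true;  // retry the whole unit later
                }
            }
        }

        return (requeue_work ? Serviceable.REQUEUE : Serviceable.DONE);
    }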

    DataValueDescriptor[]   scratch_template,
    DataValueDescriptor[]   rowToInsert,
    int                     flag)
        throws StandardException
    {
        TransactionManager split_xact       = null;
        OpenBTree          split_open_btree = null;
        ControlRow         root             = null;

        // Get an internal transaction to be used for the split.
        split_xact = this.init_open_user_scans.getInternalTransaction();

        // open the btree again so that actions on it take place in the
        // split_xact and don't get any locks in this transaction.

        if (SanityManager.DEBUG)
        {
            if ((getOpenMode() & ContainerHandle.MODE_FORUPDATE) !=
                    ContainerHandle.MODE_FORUPDATE)
            {
                SanityManager.THROWASSERT(
                    "Container not opened with update should not cause split");
            }
        }


        boolean do_split = true;
        if (attempt_to_reclaim_deleted_rows)
        {
            // Get lock on base table.

            ConglomerateController base_cc = null;

            try
            {
                base_cc =
                    this.getConglomerate().lockTable(
                        split_xact,
                        (ContainerHandle.MODE_FORUPDATE |
                         ContainerHandle.MODE_LOCK_NOWAIT),
                        TransactionController.MODE_RECORD,
                        TransactionController.ISOLATION_REPEATABLE_READ);
            }
            catch (StandardException se)
            {
                // On any error, just don't try to reclaim deleted rows.  The
                // expected error is that we can't get the lock, which the
                // current interface throws as a containerNotFound exception.
            }

            if (base_cc != null)
            {
                // we got the IX lock on the base table, so we can try to
                // reclaim space.

                // We can only reclaim space by opening the btree in row lock
                // mode.  Reclaiming rows under a table level lock is hard, as
                // we can't determine whether the deleted rows we encounter
                // were deleted by our parent caller and are not yet committed.
                // Those rows are left to be reclaimed offline.
                split_open_btree = new OpenBTree();
                split_open_btree.init(
                    this.init_open_user_scans,
                    split_xact,
                    null,                           // open the container.
                    split_xact.getRawStoreXact(),
                    false,
                    (ContainerHandle.MODE_FORUPDATE |
                     ContainerHandle.MODE_LOCK_NOWAIT),
                    TransactionManager.MODE_RECORD,
                    this.getConglomerate().getBtreeLockingPolicy(
                        split_xact.getRawStoreXact(),
                        TransactionController.MODE_RECORD,
                        LockingPolicy.MODE_RECORD,
                        TransactionController.ISOLATION_REPEATABLE_READ,
                        (ConglomerateController) base_cc,
                        split_open_btree),
                    this.getConglomerate(),
                    (LogicalUndo) null,
                    (DynamicCompiledOpenConglomInfo) null);

                // don't split if we reclaim any rows.
                do_split = !reclaim_deleted_rows(split_open_btree, leaf_pageno);

                split_open_btree.close();
            }
        }

        long new_leaf_pageno = leaf_pageno;
        if (do_split)
        {
            split_open_btree = new OpenBTree();
            split_open_btree.init(
                this.init_open_user_scans,
                split_xact,
                null,                           // open the container.
                split_xact.getRawStoreXact(),
                false,
                getOpenMode(),                  // use same mode this controller
                                                // was opened with
                TransactionManager.MODE_NONE,
                this.getConglomerate().getBtreeLockingPolicy(
                    split_xact.getRawStoreXact(),
                    this.init_lock_level,
                    LockingPolicy.MODE_RECORD,
                    TransactionController.ISOLATION_REPEATABLE_READ,
                    (ConglomerateController) null, // no base row locks during split
                    split_open_btree),
                this.getConglomerate(),
                (LogicalUndo) null,
                (DynamicCompiledOpenConglomInfo) null);


            // Get the root page back, and perform a split following the
            // to-be-inserted key.  The split releases the root page latch.
            root = ControlRow.Get(split_open_btree, BTree.ROOTPAGEID);

            if (SanityManager.DEBUG)
                SanityManager.ASSERT(root.page.isLatched());

            new_leaf_pageno =
                root.splitFor(
                    split_open_btree, scratch_template,
                    null, rowToInsert, flag);

            split_open_btree.close();
        }

        split_xact.commit();

        split_xact.destroy();

        return(new_leaf_pageno);
    }
View Full Code Here

        {

            // the following open should fail with a containerNotFound error
           
            // for testing purposes - assume the TransactionController can be
            // cast to a TransactionManager.
            TransactionManager tm = (TransactionManager) tc;
            ConglomerateController cc =
                testbtree.open(
                    tm, tm.getRawStoreXact(), false, 0, 0,
                    (LockingPolicy) null,
                    null, null);

            throw T_Fail.testFailMsg("bad open succeeded.");
        }
        catch(StandardException t)
        {
            // expected path comes here.
        }


        // create the base table
        DataValueDescriptor[]   base_row        = TemplateRow.newU8Row(2);
        T_SecondaryIndexRow     index_row1      = new T_SecondaryIndexRow();

        long base_conglomid =
            tc.createConglomerate(
                "heap",   // create a heap conglomerate
                base_row, // base table template row
                null, //column sort order - not required for heap
                null, //default collation
                null,     // default properties
                TransactionController.IS_DEFAULT);

        // Open the base table
        ConglomerateController base_cc =
            tc.openConglomerate(
                base_conglomid,
                false,
                0,
                TransactionController.MODE_RECORD,
                TransactionController.ISOLATION_SERIALIZABLE);

        RowLocation         base_rowloc1    = base_cc.newRowLocationTemplate();

        index_row1.init(base_row, base_rowloc1, 3);

        // create the secondary index
        Properties properties =
            createProperties(
                null,           // no current properties list
                false,          // don't allow duplicates
                3,   // index on all base row cols + row location
                2,   // non-unique index
                true,           // maintain parent links
                -42, // fake base conglom for now
                2);      // row loc in last column

        TransactionManager tm = (TransactionManager) tc;

        // test bad property
        try
        {
            testbtree.create(
View Full Code Here
