Examples of DynamicByteArrayOutputStream


Examples of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream

  }

  public DynamicByteArrayOutputStream getLogBuffer() {

    if (logBuffer == null) {
      logBuffer = new DynamicByteArrayOutputStream(1024);
    } else {
      logBuffer.reset();
    }

    return logBuffer;
View Full Code Here

Examples of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream

      boolean isLongColumns   = false;
      int     realStartColumn = -1;
      int     realSpaceOnPage = -1;

      DynamicByteArrayOutputStream logBuffer = null;

            // allocate new record id and handle
            int          recordId = curPage.newRecordIdAndBump();
      RecordHandle handle   =
                new RecordId(curPage.getPageId(), recordId, slot);

      if (curPage == this) {


        // Lock the row, if it is the very first portion of the record.
        if (handleToUpdate == null) {

                    while (!owner.getLockingPolicy().lockRecordForWrite(
                                t, handle,
                                true  /* lock is for insert */,
                                false /* don't wait for grant */)) {

                        // loop until we get a new record id we can get a lock
                        // on.  If we can't get the lock without waiting then
                        // assume the record id is owned by another xact.  The
                        // current heap overflow algorithm makes this likely,
                        // as it first tries to insert a row telling raw store
                        // to fail if it doesn't fit on the page getting a lock
                        // on an id that never makes it to disk.   The
                        // inserting transaction will hold a lock on this
                        // "unused" record id until it commits.  The page can
                        // leave the cache at this point, and the inserting
                        // transaction has not dirtied the page (it failed
                        // after getting the lock but before logging anything),
                        // another inserting transaction will then get the
                        // same id as the previous inserter - thus the loop on
                        // lock waits.
                        //
                        // The lock we request indicates that this is a lock
                        // for insert, which the locking policy may use to
                        // perform locking concurrency optimizations.

                        // allocate new record id and handle
                        recordId = curPage.newRecordIdAndBump();
                        handle   =
                            new RecordId(curPage.getPageId(), recordId, slot);
                    }
        }

        headHandle = handle;
      }

      do {

        // do this loop at least once.  If we caught a long Column,
        // then, we redo the insert with saved logBuffer.
        try {

          startColumn =
                        owner.getActionSet().actionInsert(
                            t, curPage, slot, recordId,
                            row, validColumns, (LogicalUndo) null,
                            insertFlag, startColumn, false,
                            realStartColumn, logBuffer, realSpaceOnPage,
                            overflowThreshold);
          isLongColumns = false;

        } catch (LongColumnException lce) {


          // we caught a long column exception
          // three things should happen here:
          // 1. insert the long column into overflow pages.
          // 2. append the overflow field header in the main chain.
          // 3. continue the insert in the main data chain.
          logBuffer = new DynamicByteArrayOutputStream(lce.getLogBuffer());

          // step 1: insert the long column ... use the same
          // insertFlag as the rest of the row.
          RecordHandle longColumnHandle =
            insertLongColumn(curPage, lce, insertFlag);
View Full Code Here

Examples of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream

  }

  public DynamicByteArrayOutputStream getLogBuffer() {

    if (logBuffer == null) {
      logBuffer = new DynamicByteArrayOutputStream(1024);
    } else {
      logBuffer.reset();
    }

    return logBuffer;
View Full Code Here

Examples of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream

        // does the estimate think it won't fit, if not return false to avoid
        // cost of calling logRow() just to figure out if the row will fit.
    if (!allowInsert())
      return false;

    DynamicByteArrayOutputStream out = new DynamicByteArrayOutputStream();

    try
        {
      // This is a public call, start column is rawstore only. 
      // set the starting Column for the row to be 0.
View Full Code Here

Examples of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream

    throws StandardException
  {
    if (!(spaceForInsert() && (freeSpace >= spaceNeeded)))
      return false;

    DynamicByteArrayOutputStream out = new DynamicByteArrayOutputStream();

    try
        {
      logRow(
                0, true, nextId, row, validColumns, out, startColumn,
View Full Code Here

Examples of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream

    reservedSpaceFieldId += startField;


    // the new data that needs to be written at newOffset but can't until
    // unusedSpace >= newDataToWrite.length (allowing for the header)
    DynamicByteArrayOutputStream newDataToWrite = null;

    rawDataOut.setPosition(newOffset);

    // write the record header, which may change in size
    int oldLength = recordHeader.size();
    int newLength = newRecorderHeader.size();

    int unusedSpace = oldLength; // the unused space at newOffset

    // no fields, so we can eat into the reserve space
    if (reservedSpaceFieldId < startField) // no fields
      unusedSpace += getReservedCount(slot);

    if (unusedSpace >= newLength) {
      newRecorderHeader.write(rawDataOut);
      newOffset += newLength;
      unusedSpace -= newLength;
     
    } else {     

      newDataToWrite = new DynamicByteArrayOutputStream(getPageSize());
      newRecorderHeader.write(newDataToWrite);
    }
    oldOffset += oldLength;
    int recordDelta = (newLength - oldLength);

    int oldFieldStatus = 0;
    int oldFieldDataLength = 0;
    int newFieldStatus = 0;
    int newFieldDataLength = 0;

    int oldEndFieldExclusive = startField + oldFieldCount;
    int newEndFieldExclusive = startField + newFieldCount;

    for (int fieldId = startField; fieldId < newEndFieldExclusive; fieldId++) {

      int oldFieldLength = 0;
      if (fieldId < oldEndFieldExclusive) {
        rawDataIn.setPosition(oldOffset);
        oldFieldStatus = StoredFieldHeader.readStatus(rawDataIn);
        oldFieldDataLength = StoredFieldHeader.readFieldDataLength(rawDataIn, oldFieldStatus, slotFieldSize);
        oldFieldLength = StoredFieldHeader.size(oldFieldStatus, oldFieldDataLength, slotFieldSize)
          + oldFieldDataLength;
      }

      newFieldStatus = StoredFieldHeader.readStatus(in);
      newFieldDataLength = StoredFieldHeader.readFieldDataLength(in, newFieldStatus, slotFieldSize);

      // if no value was provided on an update of a field then use the old value,
      // unless the old field didn't exist.
      if (StoredFieldHeader.isNonexistent(newFieldStatus) && (fieldId < oldEndFieldExclusive)) {

        // may need to move this old field ...
        if ((newDataToWrite == null) || (newDataToWrite.getUsed() == 0)) {
          // there is no old data to catch up on; is the data at
          // the correct position already?
          if (newOffset == oldOffset) {
            // yes, nothing to do!!
            if (SanityManager.DEBUG) {
              if (unusedSpace != 0)
              SanityManager.THROWASSERT("Unused space is out of sync, expect 0 got " + unusedSpace);
            }
          } else {
            // need to shift the field left
            if (SanityManager.DEBUG) {
              if (unusedSpace != (oldOffset - newOffset))
              SanityManager.THROWASSERT(
                "Unused space is out of sync expected " + (oldOffset - newOffset) + " got " + unusedSpace);
            }

            System.arraycopy(pageData, oldOffset, pageData, newOffset, oldFieldLength);
          }
          newOffset += oldFieldLength;

          // last field to be updated can eat into the reserve space
          if (fieldId == reservedSpaceFieldId)
            unusedSpace += getReservedCount(slot);

        } else {
          // there is data still to be written, just append this field to the
          // saved data
          int position = newDataToWrite.getPosition();
          newDataToWrite.setPosition(position + oldFieldLength);
          System.arraycopy(pageData, oldOffset,
            newDataToWrite.getByteArray(), position, oldFieldLength);

          unusedSpace += oldFieldLength;

          // last field to be updated can eat into the reserve space
          if (fieldId == reservedSpaceFieldId)
            unusedSpace += getReservedCount(slot);

          // attempt to write out some of what we have in the side buffer now.
          int copyLength = moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);
          newOffset += copyLength;
          unusedSpace -= copyLength;

        }
        oldOffset += oldFieldLength;
        continue;
      }

      newFieldStatus = StoredFieldHeader.setFixed(newFieldStatus, false);

      int newFieldHeaderLength = StoredFieldHeader.size(newFieldStatus, newFieldDataLength, slotFieldSize);
      int newFieldLength = newFieldHeaderLength + newFieldDataLength;

      recordDelta += (newFieldLength - oldFieldLength);

      // See if we can write this field now

      // space available increases by the amount of the old field
      unusedSpace += oldFieldLength;
      oldOffset += oldFieldLength;

      // last field to be updated can eat into the reserve space
      if (fieldId == reservedSpaceFieldId)
        unusedSpace += getReservedCount(slot);

      if ((newDataToWrite != null) && (newDataToWrite.getUsed() != 0)) {

        // catch up on the old data if possible
        int copyLength = moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);
        newOffset += copyLength;
        unusedSpace -= copyLength;
      }

      if (((newDataToWrite == null) || (newDataToWrite.getUsed() == 0))
        && (unusedSpace >= newFieldHeaderLength)) {

        // can fit the header in
        rawDataOut.setPosition(newOffset);
        newOffset += StoredFieldHeader.write(rawDataOut, newFieldStatus, newFieldDataLength, slotFieldSize);
        unusedSpace -= newFieldHeaderLength;

        if (newFieldDataLength != 0) {

          // read as much of the field as possible
          int fieldCopy = unusedSpace >= newFieldDataLength ?
              newFieldDataLength : unusedSpace;

          if (fieldCopy != 0) {
            in.readFully(pageData, newOffset, fieldCopy);

            newOffset += fieldCopy;
            unusedSpace -= fieldCopy;
          }


          fieldCopy = newFieldDataLength - fieldCopy;
          if (fieldCopy != 0) {
            if (newDataToWrite == null)
              newDataToWrite = new DynamicByteArrayOutputStream(newFieldLength * 2);

            // append the remaining portion of the field to the saved data
            int position = newDataToWrite.getPosition();
            newDataToWrite.setPosition(position + fieldCopy);
            in.readFully(newDataToWrite.getByteArray(),
                position, fieldCopy);

          }
        }
      } else {
        // can't fit this header, or therefore the field; append it
        // to the buffer.

        if (newDataToWrite == null)
          newDataToWrite = new DynamicByteArrayOutputStream(newFieldLength * 2);

        StoredFieldHeader.write(newDataToWrite, newFieldStatus, newFieldDataLength, slotFieldSize);

        // save the new field data
        if (newFieldDataLength != 0) {
          int position = newDataToWrite.getPosition();
          newDataToWrite.setPosition(position + newFieldDataLength);
          in.readFully(newDataToWrite.getByteArray(),
                position, newFieldDataLength);
        }
      }
    }

    // at this point there may still be data left in the saved buffer
    // but presumably we can't fit it in

    int reservedDelta;

    if ((newDataToWrite != null) && (newDataToWrite.getUsed() != 0)) {

      // need to shift the later records down ...
      int nextRecordOffset = startingOffset + getTotalSpace(slot);

      int spaceRequiredFromFreeSpace = newDataToWrite.getUsed() - (nextRecordOffset - newOffset);

      if (SanityManager.DEBUG) {
        if (newOffset > nextRecordOffset)
          SanityManager.THROWASSERT("data has overwritten next record - offset " + newOffset
              + " next record " + nextRecordOffset);

        if ((spaceRequiredFromFreeSpace <= 0) || (spaceRequiredFromFreeSpace > freeSpace))
          SanityManager.THROWASSERT("invalid space required " + spaceRequiredFromFreeSpace
          + " newDataToWrite.getUsed() " + newDataToWrite.getUsed()
          + " nextRecordOffset " + nextRecordOffset
          + " newOffset " + newOffset
          + " reservedSpaceFieldId " + reservedSpaceFieldId
          + " startField " + startField
          + " newEndFieldExclusive " + newEndFieldExclusive
          + " newFieldCount " + newFieldCount
          + " oldFieldCount " + oldFieldCount
          + " slot " + slot
          + " freeSpace " + freeSpace
          + " unusedSpace " + unusedSpace
          + " page " + getPageId());


        if ((getReservedCount(slot) + spaceRequiredFromFreeSpace) != recordDelta)
          SanityManager.THROWASSERT("mismatch on count: reserved " + getReservedCount(slot) +
            "free space take " + spaceRequiredFromFreeSpace +
            "record delta " + recordDelta);

      }

      if (spaceRequiredFromFreeSpace > freeSpace) {
        throw dataFactory.markCorrupt(
                    StandardException.newException(
                        SQLState.DATA_CORRUPT_PAGE, getPageId()));
      }

      // see if this is the last record on the page, if so a simple
      // shift of the remaining fields will suffice...
      expandPage(nextRecordOffset, spaceRequiredFromFreeSpace);

      unusedSpace += spaceRequiredFromFreeSpace;

      moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);

      reservedDelta = -1 * getReservedCount(slot);

      if (SanityManager.DEBUG) {
        if (newDataToWrite.getUsed() != 0)
          SanityManager.THROWASSERT("data is left in save buffer ... " + newDataToWrite.getUsed());
      }
    } else {
      reservedDelta = -1 * recordDelta;
    }

 
View Full Code Here

Examples of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream

                 (startColumn <  endFieldExclusive)))
      {
        boolean                 hitLongColumn;
        int                     nextColumn      = -1;
        Object[]   savedFields     = null;
        DynamicByteArrayOutputStream  logBuffer       = null;

        do
                {
          try
                    {
            // Update this portion of the record.
            // Pass in headRowHandle in case we are to update any
            // long column and they need to be cleaned up by post
            // commit processing.  We don't want to purge the
            // columns right now because in order to reclaim the
            // page, we need to remove them.  But it would be bad
            // to remove them now because the transaction may not
            // commit for a long time.  We can do both purging of
            // the long column and page removal together in the
            // post commit.
            nextColumn =
                            owner.getActionSet().actionUpdate(
                                t, curPage, slot, id, row, validColumns,
                  realStartColumn, logBuffer,
                                realSpaceOnPage, headRowHandle);

            hitLongColumn = false;

          }
                    catch (LongColumnException lce)
                    {
 
            if (lce.getRealSpaceOnPage() == -1)
                        {
              // an update that has caused the row to increase
                            // in size *and* push some fields off the page
                            // that need to be inserted in an overflow page

              // no need to make a copy as we are going to use
                            // this buffer right away
              logBuffer = lce.getLogBuffer();

              savedFields     =
                                (Object[]) lce.getColumn();
                           
              realStartColumn = lce.getNextColumn();
              realSpaceOnPage = -1;

              hitLongColumn   = true;

              continue;
            }

           
            // we caught a real long column exception
            // three things should happen here:
            // 1. insert the long column into overflow pages.
            // 2. append the overflow field header in the main chain.
            // 3. continue the update in the main data chain.
            logBuffer =
                            new DynamicByteArrayOutputStream(lce.getLogBuffer());

            // step 1: insert the long column ... if this update
                        // operation rolls back, purge the after image column
                        // chain and reclaim the overflow page because the
                        // whole chain will be orphaned anyway.
View Full Code Here

Examples of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream

        // does the estimate think it won't fit, if not return false to avoid
        // cost of calling logRow() just to figure out if the row will fit.
    if (!allowInsert())
      return false;

    DynamicByteArrayOutputStream out = new DynamicByteArrayOutputStream();

    try
        {
      // This is a public call, start column is rawstore only. 
      // set the starting Column for the row to be 0.
View Full Code Here

Examples of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream

    throws StandardException
  {
    if (!(spaceForInsert() && (freeSpace >= spaceNeeded)))
      return false;

    DynamicByteArrayOutputStream out = new DynamicByteArrayOutputStream();

    try
        {
      logRow(
                0, true, nextId, row, validColumns, out, startColumn,
View Full Code Here

Examples of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream

    reservedSpaceFieldId += startField;


    // the new data that needs to be written at newOffset but can't until
    // unusedSpace >= newDataToWrite.length (allowing for the header)
    DynamicByteArrayOutputStream newDataToWrite = null;

    rawDataOut.setPosition(newOffset);

    // write the record header, which may change in size
    int oldLength = recordHeader.size();
    int newLength = newRecorderHeader.size();

    int unusedSpace = oldLength; // the unused space at newOffset

    // no fields, so we can eat into the reserve space
    if (reservedSpaceFieldId < startField) // no fields
      unusedSpace += getReservedCount(slot);

    if (unusedSpace >= newLength) {
      newRecorderHeader.write(rawDataOut);
      newOffset += newLength;
      unusedSpace -= newLength;
     
    } else {     

      newDataToWrite = new DynamicByteArrayOutputStream(getPageSize());
      newRecorderHeader.write(newDataToWrite);
    }
    oldOffset += oldLength;
    int recordDelta = (newLength - oldLength);

    int oldFieldStatus = 0;
    int oldFieldDataLength = 0;
    int newFieldStatus = 0;
    int newFieldDataLength = 0;

    int oldEndFieldExclusive = startField + oldFieldCount;
    int newEndFieldExclusive = startField + newFieldCount;

    for (int fieldId = startField; fieldId < newEndFieldExclusive; fieldId++) {

      int oldFieldLength = 0;
      if (fieldId < oldEndFieldExclusive) {
        rawDataIn.setPosition(oldOffset);
        oldFieldStatus = StoredFieldHeader.readStatus(rawDataIn);
        oldFieldDataLength = StoredFieldHeader.readFieldDataLength(rawDataIn, oldFieldStatus, slotFieldSize);
        oldFieldLength = StoredFieldHeader.size(oldFieldStatus, oldFieldDataLength, slotFieldSize)
          + oldFieldDataLength;
      }

      newFieldStatus = StoredFieldHeader.readStatus(in);
      newFieldDataLength = StoredFieldHeader.readFieldDataLength(in, newFieldStatus, slotFieldSize);

      // if no value was provided on an update of a field then use the old value,
      // unless the old field didn't exist.
      if (StoredFieldHeader.isNonexistent(newFieldStatus) && (fieldId < oldEndFieldExclusive)) {

        // may need to move this old field ...
        if ((newDataToWrite == null) || (newDataToWrite.getUsed() == 0)) {
          // there is no old data to catch up on; is the data at
          // the correct position already?
          if (newOffset == oldOffset) {
            // yes, nothing to do!!
            if (SanityManager.DEBUG) {
              if (unusedSpace != 0)
              SanityManager.THROWASSERT("Unused space is out of sync, expect 0 got " + unusedSpace);
            }
          } else {
            // need to shift the field left
            if (SanityManager.DEBUG) {
              if (unusedSpace != (oldOffset - newOffset))
              SanityManager.THROWASSERT(
                "Unused space is out of sync expected " + (oldOffset - newOffset) + " got " + unusedSpace);
            }

            System.arraycopy(pageData, oldOffset, pageData, newOffset, oldFieldLength);
          }
          newOffset += oldFieldLength;

          // last field to be updated can eat into the reserve space
          if (fieldId == reservedSpaceFieldId)
            unusedSpace += getReservedCount(slot);

        } else {
          // there is data still to be written, just append this field to the
          // saved data
          int position = newDataToWrite.getPosition();
          newDataToWrite.setPosition(position + oldFieldLength);
          System.arraycopy(pageData, oldOffset,
            newDataToWrite.getByteArray(), position, oldFieldLength);

          unusedSpace += oldFieldLength;

          // last field to be updated can eat into the reserve space
          if (fieldId == reservedSpaceFieldId)
            unusedSpace += getReservedCount(slot);

          // attempt to write out some of what we have in the side buffer now.
          int copyLength = moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);
          newOffset += copyLength;
          unusedSpace -= copyLength;

        }
        oldOffset += oldFieldLength;
        continue;
      }

      newFieldStatus = StoredFieldHeader.setFixed(newFieldStatus, false);

      int newFieldHeaderLength = StoredFieldHeader.size(newFieldStatus, newFieldDataLength, slotFieldSize);
      int newFieldLength = newFieldHeaderLength + newFieldDataLength;

      recordDelta += (newFieldLength - oldFieldLength);

      // See if we can write this field now

      // space available increases by the amount of the old field
      unusedSpace += oldFieldLength;
      oldOffset += oldFieldLength;

      // last field to be updated can eat into the reserve space
      if (fieldId == reservedSpaceFieldId)
        unusedSpace += getReservedCount(slot);

      if ((newDataToWrite != null) && (newDataToWrite.getUsed() != 0)) {

        // catch up on the old data if possible
        int copyLength = moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);
        newOffset += copyLength;
        unusedSpace -= copyLength;
      }

      if (((newDataToWrite == null) || (newDataToWrite.getUsed() == 0))
        && (unusedSpace >= newFieldHeaderLength)) {

        // can fit the header in
        rawDataOut.setPosition(newOffset);
        newOffset += StoredFieldHeader.write(rawDataOut, newFieldStatus, newFieldDataLength, slotFieldSize);
        unusedSpace -= newFieldHeaderLength;

        if (newFieldDataLength != 0) {

          // read as much of the field as possible
          int fieldCopy = unusedSpace >= newFieldDataLength ?
              newFieldDataLength : unusedSpace;

          if (fieldCopy != 0) {
            in.readFully(pageData, newOffset, fieldCopy);

            newOffset += fieldCopy;
            unusedSpace -= fieldCopy;
          }


          fieldCopy = newFieldDataLength - fieldCopy;
          if (fieldCopy != 0) {
            if (newDataToWrite == null)
              newDataToWrite = new DynamicByteArrayOutputStream(newFieldLength * 2);

            // append the remaining portion of the field to the saved data
            int position = newDataToWrite.getPosition();
            newDataToWrite.setPosition(position + fieldCopy);
            in.readFully(newDataToWrite.getByteArray(),
                position, fieldCopy);

          }
        }
      } else {
        // can't fit this header, or therefore the field; append it
        // to the buffer.

        if (newDataToWrite == null)
          newDataToWrite = new DynamicByteArrayOutputStream(newFieldLength * 2);

        StoredFieldHeader.write(newDataToWrite, newFieldStatus, newFieldDataLength, slotFieldSize);

        // save the new field data
        if (newFieldDataLength != 0) {
          int position = newDataToWrite.getPosition();
          newDataToWrite.setPosition(position + newFieldDataLength);
          in.readFully(newDataToWrite.getByteArray(),
                position, newFieldDataLength);
        }
      }
    }

    // at this point there may still be data left in the saved buffer
    // but presumably we can't fit it in

    int reservedDelta;

    if ((newDataToWrite != null) && (newDataToWrite.getUsed() != 0)) {

      // need to shift the later records down ...
      int nextRecordOffset = startingOffset + getTotalSpace(slot);

      int spaceRequiredFromFreeSpace = newDataToWrite.getUsed() - (nextRecordOffset - newOffset);

      if (SanityManager.DEBUG) {
        if (newOffset > nextRecordOffset)
          SanityManager.THROWASSERT("data has overwritten next record - offset " + newOffset
              + " next record " + nextRecordOffset);

        if ((spaceRequiredFromFreeSpace <= 0) || (spaceRequiredFromFreeSpace > freeSpace))
          SanityManager.THROWASSERT("invalid space required " + spaceRequiredFromFreeSpace
          + " newDataToWrite.getUsed() " + newDataToWrite.getUsed()
          + " nextRecordOffset " + nextRecordOffset
          + " newOffset " + newOffset
          + " reservedSpaceFieldId " + reservedSpaceFieldId
          + " startField " + startField
          + " newEndFieldExclusive " + newEndFieldExclusive
          + " newFieldCount " + newFieldCount
          + " oldFieldCount " + oldFieldCount
          + " slot " + slot
          + " freeSpace " + freeSpace
          + " unusedSpace " + unusedSpace
          + " page " + getPageId());


        if ((getReservedCount(slot) + spaceRequiredFromFreeSpace) != recordDelta)
          SanityManager.THROWASSERT("mismatch on count: reserved " + getReservedCount(slot) +
            "free space take " + spaceRequiredFromFreeSpace +
            "record delta " + recordDelta);

      }

      if (spaceRequiredFromFreeSpace > freeSpace) {
        throw dataFactory.markCorrupt(
                    StandardException.newException(
                        SQLState.DATA_CORRUPT_PAGE, getPageId()));
      }

      // see if this is the last record on the page, if so a simple
      // shift of the remaining fields will suffice...
      expandPage(nextRecordOffset, spaceRequiredFromFreeSpace);

      unusedSpace += spaceRequiredFromFreeSpace;

      moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);

      reservedDelta = -1 * getReservedCount(slot);

      if (SanityManager.DEBUG) {
        if (newDataToWrite.getUsed() != 0)
          SanityManager.THROWASSERT("data is left in save buffer ... " + newDataToWrite.getUsed());
      }
    } else {
      reservedDelta = -1 * recordDelta;
    }

 
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.