Examples of DynamicByteArrayOutputStream


Examples of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream

    if (SanityManager.DEBUG) {
      SanityManager.ASSERT(this.page != null);
    }

    DynamicByteArrayOutputStream logBuffer = t.getLogBuffer();
    int optionalDataStart = logBuffer.getPosition();

    if (SanityManager.DEBUG) {
      SanityManager.ASSERT(optionalDataStart == 0,
        "Buffer for writing the optional data should start at position 0");
    }

    for (int i = 0; i < num_rows; i++)
    {
      if (needDataLogged)
      {
        this.page.logRecord(i+slot, BasePage.LOG_RECORD_DEFAULT,
                  recordIds[i], (FormatableBitSet) null, logBuffer,
                  (RecordHandle) null);
      } else
      {
        this.page.logRecord(i+slot, BasePage.LOG_RECORD_FOR_PURGE,
                  recordIds[i], (FormatableBitSet) null, logBuffer,
                  (RecordHandle) null);
      }
    }
   
    int optionalDataLength = logBuffer.getPosition() - optionalDataStart;

    if (SanityManager.DEBUG) {
      if (optionalDataLength != logBuffer.getUsed())
        SanityManager.THROWASSERT("wrong optional data length, optionalDataLength = "
          + optionalDataLength + ", logBuffer.getUsed() = " + logBuffer.getUsed());
    }

    // set the position to the beginning of the buffer
    logBuffer.setPosition(optionalDataStart);

    this.preparedLog = new ByteArray(logBuffer.getByteArray(), optionalDataStart,
      optionalDataLength);
  }
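
The snippet above uses a small set of DynamicByteArrayOutputStream methods: getPosition()/getUsed() to measure how much optional data was written, setPosition() to rewind, and getByteArray() to hand the backing array on. A minimal stand-alone sketch of that pattern, using only the methods visible above (the class, data, and the plain write() standing in for page.logRecord() are hypothetical):

    import org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream;

    public class OptionalDataSketch {
        public static void main(String[] args) throws java.io.IOException {
            DynamicByteArrayOutputStream logBuffer = new DynamicByteArrayOutputStream(1024);

            int optionalDataStart = logBuffer.getPosition();    // 0 for a fresh buffer
            logBuffer.write(new byte[] {1, 2, 3, 4});           // stand-in for page.logRecord(...)
            int optionalDataLength = logBuffer.getPosition() - optionalDataStart;

            // when writing started at position 0, the position delta matches getUsed()
            assert optionalDataLength == logBuffer.getUsed();

            // rewind so a later caller sees the buffer positioned at the start of the data
            logBuffer.setPosition(optionalDataStart);

            byte[] backing = logBuffer.getByteArray();          // backing array holding the data
            System.out.println(optionalDataLength + " bytes at offset " + optionalDataStart
                + ", backing array length " + backing.length);
        }
    }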

Examples of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream

  }

  public DynamicByteArrayOutputStream getLogBuffer() {

    if (logBuffer == null) {
      logBuffer = new DynamicByteArrayOutputStream(1024);
    } else {
      logBuffer.reset();
    }

    return logBuffer;
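
A caller-side sketch of the same idea, showing why the reset() branch matters: one buffer (and its backing array) is reused for every operation, with the write position back at 0 each time. The holder class and payloads here are hypothetical:

    import org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream;

    class LogBufferHolder {
        private DynamicByteArrayOutputStream logBuffer;

        DynamicByteArrayOutputStream getLogBuffer() {
            if (logBuffer == null) {
                logBuffer = new DynamicByteArrayOutputStream(1024); // created on first use
            } else {
                logBuffer.reset();  // reuse the existing backing array, position back to 0
            }
            return logBuffer;
        }

        public static void main(String[] args) throws java.io.IOException {
            LogBufferHolder t = new LogBufferHolder();
            for (int i = 0; i < 3; i++) {
                DynamicByteArrayOutputStream buf = t.getLogBuffer();
                buf.write(new byte[] {(byte) i});   // each operation starts writing at position 0
                System.out.println("used after operation " + i + ": " + buf.getUsed());
            }
        }
    }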

Examples of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream

    // If the estimate thinks the row won't fit, return false now to avoid
    // the cost of calling logRow() just to find out whether the row fits.
    if (!allowInsert())
      return false;

    DynamicByteArrayOutputStream out = new DynamicByteArrayOutputStream();

    try
        {
      // This is a public call, start column is rawstore only. 
      // set the starting Column for the row to be 0.

Examples of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream

    throws StandardException
  {
    if (!(spaceForInsert() && (freeSpace >= spaceNeeded)))
      return false;

    DynamicByteArrayOutputStream out = new DynamicByteArrayOutputStream();

    try
        {
      logRow(
                0, true, nextId, row, validColumns, out, startColumn,

Examples of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream

    reservedSpaceFieldId += startField;


    // the new data that needs to be written at newOffset but can't be until
    // unusedSpace >= newDataToWrite.length (allowing for the header)
    DynamicByteArrayOutputStream newDataToWrite = null;

    rawDataOut.setPosition(newOffset);

    // write the record header, which may change in size
    int oldLength = recordHeader.size();
    int newLength = newRecorderHeader.size();

    int unusedSpace = oldLength; // the unused space at newOffset

    // no fields, so we can eat into the reserve space
    if (reservedSpaceFieldId < startField) // no fields
      unusedSpace += getReservedCount(slot);

    if (unusedSpace >= newLength) {
      newRecorderHeader.write(rawDataOut);
      newOffset += newLength;
      unusedSpace -= newLength;
     
    } else {     

      newDataToWrite = new DynamicByteArrayOutputStream(getPageSize());
      newRecorderHeader.write(newDataToWrite);
    }
    oldOffset += oldLength;
    int recordDelta = (newLength - oldLength);

    int oldFieldStatus = 0;
    int oldFieldDataLength = 0;
    int newFieldStatus = 0;
    int newFieldDataLength = 0;

    int oldEndFieldExclusive = startField + oldFieldCount;
    int newEndFieldExclusive = startField + newFieldCount;

    for (int fieldId = startField; fieldId < newEndFieldExclusive; fieldId++) {

      int oldFieldLength = 0;
      if (fieldId < oldEndFieldExclusive) {
        rawDataIn.setPosition(oldOffset);
        oldFieldStatus = StoredFieldHeader.readStatus(rawDataIn);
        oldFieldDataLength = StoredFieldHeader.readFieldDataLength(rawDataIn, oldFieldStatus, slotFieldSize);
        oldFieldLength = StoredFieldHeader.size(oldFieldStatus, oldFieldDataLength, slotFieldSize)
          + oldFieldDataLength;
      }

      newFieldStatus = StoredFieldHeader.readStatus(in);
      newFieldDataLength = StoredFieldHeader.readFieldDataLength(in, newFieldStatus, slotFieldSize);

      // if no value was provided on an update of a field then use the old value,
      // unless the old field didn't exist.
      if (StoredFieldHeader.isNonexistent(newFieldStatus) && (fieldId < oldEndFieldExclusive)) {

        // may need to move this old field ...
        if ((newDataToWrite == null) || (newDataToWrite.getUsed() == 0)) {
          // there is no old data to catch up on; is the data
          // already at the correct position?
          if (newOffset == oldOffset) {
            // yes, nothing to do!!
            if (SanityManager.DEBUG) {
              if (unusedSpace != 0)
              SanityManager.THROWASSERT("Unused space is out of sync, expect 0 got " + unusedSpace);
            }
          } else {
            // need to shift the field left
            if (SanityManager.DEBUG) {
              if (unusedSpace != (oldOffset - newOffset))
              SanityManager.THROWASSERT(
                "Unused space is out of sync expected " + (oldOffset - newOffset) + " got " + unusedSpace);
            }

            System.arraycopy(pageData, oldOffset, pageData, newOffset, oldFieldLength);
          }
          newOffset += oldFieldLength;

          // last field to be updated can eat into the reserve space
          if (fieldId == reservedSpaceFieldId)
            unusedSpace += getReservedCount(slot);

        } else {
          // there is data still to be written, just append this field to the
          // saved data
          int position = newDataToWrite.getPosition();
          newDataToWrite.setPosition(position + oldFieldLength);
          System.arraycopy(pageData, oldOffset,
            newDataToWrite.getByteArray(), position, oldFieldLength);

          unusedSpace += oldFieldLength;

          // last field to be updated can eat into the reserve space
          if (fieldId == reservedSpaceFieldId)
            unusedSpace += getReservedCount(slot);

          // attempt to write out some of what we have in the side buffer now.
          int copyLength = moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);
          newOffset += copyLength;
          unusedSpace -= copyLength;

        }
        oldOffset += oldFieldLength;
        continue;
      }

      newFieldStatus = StoredFieldHeader.setFixed(newFieldStatus, false);

      int newFieldHeaderLength = StoredFieldHeader.size(newFieldStatus, newFieldDataLength, slotFieldSize);
      int newFieldLength = newFieldHeaderLength + newFieldDataLength;

      recordDelta += (newFieldLength - oldFieldLength);

      // See if we can write this field now

      // space available increases by the amount of the old field
      unusedSpace += oldFieldLength;
      oldOffset += oldFieldLength;

      // last field to be updated can eat into the reserve space
      if (fieldId == reservedSpaceFieldId)
        unusedSpace += getReservedCount(slot);

      if ((newDataToWrite != null) && (newDataToWrite.getUsed() != 0)) {

        // catch up on the old data if possible
        int copyLength = moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);
        newOffset += copyLength;
        unusedSpace -= copyLength;
      }

      if (((newDataToWrite == null) || (newDataToWrite.getUsed() == 0))
        && (unusedSpace >= newFieldHeaderLength)) {

        // can fit the header in
        rawDataOut.setPosition(newOffset);
        newOffset += StoredFieldHeader.write(rawDataOut, newFieldStatus, newFieldDataLength, slotFieldSize);
        unusedSpace -= newFieldHeaderLength;

        if (newFieldDataLength != 0) {

          // read as much of the field as possible
          int fieldCopy = unusedSpace >= newFieldDataLength ?
              newFieldDataLength : unusedSpace;

          if (fieldCopy != 0) {
            in.readFully(pageData, newOffset, fieldCopy);

            newOffset += fieldCopy;
            unusedSpace -= fieldCopy;
          }


          fieldCopy = newFieldDataLength - fieldCopy;
          if (fieldCopy != 0) {
            if (newDataToWrite == null)
              newDataToWrite = new DynamicByteArrayOutputStream(newFieldLength * 2);

            // append the remaining portion of the field to the saved data
            int position = newDataToWrite.getPosition();
            newDataToWrite.setPosition(position + fieldCopy);
            in.readFully(newDataToWrite.getByteArray(),
                position, fieldCopy);

          }
        }
      } else {
        // can't fit this header, and therefore not the field either;
        // append it to the buffer.

        if (newDataToWrite == null)
          newDataToWrite = new DynamicByteArrayOutputStream(newFieldLength * 2);

        StoredFieldHeader.write(newDataToWrite, newFieldStatus, newFieldDataLength, slotFieldSize);

        // save the new field data
        if (newFieldDataLength != 0) {
          int position = newDataToWrite.getPosition();
          newDataToWrite.setPosition(position + newFieldDataLength);
          in.readFully(newDataToWrite.getByteArray(),
                position, newFieldDataLength);
        }
      }
    }

    // at this point there may still be data left in the saved buffer
    // but presumably we can't fit it in

    int reservedDelta;

    if ((newDataToWrite != null) && (newDataToWrite.getUsed() != 0)) {

      // need to shift the later records down ...
      int nextRecordOffset = startingOffset + getTotalSpace(slot);

      int spaceRequiredFromFreeSpace = newDataToWrite.getUsed() - (nextRecordOffset - newOffset);

      if (SanityManager.DEBUG) {
        if (newOffset > nextRecordOffset)
          SanityManager.THROWASSERT("data has overwritten next record - offset " + newOffset
              + " next record " + nextRecordOffset);

        if ((spaceRequiredFromFreeSpace <= 0) || (spaceRequiredFromFreeSpace > freeSpace))
          SanityManager.THROWASSERT("invalid space required " + spaceRequiredFromFreeSpace
          + " newDataToWrite.getUsed() " + newDataToWrite.getUsed()
          + " nextRecordOffset " + nextRecordOffset
          + " newOffset " + newOffset
          + " reservedSpaceFieldId " + reservedSpaceFieldId
          + " startField " + startField
          + " newEndFieldExclusive " + newEndFieldExclusive
          + " newFieldCount " + newFieldCount
          + " oldFieldCount " + oldFieldCount
          + " slot " + slot
          + " freeSpace " + freeSpace
          + " unusedSpace " + unusedSpace
          + " page " + getPageId());


        if ((getReservedCount(slot) + spaceRequiredFromFreeSpace) != recordDelta)
          SanityManager.THROWASSERT("mismatch on count: reserved " + getReservedCount(slot) +
            "free space take " + spaceRequiredFromFreeSpace +
            "record delta " + recordDelta);

      }

      if (spaceRequiredFromFreeSpace > freeSpace) {
        throw dataFactory.markCorrupt(
                    StandardException.newException(
                        SQLState.DATA_CORRUPT_PAGE, getPageId()));
      }

      // see if this is the last record on the page, if so a simple
      // shift of the remaining fields will suffice...
      expandPage(nextRecordOffset, spaceRequiredFromFreeSpace);

      unusedSpace += spaceRequiredFromFreeSpace;

      moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);

      reservedDelta = -1 * getReservedCount(slot);

      if (SanityManager.DEBUG) {
        if (newDataToWrite.getUsed() != 0)
          SanityManager.THROWASSERT("data is left in save buffer ... " + newDataToWrite.getUsed());
      }
    } else {
      reservedDelta = -1 * recordDelta;
    }
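
The update code above leans on one non-obvious DynamicByteArrayOutputStream idiom: to append raw bytes into the side buffer it first advances the position with setPosition(position + length), reserving that many bytes, and then copies straight into getByteArray() at the old position. A small isolated sketch of just that idiom, with hypothetical source data (getUsed() is expected to reflect the appended bytes, as the snippet's own checks rely on):

    import org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream;

    public class ReserveThenCopySketch {
        // append src into buf without going through write(), mirroring the
        // setPosition()/System.arraycopy() pattern used in the snippet above
        static void appendRaw(DynamicByteArrayOutputStream buf, byte[] src) {
            int position = buf.getPosition();
            buf.setPosition(position + src.length);   // reserve src.length bytes
            System.arraycopy(src, 0, buf.getByteArray(), position, src.length);
        }

        public static void main(String[] args) {
            DynamicByteArrayOutputStream sideBuffer = new DynamicByteArrayOutputStream(16);
            appendRaw(sideBuffer, new byte[] {10, 20, 30});
            appendRaw(sideBuffer, new byte[] {40, 50});
            System.out.println("bytes held in side buffer: " + sideBuffer.getUsed());
        }
    }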

 

Examples of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream

                 (startColumn <  endFieldExclusive)))
      {
        boolean                 hitLongColumn;
        int                     nextColumn      = -1;
        Object[]   savedFields     = null;
        DynamicByteArrayOutputStream  logBuffer       = null;

        do
                {
          try
                    {
            // Update this portion of the record.
            // Pass in headRowHandle in case we are to update any
            // long column and they need to be cleaned up by post
            // commit processing.  We don't want to purge the
            // columns right now because in order to reclaim the
            // page, we need to remove them.  But it would be bad
            // to remove them now because the transaction may not
            // commit for a long time.  We can do both purging of
            // the long column and page removal together in the
            // post commit.
            nextColumn =
                            owner.getActionSet().actionUpdate(
                                t, curPage, slot, id, row, validColumns,
                  realStartColumn, logBuffer,
                                realSpaceOnPage, headRowHandle);

            hitLongColumn = false;

          }
                    catch (LongColumnException lce)
                    {
 
            if (lce.getRealSpaceOnPage() == -1)
                        {
              // an update that has caused the row to increase
                            // in size *and* push some fields off the page
                            // that need to be inserted in an overflow page

              // no need to make a copy as we are going to use
                            // this buffer right away
              logBuffer = lce.getLogBuffer();

              savedFields     =
                                (Object[]) lce.getColumn();
                           
              realStartColumn = lce.getNextColumn();
              realSpaceOnPage = -1;

              hitLongColumn   = true;

              continue;
            }

           
            // we caught a real long column exception
            // three things should happen here:
            // 1. insert the long column into overflow pages.
            // 2. append the overflow field header in the main chain.
            // 3. continue the update in the main data chain.
            logBuffer =
                            new DynamicByteArrayOutputStream(lce.getLogBuffer());

            // step 1: insert the long column ... if this update
                        // operation rolls back, purge the after image column
                        // chain and reclaim the overflow page because the
                        // whole chain will be orphaned anyway.
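
Note the two ways the catch block above obtains its buffer: when the log buffer is going to be used right away it is taken as-is from lce.getLogBuffer(), whereas the genuine long-column path first makes a copy with the DynamicByteArrayOutputStream(DynamicByteArrayOutputStream) copy constructor before the update continues in the main data chain.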
