Package org.apache.derby.iapi.services.io

Examples of org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream
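
The snippets on this page lean on a small set of DynamicByteArrayOutputStream calls: the constructors, getByteArray(), getUsed(), getPosition() and setPosition(). Below is a minimal, self-contained sketch of those calls; it is not taken from the Derby sources, and the behaviour noted in the comments is inferred from how the examples further down use the class.

import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream;

public class DynamicByteArrayOutputStreamSketch {

    public static void main(String[] args) throws IOException {
        // a growable in-memory buffer; it is a java.io.OutputStream, so any
        // stream wrapper can write through it
        DynamicByteArrayOutputStream out = new DynamicByteArrayOutputStream();
        DataOutputStream data = new DataOutputStream(out);

        data.writeInt(42);
        data.writeUTF("derby");
        data.flush();

        // getUsed() is the number of valid bytes, getByteArray() exposes the
        // backing array itself (which may be longer than getUsed()), and
        // getPosition()/setPosition() track the current write offset
        byte[] backing = out.getByteArray();
        System.out.println("used=" + out.getUsed()
                + " position=" + out.getPosition()
                + " capacity=" + backing.length);

        // rewind so later writes start again at offset 0, the same trick the
        // page-logging examples below use with setPosition(optionalDataStart)
        out.setPosition(0);
    }
}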


    reservedSpaceFieldId += startField;


    // the new data that needs to be written at newOffset but can't until
    // unusedSpace >= newDataToWrite.length (allowing for the header)
    DynamicByteArrayOutputStream newDataToWrite = null;

    rawDataOut.setPosition(newOffset);

    // write the record header, which may change in size
    int oldLength = recordHeader.size();
    int newLength = newRecorderHeader.size();

    int unusedSpace = oldLength; // the unused space at newOffset

    // no fields, so we can eat into the reserve space
    if (reservedSpaceFieldId < startField) // no fields
      unusedSpace += getReservedCount(slot);

    if (unusedSpace >= newLength) {
      newRecorderHeader.write(rawDataOut);
      newOffset += newLength;
      unusedSpace -= newLength;
     
    } else {     

      newDataToWrite = new DynamicByteArrayOutputStream(getPageSize());
      newRecorderHeader.write(newDataToWrite);
    }
    oldOffset += oldLength;
    int recordDelta = (newLength - oldLength);

    int oldFieldStatus = 0;
    int oldFieldDataLength = 0;
    int newFieldStatus = 0;
    int newFieldDataLength = 0;

    int oldEndFieldExclusive = startField + oldFieldCount;
    int newEndFieldExclusive = startField + newFieldCount;

    for (int fieldId = startField; fieldId < newEndFieldExclusive; fieldId++) {

      int oldFieldLength = 0;
      if (fieldId < oldEndFieldExclusive) {
        rawDataIn.setPosition(oldOffset);
        oldFieldStatus = StoredFieldHeader.readStatus(rawDataIn);
        oldFieldDataLength = StoredFieldHeader.readFieldDataLength(rawDataIn, oldFieldStatus, slotFieldSize);
        oldFieldLength = StoredFieldHeader.size(oldFieldStatus, oldFieldDataLength, slotFieldSize)
          + oldFieldDataLength;
      }

      newFieldStatus = StoredFieldHeader.readStatus(in);
      newFieldDataLength = StoredFieldHeader.readFieldDataLength(in, newFieldStatus, slotFieldSize);

      // if no value was provided on an update of a field then use the old value,
      // unless the old field didn't exist.
      if (StoredFieldHeader.isNonexistent(newFieldStatus) && (fieldId < oldEndFieldExclusive)) {

        // may need to move this old field ...
        if ((newDataToWrite == null) || (newDataToWrite.getUsed() == 0)) {
          // there is no old data to catch up on; is the data
          // already at the correct position?
          if (newOffset == oldOffset) {
            // yes, nothing to do!!
            if (SanityManager.DEBUG) {
              if (unusedSpace != 0)
                SanityManager.THROWASSERT("Unused space is out of sync, expect 0 got " + unusedSpace);
            }
          } else {
            // need to shift the field left
            if (SanityManager.DEBUG) {
              if (unusedSpace != (oldOffset - newOffset))
                SanityManager.THROWASSERT(
                  "Unused space is out of sync expected " + (oldOffset - newOffset) + " got " + unusedSpace);
            }

            System.arraycopy(pageData, oldOffset, pageData, newOffset, oldFieldLength);
          }
          newOffset += oldFieldLength;

          // last field to be updated can eat into the reserve space
          if (fieldId == reservedSpaceFieldId)
            unusedSpace += getReservedCount(slot);

        } else {
          // there is data still to be written, just append this field to the
          // saved data
          int position = newDataToWrite.getPosition();
          newDataToWrite.setPosition(position + oldFieldLength);
          System.arraycopy(pageData, oldOffset,
            newDataToWrite.getByteArray(), position, oldFieldLength);

          unusedSpace += oldFieldLength;

          // last field to be updated can eat into the reserve space
          if (fieldId == reservedSpaceFieldId)
            unusedSpace += getReservedCount(slot);

          // attempt to write out some of what we have in the side buffer now.
          int copyLength = moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);
          newOffset += copyLength;
          unusedSpace -= copyLength;

        }
        oldOffset += oldFieldLength;
        continue;
      }

      newFieldStatus = StoredFieldHeader.setFixed(newFieldStatus, false);

      int newFieldHeaderLength = StoredFieldHeader.size(newFieldStatus, newFieldDataLength, slotFieldSize);
      int newFieldLength = newFieldHeaderLength + newFieldDataLength;

      recordDelta += (newFieldLength - oldFieldLength);

      // See if we can write this field now

      // space available increases by the amount of the old field
      unusedSpace += oldFieldLength;
      oldOffset += oldFieldLength;

      // last field to be updated can eat into the reserve space
      if (fieldId == reservedSpaceFieldId)
        unusedSpace += getReservedCount(slot);

      if ((newDataToWrite != null) && (newDataToWrite.getUsed() != 0)) {

        // catch up on the old data if possible
        int copyLength = moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);
        newOffset += copyLength;
        unusedSpace -= copyLength;
      }

      if (((newDataToWrite == null) || (newDataToWrite.getUsed() == 0))
        && (unusedSpace >= newFieldHeaderLength)) {

        // can fit the header in
        rawDataOut.setPosition(newOffset);
        newOffset += StoredFieldHeader.write(rawDataOut, newFieldStatus, newFieldDataLength, slotFieldSize);
        unusedSpace -= newFieldHeaderLength;

        if (newFieldDataLength != 0) {

          // read as much of the field as possible
          int fieldCopy = unusedSpace >= newFieldDataLength ?
              newFieldDataLength : unusedSpace;

          if (fieldCopy != 0) {
            in.readFully(pageData, newOffset, fieldCopy);

            newOffset += fieldCopy;
            unusedSpace -= fieldCopy;
          }


          fieldCopy = newFieldDataLength - fieldCopy;
          if (fieldCopy != 0) {
            if (newDataToWrite == null)
              newDataToWrite = new DynamicByteArrayOutputStream(newFieldLength * 2);

            // append the remaining portion of the field to the saved data
            int position = newDataToWrite.getPosition();
            newDataToWrite.setPosition(position + fieldCopy);
            in.readFully(newDataToWrite.getByteArray(),
                position, fieldCopy);

          }
        }
      } else {
        // can't fit the header, and therefore the field; append it
        // to the buffer.

        if (newDataToWrite == null)
          newDataToWrite = new DynamicByteArrayOutputStream(newFieldLength * 2);

        StoredFieldHeader.write(newDataToWrite, newFieldStatus, newFieldDataLength, slotFieldSize);

        // save the new field data
        if (newFieldDataLength != 0) {
          int position = newDataToWrite.getPosition();
          newDataToWrite.setPosition(position + newFieldDataLength);
          in.readFully(newDataToWrite.getByteArray(),
                position, newFieldDataLength);
        }
      }
    }

    // at this point there may still be data left in the saved buffer
    // but presumably we can't fit it in

    int reservedDelta;

    if ((newDataToWrite != null) && (newDataToWrite.getUsed() != 0)) {

      // need to shift the later records down ...
      int nextRecordOffset = startingOffset + getTotalSpace(slot);

      int spaceRequiredFromFreeSpace = newDataToWrite.getUsed() - (nextRecordOffset - newOffset);

      if (SanityManager.DEBUG) {
        if (newOffset > nextRecordOffset)
          SanityManager.THROWASSERT("data has overwritten next record - offset " + newOffset
              + " next record " + nextRecordOffset);

        if ((spaceRequiredFromFreeSpace <= 0) || (spaceRequiredFromFreeSpace > freeSpace))
          SanityManager.THROWASSERT("invalid space required " + spaceRequiredFromFreeSpace
          + " newDataToWrite.getUsed() " + newDataToWrite.getUsed()
          + " nextRecordOffset " + nextRecordOffset
          + " newOffset " + newOffset
          + " reservedSpaceFieldId " + reservedSpaceFieldId
          + " startField " + startField
          + " newEndFieldExclusive " + newEndFieldExclusive
          + " newFieldCount " + newFieldCount
          + " oldFieldCount " + oldFieldCount
          + " slot " + slot
          + " freeSpace " + freeSpace
          + " unusedSpace " + unusedSpace
          + " page " + getPageId());


        if ((getReservedCount(slot) + spaceRequiredFromFreeSpace) != recordDelta)
          SanityManager.THROWASSERT("mismatch on count: reserved " + getReservedCount(slot) +
            " free space take " + spaceRequiredFromFreeSpace +
            " record delta " + recordDelta);

      }

      if (spaceRequiredFromFreeSpace > freeSpace) {
        throw dataFactory.markCorrupt(
                    StandardException.newException(
                        SQLState.DATA_CORRUPT_PAGE, getPageId()));
      }

      // see if this is the last record on the page; if so, a simple
      // shift of the remaining fields will suffice...
      expandPage(nextRecordOffset, spaceRequiredFromFreeSpace);

      unusedSpace += spaceRequiredFromFreeSpace;

      moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);

      reservedDelta = -1 * getReservedCount(slot);

      if (SanityManager.DEBUG) {
        if (newDataToWrite.getUsed() != 0)
          SanityManager.THROWASSERT("data is left in save buffer ... " + newDataToWrite.getUsed());
      }
    } else {
      reservedDelta = -1 * recordDelta;
    }

 



        byte[] buffer = null;
        int length = 0;

        try {
            DynamicByteArrayOutputStream dbaos = new DynamicByteArrayOutputStream();
            ObjectOutputStream oos = new ObjectOutputStream( dbaos );

            oos.writeObject( val );

            buffer = dbaos.getByteArray();
            length = dbaos.getUsed();
           
        } catch(IOException e)
        {
            agent.markCommunicationsFailure
                ( e,"DDMWriter.writeUDT()", "", e.getMessage(), "" );
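
For reference, here is the same pattern as the DDMWriter snippet above written as a self-contained round trip: serialize into the growable buffer, then deserialize using only the valid region of the backing array. The class name and sample value are invented; only the DynamicByteArrayOutputStream and java.io calls mirror the snippet.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

import org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream;

public class UdtRoundTripSketch {

    public static void main(String[] args) throws IOException, ClassNotFoundException {
        String val = "stand-in for a user-defined type value";

        // serialize into the growable buffer
        DynamicByteArrayOutputStream dbaos = new DynamicByteArrayOutputStream();
        ObjectOutputStream oos = new ObjectOutputStream(dbaos);
        oos.writeObject(val);
        oos.flush();

        // the backing array may be longer than the data, so carry the length too
        byte[] buffer = dbaos.getByteArray();
        int length = dbaos.getUsed();

        // deserialize only the valid region of the backing array
        ObjectInputStream ois = new ObjectInputStream(
                new ByteArrayInputStream(buffer, 0, length));
        System.out.println(ois.readObject());
    }
}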

  protected DynamicByteArrayOutputStream outBytes; 
  protected ArrayInputStream  limitIn;
 
  public DirectActions() {
    outBytes = new DynamicByteArrayOutputStream();
    limitIn = new ArrayInputStream();
  }


    if (SanityManager.DEBUG) {
      SanityManager.ASSERT(this.page != null);
    }

    DynamicByteArrayOutputStream logBuffer = t.getLogBuffer();
    int optionalDataStart = logBuffer.getPosition();

    if (SanityManager.DEBUG) {
      SanityManager.ASSERT(optionalDataStart == 0,
        "Buffer for writing the optional data should start at position 0");
    }

    for (int i = 0; i < num_rows; i++)
    {
      if(needDataLogged)
      {
        this.page.logRecord(i+slot, BasePage.LOG_RECORD_DEFAULT,
                  recordIds[i], (FormatableBitSet) null, logBuffer,
                  (RecordHandle)null);
      } else
      {
        this.page.logRecord(i+slot, BasePage.LOG_RECORD_FOR_PURGE,
                  recordIds[i], (FormatableBitSet) null, logBuffer,
                  (RecordHandle)null);
      }
    }
   
    int optionalDataLength = logBuffer.getPosition() - optionalDataStart;

    if (SanityManager.DEBUG) {
      if (optionalDataLength != logBuffer.getUsed())
        SanityManager.THROWASSERT("wrong optional data length, optionalDataLength = "
          + optionalDataLength + ", logBuffer.getUsed() = " + logBuffer.getUsed());
    }

    // set the position to the beginning of the buffer
    logBuffer.setPosition(optionalDataStart);

    this.preparedLog = new ByteArray(logBuffer.getByteArray(), optionalDataStart,
      optionalDataLength);
  }
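
The logging snippets above and below share the same bookkeeping: note where the optional data starts in the transaction's log buffer, let the page append record images, remember the length, rewind to the start, and wrap exactly that region in a ByteArray. A stripped-down sketch of that pattern follows; the ByteArray import path and the logRecordInto() helper are assumptions standing in for the this.page.logRecord(...) calls.

import java.io.IOException;

import org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream;
import org.apache.derby.iapi.util.ByteArray;   // import path assumed

class OptionalDataSketch {

    ByteArray preparedLog;

    void prepareOptionalData(DynamicByteArrayOutputStream logBuffer) throws IOException {
        // remember where this operation's optional data begins
        int optionalDataStart = logBuffer.getPosition();

        logRecordInto(logBuffer);              // stand-in for this.page.logRecord(...)

        int optionalDataLength = logBuffer.getPosition() - optionalDataStart;

        // set the position back to the beginning of the optional data
        logBuffer.setPosition(optionalDataStart);

        // expose just that region of the backing array, without copying it
        preparedLog = new ByteArray(
                logBuffer.getByteArray(), optionalDataStart, optionalDataLength);
    }

    private void logRecordInto(DynamicByteArrayOutputStream logBuffer) throws IOException {
        logBuffer.write(new byte[] {1, 2, 3}); // placeholder for a real record image
    }
}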

                 (startColumn <  endFieldExclusive)))
            {
                boolean                 hitLongColumn;
                int                     nextColumn      = -1;
                Object[]   savedFields     = null;
                DynamicByteArrayOutputStream  logBuffer       = null;

                do
                {
                    try
                    {
                        // Update this portion of the record.
                        // Pass in headRowHandle in case we are to update any
                        // long column and they need to be cleaned up by post
                        // commit processing.  We don't want to purge the
                        // columns right now because in order to reclaim the
                        // page, we need to remove them.  But it would be bad
                        // to remove them now because the transaction may not
                        // commit for a long time.  We can do both purging of
                        // the long column and page removal together in the
                        // post commit.
                        nextColumn =
                            owner.getActionSet().actionUpdate(
                                t, curPage, slot, id, row, validColumns,
                                realStartColumn, logBuffer,
                                realSpaceOnPage, headRowHandle);

                        hitLongColumn = false;

                    }
                    catch (LongColumnException lce)
                    {
   
                        if (lce.getRealSpaceOnPage() == -1)
                        {
                            // an update that has caused the row to increase
                            // in size *and* push some fields off the page
                            // that need to be inserted in an overflow page

                            // no need to make a copy as we are going to use
                            // this buffer right away
                            logBuffer = lce.getLogBuffer();

                            savedFields     =
                                (Object[]) lce.getColumn();
                           
                            realStartColumn = lce.getNextColumn();
                            realSpaceOnPage = -1;

                            hitLongColumn   = true;

                            continue;
                        }

                       
                        // we caught a real long column exception
                        // three things should happen here:
                        // 1. insert the long column into overflow pages.
                        // 2. append the overflow field header in the main chain.
                        // 3. continue the update in the main data chain.
                        logBuffer =
                            new DynamicByteArrayOutputStream(lce.getLogBuffer());

                        // step 1: insert the long column ... if this update
                        // operation rolls back, purge the after image column
                        // chain and reclaim the overflow page because the
                        // whole chain will be orphaned anyway.

    if (SanityManager.DEBUG) {
      SanityManager.ASSERT(this.page != null);
    }

    DynamicByteArrayOutputStream logBuffer = t.getLogBuffer();
    int optionalDataStart = logBuffer.getPosition();

    if (SanityManager.DEBUG) {
      SanityManager.ASSERT(optionalDataStart == 0,
        "Buffer for writing the optional data should start at position 0");
    }

    if (undo != null)
      this.page.logRecord(doMeSlot, BasePage.LOG_RECORD_DEFAULT,
                recordId,  (FormatableBitSet) null, logBuffer,
                (RecordHandle)null);
   
    int optionalDataLength = logBuffer.getPosition() - optionalDataStart;

    if (SanityManager.DEBUG) {
      if (optionalDataLength != logBuffer.getUsed())
        SanityManager.THROWASSERT("wrong optional data length, optionalDataLength = "
          + optionalDataLength + ", logBuffer.getUsed() = " + logBuffer.getUsed());
    }

    // set the position to the beginning of the buffer
    logBuffer.setPosition(optionalDataStart);

    this.preparedLog = new ByteArray(logBuffer.getByteArray(), optionalDataStart,
      optionalDataLength);

  }

    if (SanityManager.DEBUG) {
      SanityManager.ASSERT(this.page != null);
      SanityManager.ASSERT(srcPage != null);
    }

    DynamicByteArrayOutputStream logBuffer = t.getLogBuffer();
    int optionalDataStart = logBuffer.getPosition();

    if (SanityManager.DEBUG) {
      SanityManager.ASSERT(optionalDataStart == 0,
        "Buffer for writing the optional data should start at position 0");
    }

    // check to make sure the destination page has the necessary space to
    // take the rows
    int[] spaceNeeded = new int[num_rows];
    int startPosition = logBuffer.getPosition();

    for (int i = 0; i < num_rows; i++)
    {
      // the recordId passed in is the record Id this row will have at
      // the destination page, not the record Id this row has on the
      // srcPage.
      srcPage.logRecord(i + srcSlot, BasePage.LOG_RECORD_DEFAULT,
                recordIds[i], (FormatableBitSet) null, logBuffer,
                (RecordHandle)null);
      spaceNeeded[i] = logBuffer.getPosition() - startPosition;
      startPosition = logBuffer.getPosition();

      // now spaceNeeded[i] has the actual record size.  However, the src
      // page may actually leave more space for the record due to
      // reserved space.  Because we want to copy the reserve space as well,
      // we need to take into account that amount.
      spaceNeeded[i] += reservedSpace[i];
     }

    // page is the destination page.
    if (!page.spaceForCopy(num_rows, spaceNeeded))
        {
      throw StandardException.newException(
                    SQLState.DATA_NO_SPACE_FOR_RECORD);
        }

    int optionalDataLength = logBuffer.getPosition() - optionalDataStart;
   
    if (SanityManager.DEBUG) {
      if (optionalDataLength != logBuffer.getUsed())
        SanityManager.THROWASSERT("wrong optional data length, optionalDataLength = "
          + optionalDataLength + ", logBuffer.getUsed() = " + logBuffer.getUsed());
    }

    // set the position to the beginning of the buffer
    logBuffer.setPosition(optionalDataStart);

    this.preparedLog = new ByteArray(logBuffer.getByteArray(), optionalDataStart,
      optionalDataLength);
  }

    if (SanityManager.DEBUG) {
      SanityManager.ASSERT(this.page != null);
    }

    DynamicByteArrayOutputStream localLogBuffer = null;
    if (logBuffer != null) {
      localLogBuffer = (DynamicByteArrayOutputStream) logBuffer;
    } else {
      realStartColumn = -1;
      realSpaceOnPage = -1;
      localLogBuffer = t.getLogBuffer();
    }
   
    if (isLongColumn) {
      this.startColumn = this.page.logLongColumn(doMeSlot, recordId,
        row[0], localLogBuffer);
    } else {
      this.startColumn = this.page.logRow(doMeSlot, true, recordId,
        row, validColumns, localLogBuffer, this.startColumn, insertFlag,
        realStartColumn, realSpaceOnPage, overflowThreshold);
    }

    int optionalDataStart = localLogBuffer.getBeginPosition();
    int optionalDataLength = localLogBuffer.getPosition() - optionalDataStart;

    this.preparedLog = new ByteArray (localLogBuffer.getByteArray(), optionalDataStart,
      optionalDataLength);
  }
