Package org.apache.derby.iapi.services.io

Examples of org.apache.derby.iapi.services.io.ArrayInputStream
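
The snippets below come from Derby's storage and logging code. As a primer, here is a minimal, self-contained sketch of the API they all rely on: wrap a byte[] in an ArrayInputStream, move the cursor with setPosition(), and read primitives with the DataInput-style methods. It assumes derby.jar is on the classpath; the buffer layout and field names are invented for illustration.

  import java.io.ByteArrayOutputStream;
  import java.io.DataOutputStream;
  import java.io.IOException;

  import org.apache.derby.iapi.services.io.ArrayInputStream;

  public class ArrayInputStreamSketch
  {
    public static void main(String[] args) throws IOException
    {
      // Build a small buffer with a DataOutputStream (illustrative layout:
      // an int "format id" followed by a long "page offset").
      ByteArrayOutputStream bos = new ByteArrayOutputStream();
      DataOutputStream dos = new DataOutputStream(bos);
      dos.writeInt(42);        // hypothetical format id
      dos.writeLong(8192L);    // hypothetical page offset
      dos.flush();

      byte[] buffer = bos.toByteArray();

      // Wrap the array and read it back. setPosition() moves the read
      // cursor to an absolute offset in the backing array, which is how
      // the Derby code below jumps to fixed header offsets.
      ArrayInputStream in = new ArrayInputStream(buffer);
      in.setPosition(0);

      int formatId   = in.readInt();
      long pageStart = in.readLong();

      System.out.println("formatId=" + formatId + " pageStart=" + pageStart);
    }
  }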


  }


  private void readAllocPageHeader() throws IOException
  {
    ArrayInputStream lrdi = rawDataIn;
    lrdi.setPosition(ALLOC_PAGE_HEADER_OFFSET);

    nextAllocPageNumber = lrdi.readLong();
    nextAllocPageOffset = lrdi.readLong();
    reserved1 = lrdi.readLong();
    reserved2 = lrdi.readLong();
    reserved3 = lrdi.readLong();
    reserved4 = lrdi.readLong();
  }
View Full Code Here


  }

  private AllocExtent readExtent(int offset)
     throws IOException, ClassNotFoundException
  {
    ArrayInputStream lrdi = rawDataIn;
    rawDataIn.setPosition(offset);
    AllocExtent newExtent = new AllocExtent();
    newExtent.readExternal(lrdi);

    // in 1.3 or later, make sure the upgrade from before 1.3 is legal.
View Full Code Here

    @exception IOException error in reading the header from file
  */
  private void readHeaderFromArray(byte[] a)
     throws StandardException, IOException
  {
    ArrayInputStream inStream = new ArrayInputStream(a);

    inStream.setLimit(0, CONTAINER_INFO_SIZE);
    int fid = inStream.readInt();
    if (fid != formatIdInteger)
        {
      throw StandardException.newException(
                SQLState.DATA_UNKNOWN_CONTAINER_FORMAT, getIdentity(),
                new Long(fid));
        }

    int status = inStream.readInt();
    pageSize = inStream.readInt();
    spareSpace = inStream.readInt();
    minimumRecordSize = inStream.readInt();
    initialPages = inStream.readShort();
    PreAllocSize = inStream.readShort();
    firstAllocPageNumber = inStream.readLong();
    firstAllocPageOffset = inStream.readLong();
    containerVersion = inStream.readLong();
    estimatedRowCount = inStream.readLong();
    reusableRecordIdSequenceNumber = inStream.readLong();
    lastLogInstant = null;

    if (PreAllocSize == 0)  // pre 2.0, we don't store this.
      PreAllocSize = DEFAULT_PRE_ALLOC_SIZE;

    long spare3 = inStream.readLong();  // read spare long

    // upgrade - if this is a container that was created before
    // initialPages was stored, it will have a zero value.  Set it to the
    // default of 1.
    if (initialPages == 0)
      initialPages = 1;

    // container read in from disk, reset preAllocation values
    PreAllocThreshold = PRE_ALLOC_THRESHOLD;

    // validate checksum
    long onDiskChecksum = inStream.readLong();
    checksum.reset();
    checksum.update(a, 0, CONTAINER_INFO_SIZE - CHECKSUM_SIZE);

    if (onDiskChecksum != checksum.getValue())
    {
View Full Code Here
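
The header parse above ends by validating a trailing checksum computed over every header byte except the checksum itself. The following sketch reproduces that pattern with java.util.zip.CRC32; HEADER_SIZE and CHECKSUM_SIZE are made-up stand-ins for Derby's CONTAINER_INFO_SIZE and CHECKSUM_SIZE, and the two payload fields are invented.

  import java.io.ByteArrayOutputStream;
  import java.io.DataOutputStream;
  import java.io.IOException;
  import java.util.zip.CRC32;

  import org.apache.derby.iapi.services.io.ArrayInputStream;

  public class HeaderChecksumSketch
  {
    // Hypothetical sizes standing in for CONTAINER_INFO_SIZE / CHECKSUM_SIZE:
    // a 4-byte id, an 8-byte field, and an 8-byte trailing checksum.
    private static final int HEADER_SIZE   = 20;
    private static final int CHECKSUM_SIZE = 8;

    public static void main(String[] args) throws IOException
    {
      // Write a header: payload first, then a CRC32 over the payload bytes.
      ByteArrayOutputStream bos = new ByteArrayOutputStream();
      DataOutputStream dos = new DataOutputStream(bos);
      dos.writeInt(7);          // pretend format id
      dos.writeLong(123456L);   // pretend field
      dos.flush();

      CRC32 checksum = new CRC32();
      checksum.update(bos.toByteArray(), 0, HEADER_SIZE - CHECKSUM_SIZE);
      dos.writeLong(checksum.getValue());
      dos.flush();

      byte[] header = bos.toByteArray();

      // Read it back the way readHeaderFromArray does: limit the stream to
      // the header, read the fields, then verify the checksum over
      // everything except the trailing checksum itself.
      ArrayInputStream in = new ArrayInputStream(header);
      in.setLimit(0, HEADER_SIZE);

      int  fid   = in.readInt();
      long value = in.readLong();
      long onDiskChecksum = in.readLong();

      checksum.reset();
      checksum.update(header, 0, HEADER_SIZE - CHECKSUM_SIZE);

      if (onDiskChecksum != checksum.getValue())
        throw new IOException("header checksum mismatch");

      System.out.println("fid=" + fid + " value=" + value + " (checksum ok)");
    }
  }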

    byte[] array = byteArray.getArray();
   
    // now extract the relevant information from array - basically
    // duplicate the code in readHeaderFromArray
    ArrayInputStream inStream = new ArrayInputStream(array);

    int status = 0;

    try
    {     
      inStream.setLimit(0, CONTAINER_INFO_SIZE);

      int fid = inStream.readInt();
      if (fid != formatIdInteger)
      {
        // RESOLVE: do something about this when we have > 1 container format
        throw StandardException.newException(
                    SQLState.DATA_UNKNOWN_CONTAINER_FORMAT,
                    getIdentity(), new Long(fid));
      }

      status = inStream.readInt();
      pageSize = inStream.readInt();
      spareSpace = inStream.readInt();
      minimumRecordSize = inStream.readInt();
      initialPages = inStream.readShort();

    }
    catch (IOException ioe)
    {
      throw StandardException.newException(
View Full Code Here

    // doMe.
    //
    // Keep in mind the dynamic nature of the logOutputBuffer which means
    // it could switch buffer from underneath the logOutputBuffer on every
    // write.
    logIn = new ArrayInputStream();
    logRecord = new LogRecord();

  }
View Full Code Here
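
The no-arg constructor above creates an ArrayInputStream with no backing array; the log scan later points it at each record's bytes, so one stream object is reused across records. Below is a sketch of that reuse pattern. It assumes the class exposes a setData(byte[]) mutator; if your Derby version does not, constructing a fresh ArrayInputStream per buffer is the equivalent fallback.

  import java.io.ByteArrayOutputStream;
  import java.io.DataOutputStream;
  import java.io.IOException;

  import org.apache.derby.iapi.services.io.ArrayInputStream;

  public class ReusableStreamSketch
  {
    public static void main(String[] args) throws IOException
    {
      // One stream object, reused for every "record" buffer we see.
      ArrayInputStream in = new ArrayInputStream();

      for (long value = 1; value <= 3; value++)
      {
        // Pretend each iteration hands us a freshly read record buffer.
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(bos);
        dos.writeLong(value);
        dos.flush();

        // Point the existing stream at the new buffer and rewind it
        // (setData(byte[]) is assumed here, not shown in the snippets).
        in.setData(bos.toByteArray());
        in.setPosition(0);

        System.out.println("record payload = " + in.readLong());
      }
    }
  }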

    RePreparable lop          = null;

    // stream to read the log record - initial size 4096, scanLog needs
    // to resize if the log record is larger than that.
    ArrayInputStream    rawInput    = null;

    try
    {
            StreamLogScan scanLog;

      if (prepareStartAt == null)
            {
                // don't know where to start, scan from end of log
        scanLog =
                    (StreamLogScan) logFactory.openBackwardsScan(prepareStopAt);
            }
      else
      {
        if (prepareStartAt.lessThan(prepareStopAt))
                {
                    // nothing to prepare!
          return;
                }

        scanLog = (StreamLogScan)
          logFactory.openBackwardsScan(
                        ((LogCounter) prepareStartAt).getValueAsLong(),
                        prepareStopAt);
      }

      if (SanityManager.DEBUG)
        SanityManager.ASSERT(
                    scanLog != null, "cannot open log for prepare");

      rawInput    = new ArrayInputStream(new byte[4096]);

      LogRecord record;

      while ((record =
                    scanLog.getNextRecord(rawInput, prepareId, 0))
                       != null)
      {
        if (SanityManager.DEBUG)
        {
          SanityManager.ASSERT(
                        record.getTransactionId().equals(prepareId),
              "getNextRecord return unqualified log rec for prepare");
        }

        logrecordseen++;

        if (record.isCLR())
        {
          clrskipped++;

                    // the loggable is still in the input stream, get rid of it
          record.skipLoggable();

          // read the prepareInstant
          long prepareInstant = rawInput.readLong();

          if (SanityManager.DEBUG)
                    {
                        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
                        {
                            SanityManager.DEBUG(
                                LogToFile.DBG_FLAG,
                                    "Skipping over CLRs, reset scan to " +
                                    LogCounter.toDebugString(prepareInstant));
                        }
          }

          scanLog.resetPosition(new LogCounter(prepareInstant));
          // scanLog now positioned at the beginning of the log
          // record that was rolled back by this CLR.
          // The scan is a backward one so getNextRecord will skip
          // over the record that was rolled back and go to the one
          // previous to it

          continue;
        }

                if (record.requiresPrepareLocks())
                {
                    lop = record.getRePreparable();
                }
                else
                {
                    continue;
                }

        if (lop != null)
        {
                    // Reget locks based on log record.  reclaim all locks with
                    // a serializable locking policy, since we are only
                    // reclaiming write locks, isolation level does not matter
                    // much.

                    lop.reclaimPrepareLocks(
                        t,
                        t.newLockingPolicy(
                            LockingPolicy.MODE_RECORD,
                            TransactionController.ISOLATION_REPEATABLE_READ,
                            true));

          if (SanityManager.DEBUG)
                    {
                        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
                        {
                            SanityManager.DEBUG(
                                LogToFile.DBG_FLAG,
                                "Reprepare log record at instant " +
                                scanLog.getInstant() + " : " + lop);
                        }
                    }

        }
      }

    }
    catch (ClassNotFoundException cnfe)
    {
      throw logFactory.markCorrupt(
                StandardException.newException(SQLState.LOG_CORRUPTED, cnfe));
    }
      catch (IOException ioe)
    {
      throw logFactory.markCorrupt(
                StandardException.newException(
                    SQLState.LOG_READ_LOG_FOR_UNDO, ioe));
    }
    catch (StandardException se)
    {
      throw
                logFactory.markCorrupt(
                    StandardException.newException(
                        SQLState.LOG_UNDO_FAILED, se, 
                        prepareId, lop, (Object) null));
    }
    finally
    {
      if (rawInput != null)
      {
        try
        {
          rawInput.close();
        }
        catch (IOException ioe)
        {
          throw logFactory.markCorrupt(
                        StandardException.newException(
View Full Code Here

    Compensation  compensation = null;
    Undoable      lop          = null;

    // stream to read the log record - initial size 4096, scanLog needs
    // to resize if the log record is larger than that.
    ArrayInputStream    rawInput   = null;

    try
    {
      if (undoStartAt == null)
            {
                // don't know where to start, rollback from end of log

        scanLog = (StreamLogScan)
          logFactory.openBackwardsScan(undoStopAt);
            }
      else
      {
        if (undoStartAt.lessThan(undoStopAt))
                {
                    // nothing to undo!
          return;
                }

        long undoStartInstant =
                    ((LogCounter) undoStartAt).getValueAsLong();

        scanLog = (StreamLogScan)
          logFactory.openBackwardsScan(undoStartInstant, undoStopAt);
      }

      if (SanityManager.DEBUG)
        SanityManager.ASSERT(
                    scanLog != null, "cannot open log for undo");

      rawInput   = new ArrayInputStream(new byte[4096]);

      LogRecord record;

      while ((record =
                    scanLog.getNextRecord(rawInput, undoId, 0))
                        != null)
      {
        if (SanityManager.DEBUG)
        {
          SanityManager.ASSERT(
                        record.getTransactionId().equals(undoId),
                        "getNextRecord return unqualified log record for undo");
        }

        logrecordseen++;

        if (record.isCLR())
        {
          clrskipped++;

                    // the loggable is still in the input stream, get rid of it
          record.skipLoggable();

          // read the undoInstant
          long undoInstant = rawInput.readLong();

          if (SanityManager.DEBUG)
                    {
                        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
                        {
                            SanityManager.DEBUG(
                                LogToFile.DBG_FLAG,
                                "Skipping over CLRs, reset scan to " +
                                LogCounter.toDebugString(undoInstant));
                        }
                    }


          scanLog.resetPosition(new LogCounter(undoInstant));

          // scanLog now positioned at the beginning of the log
          // record that was rolled back by this CLR.
          // The scan is a backward one so getNextRecord will skip
          // over the record that was rolled back and go to the one
          // previous to it

          continue;
        }

        lop = record.getUndoable();

        if (lop != null)
        {
          int optionalDataLength = rawInput.readInt();
          int savePosition = rawInput.getPosition();
          rawInput.setLimit(savePosition, optionalDataLength);
 
          compensation = lop.generateUndo(t, rawInput);

          if (SanityManager.DEBUG)
                    {
                        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
                        {
                            SanityManager.DEBUG(
                                LogToFile.DBG_FLAG,
                                "Rollback log record at instant " +
                                LogCounter.toDebugString(scanLog.getInstant()) +
                                " : " + lop);
                        }
                    }

          clrgenerated++;

          if (compensation != null)
          {
            // generateUndo may have read stuff off the
            // stream, reset it for the undo operation.
            rawInput.setLimit(savePosition, optionalDataLength);

            // log the compensation op that rolls back the
                        // operation at this instant
            t.logAndUndo(
                            compensation, new LogCounter(scanLog.getInstant()),
                            rawInput);

            compensation.releaseResource(t);
            compensation = null;
          }

          // if compensation is null, log operation is redo only
        }
        // if this is not an undoable operation, continue with next log
        // record
      }
    }
    catch (ClassNotFoundException cnfe)
    {
      throw logFactory.markCorrupt(
                StandardException.newException(SQLState.LOG_CORRUPTED, cnfe));
    }
      catch (IOException ioe)
    {
      throw logFactory.markCorrupt(
                StandardException.newException(
                    SQLState.LOG_READ_LOG_FOR_UNDO, ioe));
    }
    catch (StandardException se)
    {
            // TODO (4327) - exceptions caught here are nested in the exception
            // below but for some reason the nested exceptions are not logged
            // or reported in any way.

      throw logFactory.markCorrupt(
                StandardException.newException(
                    SQLState.LOG_UNDO_FAILED, se, undoId, lop, compensation));
    }
    finally
    {
      if (compensation != null)
            {
                // errored out
        compensation.releaseResource(t);
            }

      if (rawInput != null)
      {
        try
        {
          rawInput.close();
        }
        catch (IOException ioe)
        {
          throw logFactory.markCorrupt(
                        StandardException.newException(
View Full Code Here
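
The optional-data handling in the undo loop is the most characteristic ArrayInputStream idiom in these examples: read a length prefix, remember getPosition(), then call setLimit(savePosition, optionalDataLength) so the consumer cannot read past its own data, and call setLimit with the same arguments again to rewind for a second pass (that rewind behavior is inferred from the comment in the snippet above). A stripped-down sketch with an invented record layout:

  import java.io.ByteArrayOutputStream;
  import java.io.DataOutputStream;
  import java.io.IOException;

  import org.apache.derby.iapi.services.io.ArrayInputStream;

  public class OptionalDataWindowSketch
  {
    public static void main(String[] args) throws IOException
    {
      // Made-up record: an int length prefix followed by that many bytes
      // of "optional data" (here, two longs).
      ByteArrayOutputStream bos = new ByteArrayOutputStream();
      DataOutputStream dos = new DataOutputStream(bos);
      dos.writeInt(16);         // optional data length
      dos.writeLong(100L);      // optional data, first long
      dos.writeLong(200L);      // optional data, second long
      dos.flush();

      ArrayInputStream in = new ArrayInputStream(bos.toByteArray());
      in.setPosition(0);

      int optionalDataLength = in.readInt();
      int savePosition       = in.getPosition();

      // First pass: restrict the stream to just the optional data, as the
      // undo code does before handing the stream to generateUndo().
      in.setLimit(savePosition, optionalDataLength);
      long first = in.readLong();

      // Second pass: the same setLimit call rewinds the window so the
      // data can be consumed again from the start.
      in.setLimit(savePosition, optionalDataLength);
      long firstAgain = in.readLong();
      long second     = in.readLong();

      System.out.println(first + " " + firstAgain + " " + second);
    }
  }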

    throws StandardException, IOException
  {
    int offset = getFieldOffset(slot, fieldNumber);

    // these reads are always against the page array
    ArrayInputStream lrdi = rawDataIn;

    // now write out the field we are interested in ...
    lrdi.setPosition(offset);
    int fieldStatus = StoredFieldHeader.readStatus(lrdi);
    int fieldDataLength = StoredFieldHeader.readFieldDataLength(lrdi, fieldStatus, slotFieldSize);

    StoredFieldHeader.write(out, fieldStatus, fieldDataLength, slotFieldSize);
   
    if (fieldDataLength != 0) {
      // and then the data
      out.write(pageData, lrdi.getPosition(), fieldDataLength);
    }
  }
View Full Code Here
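
The field copy above never materializes the field value: it reads the field header through the stream, then uses getPosition() as an index into the page's backing array and writes the raw bytes straight out. A small sketch of the same technique against a plain byte[], with an invented length-prefixed layout:

  import java.io.ByteArrayOutputStream;
  import java.io.DataOutputStream;
  import java.io.IOException;

  import org.apache.derby.iapi.services.io.ArrayInputStream;

  public class RawCopySketch
  {
    public static void main(String[] args) throws IOException
    {
      // Made-up layout: an int length prefix, then that many payload bytes.
      ByteArrayOutputStream bos = new ByteArrayOutputStream();
      DataOutputStream dos = new DataOutputStream(bos);
      byte[] payload = { 1, 2, 3, 4, 5 };
      dos.writeInt(payload.length);
      dos.write(payload);
      dos.flush();

      byte[] pageData = bos.toByteArray();

      ArrayInputStream in = new ArrayInputStream(pageData);
      in.setPosition(0);

      int fieldDataLength = in.readInt();

      // getPosition() tells us where the payload starts inside pageData,
      // so the bytes can be copied without reading them through the stream.
      byte[] copy = new byte[fieldDataLength];
      System.arraycopy(pageData, in.getPosition(), copy, 0, fieldDataLength);

      System.out.println(java.util.Arrays.toString(copy));
    }
  }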

    logAction(instant);

    int offset = getFieldOffset(slot, fieldNumber);

    // get the field header information, the input stream came from the log
    ArrayInputStream lrdi = rawDataIn;
    lrdi.setPosition(offset);
    int oldFieldStatus = StoredFieldHeader.readStatus(lrdi);
    int oldFieldDataLength = StoredFieldHeader.readFieldDataLength(lrdi, oldFieldStatus, slotFieldSize);

    int newFieldStatus = StoredFieldHeader.readStatus(in);
    int newFieldDataLength = StoredFieldHeader.readFieldDataLength(in, newFieldStatus, slotFieldSize);
View Full Code Here

          "fieldNumber: " + fieldNumber +
          " start field: " + startField +
          " number of fields " + numberFields);
    }

    ArrayInputStream lrdi = rawDataIn;

    // skip the record header
    lrdi.setPosition(offset + recordHeader.size());

    // skip any earlier fields ...
    for (int i = startField; i < fieldNumber; i++) {
      skipField(lrdi);
    }
View Full Code Here
