Package org.apache.derby.io

Examples of org.apache.derby.io.StorageRandomAccessFile
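
StorageRandomAccessFile is Derby's abstraction of a random-access file obtained from a storage factory. It is modeled on java.io.RandomAccessFile: it offers the usual DataInput/DataOutput read and write methods plus seek(), getFilePointer(), length(), and sync() for forcing writes to stable storage. The snippets below come from Derby's transaction log, encryption, and container code. For orientation, here is a minimal sketch that is not taken from the Derby sources; it uses only calls that appear in the examples below (plus readInt(), assumed available because the interface exposes the DataInput read methods), and the class name, helper method, and "rw" mode choice are illustrative.

import java.io.IOException;

import org.apache.derby.io.StorageFile;
import org.apache.derby.io.StorageRandomAccessFile;

public class StorageRandomAccessFileSketch
{
    /**
     * Write a marker int at the start of the file, force it to disk,
     * and read it back.  The StorageFile is assumed to come from an
     * already booted StorageFactory.
     */
    static int writeAndReadMarker(StorageFile file) throws IOException
    {
        StorageRandomAccessFile raf = file.getRandomAccessFile("rw");
        try
        {
            raf.seek(0);
            raf.writeInt(42);     // DataOutput-style write
            raf.sync();           // force the write to stable storage
            raf.seek(0);
            return raf.readInt(); // DataInput-style read
        }
        finally
        {
            raf.close();
        }
    }
}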


    // The purpose of doing this is to test that recovery works correctly when
    // log records at the end of a log file did not get written completely
    // and in the correct order.

    try{
      StorageRandomAccessFile log = logFactory.getLogFileToSimulateCorruption(filenum) ;
   
      int noWrites = (int) amountOfLogWritten / 512;
      // mess up a few bytes in every 512-byte block.
      filepos += 512;
      java.util.Random r = new java.util.Random();
      for(int i = 0 ; i < noWrites ; i++)
      {
        REPORT("corrupting log file: filenum " + filenum + " fileposition " + filepos);
        log.seek(filepos);
        log.writeInt(r.nextInt());
        filepos +=512;

      }
      log.sync();
      log.close();
    }catch(IOException ie)
    {
      throw T_Fail.exceptionFail(ie);
    }
   
View Full Code Here


    // generate 4k of random data and store the encrypted random data together with an
    // MD5 checksum of the unencrypted data. That way, on the next database boot a check
    // can be performed to verify that the key is the same as the one used when the
    // database was created.

    InputStream verifyKeyInputStream = null;
    StorageRandomAccessFile verifyKeyFile = null;
    byte[] data = new byte[VERIFYKEY_DATALEN];
    try
    {
      if(create)
      {
        getSecureRandom().nextBytes(data);
        // get the checksum
        byte[] checksum = getMD5Checksum(data);

        CipherProvider tmpCipherProvider = createNewCipher(ENCRYPT,mainSecretKey,mainIV);
        tmpCipherProvider.encrypt(data, 0, data.length, data, 0);
        // openFileForWrite
        verifyKeyFile = privAccessFile(sf,Attribute.CRYPTO_EXTERNAL_KEY_VERIFY_FILE,"rw");
        // write the checksum length as an int, then the checksum, and then the encrypted data
        verifyKeyFile.writeInt(checksum.length);
        verifyKeyFile.write(checksum);
        verifyKeyFile.write(data);
        verifyKeyFile.sync();
      }
      else
      {
        // Read from verifyKey.dat as an InputStream. This allows for
        // reading the information from verifyKey.dat successfully even when using the jar
        // subprotocol to boot derby. (DERBY-1373)
        verifyKeyInputStream = privAccessGetInputStream(sf,Attribute.CRYPTO_EXTERNAL_KEY_VERIFY_FILE);
        DataInputStream dis = new DataInputStream(verifyKeyInputStream);
        // then read the checksum length
        int checksumLen = dis.readInt();

        byte[] originalChecksum = new byte[checksumLen];
        dis.readFully(originalChecksum);

        dis.readFully(data);

        // decrypt data with key
        CipherProvider tmpCipherProvider = createNewCipher(DECRYPT,mainSecretKey,mainIV);
        tmpCipherProvider.decrypt(data, 0, data.length, data, 0);

        byte[] verifyChecksum = getMD5Checksum(data);

        if(!MessageDigest.isEqual(originalChecksum,verifyChecksum))
        {
          throw StandardException.newException(SQLState.ENCRYPTION_BAD_EXTERNAL_KEY);
        }

      }
    }
    catch(IOException ioe)
    {
      throw StandardException.newException(SQLState.ENCRYPTION_UNABLE_KEY_VERIFICATION,ioe);
    }
    finally
    {
      try
      {
        if(verifyKeyFile != null)
          verifyKeyFile.close();
        if (verifyKeyInputStream != null)
          verifyKeyInputStream.close();
      }
      catch(IOException ioee)
      {
View Full Code Here
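
The snippet above relies on a getMD5Checksum helper that is not shown. Assuming it is simply a thin wrapper around java.security.MessageDigest (the same class whose isEqual method is used for the comparison), a minimal sketch could look like this; the method name matches the call above, but the body is an illustration, not Derby's actual implementation:

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class Md5ChecksumSketch
{
    // Illustrative only: computes an MD5 digest of the given bytes, which is
    // what the key-verification code above expects from getMD5Checksum().
    static byte[] getMD5Checksum(byte[] data) throws NoSuchAlgorithmException
    {
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        return md5.digest(data);
    }
}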

      return null;
    }


    StorageRandomAccessFile log = null;

    try
    {
      log = privRandomAccessFile(fileName, "r");

      // verify that the log file is of the right format
      if (!verifyLogFormat(log, filenum))
      {
        if (SanityManager.DEBUG)
        {
          if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
            SanityManager.DEBUG(LogToFile.DBG_FLAG, fileName.getPath() + " format mismatch");
        }

        log.close();
        log = null;
      }
      else
      {
        log.seek(filepos);
      }
    }
    catch (IOException ioe)
    {
      try
      {
        if (log != null)
        {
          log.close();
          log = null;
        }

        if (SanityManager.DEBUG)
        {
View Full Code Here

    if (SanityManager.DEBUG)
    {
      //long filenum = LogCounter.getLogFileNumber(logInstant);
      //      long filepos = LogCounter.getLogFilePosition(logInstant);
      StorageFile fileName = getLogFileName(filenum);
      StorageRandomAccessFile log = null;
      return privRandomAccessFile(fileName, "rw");
    }
   
    return null;
View Full Code Here

            /*
             * Open the highest log file and make sure log records are
             * appended at the end of it
             */

            StorageRandomAccessFile logFile = null;
            if(isWriteSynced) {
                logFile = openLogFileInWriteMode(
                              getLogFileName(logFileNumber));
            } else {
                logFile = privRandomAccessFile(getLogFileName(logFileNumber),
                                               "rw");
            }
            logOut = new LogAccessFile(this, logFile, logBufferSize);

            lastFlush = endPosition;
            logFile.seek(endPosition); // append log records at the end of
            // the file

        } catch (IOException ioe) {
            throw StandardException.newException
                (SQLState.REPLICATION_UNEXPECTED_EXCEPTION, ioe);
View Full Code Here

                isWriteSynced = false;
                return privRandomAccessFile(logFile, "rw");
            }
        }

    StorageRandomAccessFile log = privRandomAccessFile(logFile, "rwd");
    return log ;
  }
View Full Code Here

     * @exception StandardException Standard Derby exception
     */
    private boolean checkJvmSyncError(StorageFile logFile) throws IOException
    {
        boolean hasJvmSyncError = false;
        StorageRandomAccessFile rwsTest;

        // Normally this log file already exists but in case it does
        // not we open the file using "rw" mode. This is needed in
        // order to ensure that the file already exists when it is
        // opened in "rws" mode. This should succeed on all JVMs
        rwsTest = privRandomAccessFile(logFile, "rw");
        rwsTest.close();

        // Try to re-open the file in "rws" mode
        try{
            rwsTest = privRandomAccessFile(logFile, "rws");
            rwsTest.close();
        }
        catch (FileNotFoundException ex) {
            // Normally this exception should never occur. For some
            // reason currently on some Mac and FreeBSD JVM 1.4.2 and
            // 1.5 FileNotFoundException exception is thrown if a file
View Full Code Here
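
checkJvmSyncError above probes whether the JVM can reopen an existing log file in "rws" mode (content and metadata written synchronously, as opposed to "rwd", which syncs content only). The same rw-then-rws probe can be shown with plain java.io.RandomAccessFile; this sketch only illustrates the pattern, is not Derby's code, and the class and method names are made up.

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;

public class RwsProbe
{
    /** Returns true if reopening an existing file in "rws" mode fails. */
    static boolean hasRwsOpenProblem(File file) throws IOException
    {
        // Make sure the file exists by opening it in plain "rw" mode first.
        new RandomAccessFile(file, "rw").close();

        // Now try to reopen it with synchronous content and metadata writes.
        try
        {
            new RandomAccessFile(file, "rws").close();
            return false;
        }
        catch (FileNotFoundException ex)
        {
            // Some older JVMs incorrectly fail here; see the Derby comment above.
            return true;
        }
    }
}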

         case STUBBIFY_ACTION:
         {
             StorageFile file = privGetFileName( actionIdentity, false, false, true);
             StorageFile stub = privGetFileName( actionIdentity, true, false, false);

             StorageRandomAccessFile stubData = null;

             try
             {
                 // !!!!!
                 // bumpContainerVersion();
                 //
                 // do NOT bump the container version.  We WANT the stubbify
                 // operation to get redone every time.  This is because this
                 // operation first writes out the stub and then remove the
                 // container file.  If we bump the version, then the stub will
                 // contain the new version.  And if the system crashes right then,
                 // then we will skip the whole operation during redo even though
                 // the container file may not have been removed.  Since we don't
                 // want to have the remove happen before the stub is written, we
                 // cannot sync it and therefore cannot be sure the remove
                 // happened before the system crashed.

                 if (!stub.exists())
                 {
                     // write the header to the stub
                     stubData = stub.getRandomAccessFile( "rw");
                     stub.limitAccessToOwner();

                     writeRAFHeader(
                        actionIdentity,
                        stubData,
                                    true, /* create */
                                    true); /* sync */

                     stubData.close();
                     stubData = null;
                 }


                 // Force WAL and check for database corruption before removing the file.
                 // This is one operation where the container is changed on disk
                 // directly without going through the container cache, which otherwise
                 // would have forced WAL.  Take care of it here.
                 dataFactory.flush(actionInstant);

                 // try to remove the container file
                 // fileData is not null only if we are redoing a removeContainer
                 // (stubbify) operation.  Then fileData actually is opened against
                 // the stub and the original container file does not exist.
                 // Then we need to close it here because this method is called by
                 // cache.remove and nobody will be able to see fileData after this.
                 privRemoveFile(file);

             }
             catch (SecurityException se)
             {
                 throw StandardException.
                     newException(SQLState.FILE_CANNOT_REMOVE_FILE, se, file,
                                  se.toString());
             }
             catch (IOException ioe)
             {
                 // exception thrown while creating the stub.  Remove the
                 // (half-baked) stub
                 try
                 {
                     if (stubData != null)
                     {
                         stubData.close();
                         stub.delete();
                         stubData = null;
                     }

                     if (fileData != null)
View Full Code Here

        throws StandardException
    {
        BasePage page = null;
        StorageFile newFile =
            dataFactory.getStorageFactory().newStorageFile(newFilePath);
        StorageRandomAccessFile newRaf = null;
        try {
            long lastPageNumber= getLastPageNumber(handle);
            newRaf = getRandomAccessFile(newFile);

            byte[] encryptionBuf = null;
            if (doEncrypt) {
                encryptionBuf = new byte[pageSize];
            }

            // Copy all the pages from the current container to the new
            // container file after processing the pages.
            for (long pageNumber = FIRST_ALLOC_PAGE_NUMBER;
                 pageNumber <= lastPageNumber; pageNumber++)
            {

                page = getLatchedPage(handle, pageNumber);
                       
                // Update the page array before writing to the disk.
                // An update consists of adding the container header, or
                // (re-)encrypting the data.
                byte[] dataToWrite = updatePageArray(pageNumber,
                                                     page.getPageArray(),
                                                     encryptionBuf,
                                                     true);
                newRaf.write(dataToWrite, 0, pageSize);

                // unlatch releases page from cache.
                page.unlatch();
                page = null;
            }

            // sync the new version of the container.
            newRaf.sync();
            newRaf.close();
            newRaf = null;
           
        }catch (IOException ioe) {
            throw StandardException.newException(
                                    SQLState.FILE_CONTAINER_EXCEPTION, ioe,
                                    getIdentity() != null ?
                                        getIdentity().toString() : "unknown",
                                    doEncrypt ? "encrypt" : "decrypt",
                                    newFilePath);
        } finally {

            if (page != null) {
                page.unlatch();
                page = null;
            }
           
            if (newRaf != null) {
                try {
                    newRaf.close();
                }catch (IOException ioe)
                {
                    newRaf = null;
                    throw StandardException.newException(
                                    SQLState.FILE_CONTAINER_EXCEPTION, ioe,
View Full Code Here

        /////////////////////////////////////////////////////////////
        //
        // determine where the log ends
        //
        /////////////////////////////////////////////////////////////
        StorageRandomAccessFile theLog = null;


        // if logEnd == LogCounter.INVALID_LOG_INSTANT, that means there
        // is no log record in the log - most likely it is corrupted in
        // some way ...
        if (logEnd == LogCounter.INVALID_LOG_INSTANT)
        {
          Monitor.logTextMessage(MessageId.LOG_LOG_NOT_FOUND);

          StorageFile logFile = getLogFileName(logFileNumber);

                    if (privExists(logFile))
          {
            // if we can delete this strange corrupted file, do so,
            // otherwise, skip it
                        if (!privDelete(logFile))
            {
              logFile = getLogFileName(++logFileNumber);
            }
          }
          IOException accessException = null;
          try
          {
                        theLog =   privRandomAccessFile(logFile, "rw");
          }
          catch (IOException ioe)
          {
            theLog = null;
            accessException = ioe;
          }

                    if (theLog == null || !privCanWrite(logFile))
          {
            if (theLog != null)
              theLog.close();

            theLog = null;
            Monitor.logTextMessage(MessageId.LOG_CHANGED_DB_TO_READ_ONLY);
            if (accessException != null)
              Monitor.logThrowable(accessException);
            ReadOnlyDB = true;
          }
          else
          {
            try
            {
              // no previous log file or previous log position
              if (!initLogFile(
                                    theLog, logFileNumber,
                                    LogCounter.INVALID_LOG_INSTANT))
                            {
                throw markCorrupt(
                                    StandardException.newException(
                                        SQLState.LOG_SEGMENT_NOT_EXIST,
                                        logFile.getPath()));
                            }
            }
            catch (IOException ioe)
            {
              throw markCorrupt(
                                StandardException.newException(
                                    SQLState.LOG_IO_ERROR, ioe));
            }

                        // successfully init'd the log file - set up markers,
                        // and position at the end of the log.
            setEndPosition( theLog.getFilePointer() );
            lastFlush   = endPosition;
           
            // if write sync is true, preallocate the log file
            // and reopen the file in rwd mode.
            if(isWriteSynced)
            {
              // extend the file by writing zeros to it
              preAllocateNewLogFile(theLog);
              theLog.close();
              theLog = openLogFileInWriteMode(logFile);
              // position the log at the current end position
              theLog.seek(endPosition);
            }
           
            if (SanityManager.DEBUG)
            {
              SanityManager.ASSERT(
                                endPosition == LOG_FILE_HEADER_SIZE,
                                "empty log file has wrong size");
            }
           
            // because we are already incrementing the log file number
            // here, no special log switch is required for
            // backup recoveries.
            logSwitchRequired = false;
          }
        }
        else
        {
          // logEnd is the instant of the next log record in the log;
          // it is used to determine the last known good position of
          // the log
          logFileNumber = LogCounter.getLogFileNumber(logEnd);

          ReadOnlyDB = df.isReadOnly();

          StorageFile logFile = getLogFileName(logFileNumber);

          if (!ReadOnlyDB)
          {
            // if the data factory doesn't think it is read-only, we can
            // do some further tests of our own
            IOException accessException = null;
            try
            {
              if(isWriteSynced)
                theLog = openLogFileInWriteMode(logFile);
              else
                theLog = privRandomAccessFile(logFile, "rw");
            }
            catch (IOException ioe)
            {
              theLog = null;
                            accessException = ioe;
            }
                        if (theLog == null || !privCanWrite(logFile))
            {
              if (theLog != null)
                theLog.close();
              theLog = null;
              Monitor.logTextMessage(MessageId.LOG_CHANGED_DB_TO_READ_ONLY);
              if (accessException != null)
                Monitor.logThrowable(accessException);
              ReadOnlyDB = true;
                     
            }
          }

          if (!ReadOnlyDB)
          {
            setEndPosition( LogCounter.getLogFilePosition(logEnd) );

            //
            // The end of the log is at endPosition, which is where
            // the next log record should be appended.
            //
            // if the last log record ends before the end of the
            // log file, then this log file has a fuzzy end.
            // Zap all the bytes between endPosition and EOF to 0.
            //
            // the end log marker is 4 bytes (of zeros)
            //
            // if endPosition + 4 == logOut.length, we have a
            // properly terminated log file
            //
            // if endPosition + 4 is > logOut.length, there are 0,
            // 1, 2, or 3 bytes of 'fuzz' at the end of the log. We
            // can ignore that because it is guaranteed to be
            // overwritten by the next log record.
            //
            // if endPosition + 4 is < logOut.length, we have a
            // partial log record at the end of the log.
            //
            // We need to overwrite all of the incomplete log
            // record, because if we start logging but cannot
            // 'consume' all the bad log, then the log will truly
            // be corrupted if the next 4 bytes (the length of the
            // log record) after that is small enough that the next
            // time the database is recovered, it will be
            // interpreted as though the whole log record is in the
            // log, and recovery will try to objectify it, only to
            // get a ClassNotFound error or worse.
            //

            //find out if log had incomplete log records at the end.
            if (redoScan.isLogEndFuzzy())
            {
              theLog.seek(endPosition);
              long eof = theLog.length();

              Monitor.logTextMessage(MessageId.LOG_INCOMPLETE_LOG_RECORD,
                logFile, new Long(endPosition), new Long(eof));

              /* Write zeros from incomplete log record to end of file */
              long nWrites = (eof - endPosition)/logBufferSize;
              int rBytes = (int)((eof - endPosition) % logBufferSize);
              byte zeroBuf[]= new byte[logBufferSize];
             
              //write the zeros to file
              while(nWrites-- > 0)
                theLog.write(zeroBuf);
              if(rBytes !=0)
                theLog.write(zeroBuf, 0, rBytes);
             
              if(!isWriteSynced)
                syncFile(theLog);
            }

            if (SanityManager.DEBUG)
            {
              if (theLog.length() != endPosition)
              {
                SanityManager.ASSERT(
                                    theLog.length() > endPosition,
                                    "log end > log file length, bad scan");
              }
            }

            // set the log to the true end position,
                        // and not the end of the file

            lastFlush = endPosition;
            theLog.seek(endPosition);
          }
        }

        if (theLog != null)
                {
View Full Code Here
