Package: freenet.client

Examples of freenet.client.InsertException


            if(logMINOR) Logger.minor(this, "Encoded "+this+" for "+parent);
        } catch (IOException e) {
            parent.failOnDiskError(e);
        } catch (Throwable t) {
            Logger.error(this, "Failed: "+t, t);
            parent.fail(new InsertException(InsertExceptionMode.INTERNAL_ERROR, t, null));
        } finally {
            if(lock != null) lock.unlock();
        }
    }
View Full Code Here


              } catch(Throwable t) {
                Logger.error(this, "Caught in OffThreadCompressor: " + t, t);
                System.err.println("Caught in OffThreadCompressor: " + t);
                t.printStackTrace();
                // Try to fail gracefully
                finalJob.onFailure(new InsertException(InsertExceptionMode.INTERNAL_ERROR, t, null), null, context);
              }

          } catch(Throwable t) {
            Logger.error(this, "Caught " + t + " in " + this, t);
          } finally {
View Full Code Here

            int mustSucceed = storage.topRequiredBlocks - topRequiredBlocks;
            parent.addMustSucceedBlocks(mustSucceed);
            parent.addRedundantBlocksInsert(storage.topTotalBlocks - topTotalBlocks - mustSucceed);
            parent.notifyClients(context);
        } catch (IOException e) {
            throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null);
        }
        this.raf = storage.getRAF();
        this.sender = new SplitFileInserterSender(this, storage);
        this.realTime = realTime;
        this.token = token;
View Full Code Here

        return parent;
    }

    @Override
    public void cancel(ClientContext context) {
        storage.fail(new InsertException(InsertExceptionMode.CANCELLED));
    }
View Full Code Here

            raf.close();
            raf.free();
            originalData.close();
            if(freeData)
                originalData.free();
            throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null);
        } catch (StorageFormatException e) {
            Logger.error(this, "Resume failed: "+e, e);
            raf.close();
            raf.free();
            originalData.close();
            if(freeData)
                originalData.free();
            throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null);
        } catch (ChecksumFailedException e) {
            Logger.error(this, "Resume failed: "+e, e);
            raf.close();
            raf.free();
            originalData.close();
            if(freeData)
                originalData.free();
            throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null);
        }
    }
View Full Code Here

                        Metadata metadata = storage.encodeMetadata();
                        reportMetadata(metadata);
                        if(ctx.getCHKOnly)
                            onSucceeded(metadata);
                    } catch (IOException e) {
                        storage.fail(new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null));
                    } catch (MissingKeyException e) {
                        storage.fail(new InsertException(InsertExceptionMode.BUCKET_ERROR, "Lost one or more keys", e, null));
                    }
                    return false;
                }
               
            });
View Full Code Here

    } catch (PersistenceDisabledException e) {
      Logger.error(this, "Database disabled compressing data", new Exception("error"));
      if(bestCompressedData != null && bestCompressedData != origData)
        bestCompressedData.free();
    } catch (InvalidCompressionCodecException e) {
      fail(new InsertException(InsertExceptionMode.INTERNAL_ERROR, e, null), context, bestCompressedData);
    } catch (final IOException e) {
      fail(new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null), context, bestCompressedData);
   
  }
View Full Code Here

        this.originalData = originalData;
        this.callback = callback;
        this.persistent = persistent;
        dataLength = originalData.size();
        if (dataLength > ((long) Integer.MAX_VALUE) * CHKBlock.DATA_LENGTH)
            throw new InsertException(InsertExceptionMode.TOO_BIG);
        totalDataBlocks = (int) ((dataLength + CHKBlock.DATA_LENGTH - 1) / CHKBlock.DATA_LENGTH);
        this.decompressedLength = decompressedLength;
        this.compressionCodec = compressionCodec;
        this.clientMetadata = meta;
        this.checker = checker;
        this.memoryLimitedJobRunner = memoryLimitedJobRunner;
        this.jobRunner = jobRunner;
        this.isMetadata = isMetadata;
        this.archiveType = archiveType;
        this.hashThisLayerOnly = hashThisLayerOnly;
        this.topDontCompress = topDontCompress;
        this.origDataSize = origDataSize;
        this.origCompressedDataSize = origCompressedDataSize;
        this.maxRetries = ctx.maxInsertRetries;
        this.errors = new FailureCodeTracker(true);
        this.ticker = ticker;
        this.random = random;

        // Work out how many blocks in each segment, crypto keys etc.
        // Complicated by back compatibility, i.e. the need to be able to
        // reinsert old splitfiles.
        // FIXME consider getting rid of support for very old splitfiles.

        int segs;
        cmode = ctx.getCompatibilityMode();
        if(cmode.ordinal() < CompatibilityMode.COMPAT_1255.ordinal()) {
            this.hashes = null;
            splitfileCryptoKey = null;
        } else {
            this.hashes = hashes;
        }
        if (cmode == CompatibilityMode.COMPAT_1250_EXACT) {
            segs = (totalDataBlocks + 128 - 1) / 128;
            segmentSize = 128;
            deductBlocksFromSegments = 0;
        } else {
            if (cmode == CompatibilityMode.COMPAT_1251) {
                // Max 131 blocks per segment.
                segs = (totalDataBlocks + 131 - 1) / 131;
            } else {
                // Algorithm from evanbd, see bug #2931.
                if (totalDataBlocks > 520) {
                    segs = (totalDataBlocks + 128 - 1) / 128;
                } else if (totalDataBlocks > 393) {
                    // maxSegSize = 130;
                    segs = 4;
                } else if (totalDataBlocks > 266) {
                    // maxSegSize = 131;
                    segs = 3;
                } else if (totalDataBlocks > 136) {
                    // maxSegSize = 133;
                    segs = 2;
                } else {
                    // maxSegSize = 136;
                    segs = 1;
                }
            }
            int segSize = (totalDataBlocks + segs - 1) / segs;
            if (ctx.splitfileSegmentDataBlocks < segSize) {
                segs = (totalDataBlocks + ctx.splitfileSegmentDataBlocks - 1)
                        / ctx.splitfileSegmentDataBlocks;
                segSize = (totalDataBlocks + segs - 1) / segs;
            }
            segmentSize = segSize;
            if (cmode == CompatibilityMode.COMPAT_CURRENT
                    || cmode.ordinal() >= CompatibilityMode.COMPAT_1255.ordinal()) {
                // Even with basic even segment splitting, it is possible for
                // the last segment to be a lot smaller than the rest.
                // So drop a single data block from each of the last
                // [segmentSize-lastSegmentSize] segments instead.
                // Hence all the segments are within 1 block of segmentSize.
                int lastSegmentSize = totalDataBlocks - (segmentSize * (segs - 1));
                deductBlocksFromSegments = segmentSize - lastSegmentSize;
            } else {
                deductBlocksFromSegments = 0;
            }
        }

        int crossCheckBlocks = 0;

        // Cross-segment splitfile redundancy becomes useful at 20 segments.
        if (segs >= 20
                && (cmode == CompatibilityMode.COMPAT_CURRENT || cmode.ordinal() >= CompatibilityMode.COMPAT_1255
                        .ordinal())) {
            // The optimal number of cross-check blocks per segment (and per
            // cross-segment since there are the same number of cross-segments
            // as segments) is 3.
            crossCheckBlocks = 3;
        }

        this.crossCheckBlocks = crossCheckBlocks;

        this.splitfileType = ctx.getSplitfileAlgorithm();
        this.codec = FECCodec.getInstance(splitfileType);

        checkSegmentSize = codec.getCheckBlocks(segmentSize + crossCheckBlocks, cmode);

        this.splitfileCryptoAlgorithm = splitfileCryptoAlgorithm;
        if (splitfileCryptoKey != null) {
            this.splitfileCryptoKey = splitfileCryptoKey;
            specifySplitfileKeyInMetadata = true;
        } else if (cmode == CompatibilityMode.COMPAT_CURRENT
                || cmode.ordinal() >= CompatibilityMode.COMPAT_1255.ordinal()) {
            if (hashThisLayerOnly != null) {
                this.splitfileCryptoKey = Metadata.getCryptoKey(hashThisLayerOnly);
            } else {
                this.splitfileCryptoKey = Metadata.getCryptoKey(hashes);
            }
            specifySplitfileKeyInMetadata = false;
        } else {
            this.splitfileCryptoKey = null;
            specifySplitfileKeyInMetadata = false;
        }

        int totalCheckBlocks = 0;
        int checkTotalDataBlocks = 0;
        underlyingOffsetDataSegments = new long[segs];
        keyLength = SplitFileInserterSegmentStorage.getKeyLength(this);
        this.consecutiveRNFsCountAsSuccess = ctx.consecutiveRNFsCountAsSuccess;
        segments = makeSegments(segmentSize, segs, totalDataBlocks, crossCheckBlocks,
                deductBlocksFromSegments, persistent,
                cmode, random, keysFetching, consecutiveRNFsCountAsSuccess);
        for (SplitFileInserterSegmentStorage segment : segments) {
            totalCheckBlocks += segment.checkBlockCount;
            checkTotalDataBlocks += segment.dataBlockCount;
        }
        assert (checkTotalDataBlocks == totalDataBlocks);
        this.totalCheckBlocks = totalCheckBlocks;

        if (crossCheckBlocks != 0) {
            byte[] seed = Metadata.getCrossSegmentSeed(hashes, hashThisLayerOnly);
            if (logMINOR)
                Logger.minor(this, "Cross-segment seed: " + HexUtil.bytesToHex(seed));
            Random xsRandom = new MersenneTwister(seed);
            // Cross segment redundancy: Allocate the blocks.
            crossSegments = new SplitFileInserterCrossSegmentStorage[segs];
            int segLen = segmentSize;
            for (int i = 0; i < crossSegments.length; i++) {
                if (logMINOR)
                    Logger.minor(this, "Allocating blocks for cross segment " + i);
                if (segments.length - i == deductBlocksFromSegments) {
                    segLen--;
                }

                SplitFileInserterCrossSegmentStorage seg = new SplitFileInserterCrossSegmentStorage(
                        this, i, persistent, segLen, crossCheckBlocks);
                crossSegments[i] = seg;
                for (int j = 0; j < segLen; j++) {
                    // Allocate random data blocks
                    allocateCrossDataBlock(seg, xsRandom);
                }
                for (int j = 0; j < crossCheckBlocks; j++) {
                    // Allocate check blocks
                    allocateCrossCheckBlock(seg, xsRandom);
                }
            }
        } else {
            crossSegments = null;
        }

        // Now set up the RAF.
       
        // Setup offset arrays early so we can compute the length of encodeOffsets().
        if(crossSegments != null) {
            offsetCrossSegmentBlocks = new long[crossSegments.length];
            if(persistent)
                offsetCrossSegmentStatus = new long[crossSegments.length];
            else
                offsetCrossSegmentStatus = null;
        } else {
            offsetCrossSegmentBlocks = null;
            offsetCrossSegmentStatus = null;
        }
       
        offsetSegmentCheckBlocks = new long[segments.length];
       
        offsetSegmentKeys = new long[segments.length];
        if(persistent) {
            offsetSegmentStatus = new long[segments.length];
        } else {
            offsetSegmentStatus = null;
        }

        // First we have all the fixed stuff ...

        byte[] paddedLastBlock = null;
        if (dataLength % CHKBlock.DATA_LENGTH != 0) {
            this.hasPaddedLastBlock = true;
            long from = (dataLength / CHKBlock.DATA_LENGTH) * CHKBlock.DATA_LENGTH;
            byte[] buf = new byte[(int) (dataLength - from)];
            this.originalData.pread(from, buf, 0, buf.length);
            paddedLastBlock = BucketTools.pad(buf, CHKBlock.DATA_LENGTH, buf.length);
        } else {
            this.hasPaddedLastBlock = false;
        }
       
        byte[] header = null;
        Bucket segmentSettings = null, crossSegmentSettings = null;
        int offsetsLength = 0;
        if (persistent) {
            header = encodeHeader();
            offsetsLength = encodeOffsets().length;

            segmentSettings = encodeSegmentSettings(); // Checksummed with length
            try {
                crossSegmentSettings = encodeCrossSegmentSettings(bf); // Checksummed with length
            } catch (IOException e) {
                throw new InsertException(InsertExceptionMode.BUCKET_ERROR,
                        "Failed to write to temporary storage while creating splitfile inserter",
                        null);
            }
        }

View Full Code Here

                        synchronized(this) {
                            status = Status.SUCCEEDED;
                        }
                        callback.onSucceeded(metadata);
                    } catch (IOException e) {
                        InsertException e1 = new InsertException(InsertExceptionMode.BUCKET_ERROR);
                        synchronized(this) {
                            failing = e1;
                            status = Status.FAILED;
                        }
                        callback.onFailed(e1);
                    } catch (MissingKeyException e) {
                        // Fail here too. If we're getting disk corruption on keys, we're probably
                        // getting it on the original data too.
                        InsertException e1 = new InsertException(InsertExceptionMode.BUCKET_ERROR, "Missing keys", null);
                        synchronized(this) {
                            failing = e1;
                            status = Status.FAILED;
                        }
                        callback.onFailed(e1);
View Full Code Here

    private boolean maybeFail() {
        // Might have failed.
        // Have to check segments before checking for failure because of race conditions.
        if(allSegmentsCompletedOrFailed()) {
            InsertException e = null;
            synchronized(this) {
                if(failing == null) return false;
                e = failing;
                if(hasFinished()) {
                    if(logMINOR) Logger.minor(this, "Maybe fail returning true because already finished");
View Full Code Here

TOP

Related Classes of freenet.client.InsertException

Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.