Package freenet.client

Examples of freenet.client.InsertException
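The fragments below are taken from the Freenet ("fred") client layer and show how InsertException is constructed and propagated during inserts. As a quick orientation, here is a minimal sketch of the three construction patterns that recur in the fragments: a mode plus a human-readable message, a mode wrapping a lower-level cause, and a bare mode such as CANCELLED handed to a callback. The sketch is illustrative only: the class and method names (InsertExceptionSketch, validateKeyType, copyOrFail, cancelled) are made up, the fred library is assumed to be on the classpath, and the nested-enum import for InsertExceptionMode is inferred from its unqualified use in the fragments.

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import freenet.client.InsertException;
import freenet.client.InsertException.InsertExceptionMode;

// Hypothetical illustration class; only the InsertException calls mirror the fragments below.
final class InsertExceptionSketch {

  // Pattern 1: mode + message (+ optional expected FreenetURI, null here),
  // as used for "Unknown key type" in the fragments.
  static void validateKeyType(String type) throws InsertException {
    if(!type.equals("CHK") && !type.equals("SSK"))
      throw new InsertException(InsertExceptionMode.INVALID_URI, "Unknown key type: "+type, null);
  }

  // Pattern 2: mode + underlying cause, as used for bucket I/O failures.
  static void copyOrFail(InputStream in, OutputStream out) throws InsertException {
    try {
      in.transferTo(out); // stand-in for the BucketTools.copyTo(...) calls in the fragments
    } catch (IOException e) {
      throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null);
    }
  }

  // Pattern 3: bare mode, typically reported through a callback rather than thrown,
  // e.g. cb.onFailure(new InsertException(InsertExceptionMode.CANCELLED), this, context).
  static InsertException cancelled() {
    return new InsertException(InsertExceptionMode.CANCELLED);
  }
}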


    } else if(type.equals("CHK")) {
      blockSize = CHKBlock.DATA_LENGTH;
      oneBlockCompressedSize = CHKBlock.MAX_COMPRESSED_DATA_LENGTH;
      isCHK = true;
    } else {
      throw new InsertException(InsertExceptionMode.INVALID_URI, "Unknown key type: "+type, null);
    }
   
    // Compressed data; now insert it
    // We do NOT need to switch threads here: the actual compression is done by InsertCompressor on the RealCompressor thread,
    // which then switches either to the database thread or to a new executor to run this method.
   
    if(parent == cb) {
      short codecID = bestCodec == null ? -1 : bestCodec.metadataID;
      ctx.eventProducer.produceEvent(new FinishedCompressionEvent(codecID, origSize, bestCompressedDataSize), context);
      if(logMINOR) Logger.minor(this, "Compressed "+origSize+" to "+data.size()+" on "+this+" data = "+data);
    }
   
    // Insert it...
    short codecNumber = bestCodec == null ? -1 : bestCodec.metadataID;
    long compressedDataSize = data.size();
    boolean fitsInOneBlockAsIs = bestCodec == null ? compressedDataSize <= blockSize : compressedDataSize <= oneBlockCompressedSize;
    boolean fitsInOneCHK = bestCodec == null ? compressedDataSize <= CHKBlock.DATA_LENGTH : compressedDataSize <= CHKBlock.MAX_COMPRESSED_DATA_LENGTH;

    if((fitsInOneBlockAsIs || fitsInOneCHK) && origSize > Integer.MAX_VALUE)
      throw new InsertException(InsertExceptionMode.INTERNAL_ERROR, "2GB+ should not encode to one block!", null);

    boolean noMetadata = ((block.clientMetadata == null) || block.clientMetadata.isTrivial()) && targetFilename == null;
    if((noMetadata || metadata) && archiveType == null) {
      if(fitsInOneBlockAsIs) {
        if(persistent && (data instanceof NotPersistentBucket))
          data = fixNotPersistent(data, context);
        // Just insert it
        ClientPutState bi =
          createInserter(parent, data, codecNumber, ctx, cb, metadata, (int)origSize, -1, true, context, shouldFreeData, forSplitfile);
        if(logMINOR)
          Logger.minor(this, "Inserting without metadata: "+bi+" for "+this);
        cb.onTransition(this, bi, context);
        if(ctx.earlyEncode && bi instanceof SingleBlockInserter && isCHK)
          ((SingleBlockInserter)bi).getBlock(context, true);
        bi.schedule(context);
        if(!isUSK)
          cb.onBlockSetFinished(this, context);
        synchronized(this) {
            started = true;
        }
        if(persistent) {
          block.nullData();
          block = null;
        }
        return;
      }
    }
    if (fitsInOneCHK) {
      // Insert single block, then insert pointer to it
      if(persistent && (data instanceof NotPersistentBucket)) {
        data = fixNotPersistent(data, context);
      }
      if(reportMetadataOnly) {
        SingleBlockInserter dataPutter = new SingleBlockInserter(parent, data, codecNumber, FreenetURI.EMPTY_CHK_URI, ctx, realTimeFlag, cb, metadata, (int)origSize, -1, true, true, token, context, persistent, shouldFreeData, forSplitfile ? ctx.extraInsertsSplitfileHeaderBlock : ctx.extraInsertsSingleBlock, cryptoAlgorithm, forceCryptoKey);
        if(logMINOR)
          Logger.minor(this, "Inserting with metadata: "+dataPutter+" for "+this);
        Metadata meta = makeMetadata(archiveType, dataPutter.getURI(context), hashes);
        cb.onMetadata(meta, this, context);
        cb.onTransition(this, dataPutter, context);
        dataPutter.schedule(context);
        if(!isUSK)
          cb.onBlockSetFinished(this, context);
        synchronized(this) {
          // Don't delete them because they are being passed on.
          origHashes = null;
        }
      } else {
        MultiPutCompletionCallback mcb =
          new MultiPutCompletionCallback(cb, parent, token, persistent, false, ctx.earlyEncode);
        SingleBlockInserter dataPutter = new SingleBlockInserter(parent, data, codecNumber, FreenetURI.EMPTY_CHK_URI, ctx, realTimeFlag, mcb, metadata, (int)origSize, -1, true, false, token, context, persistent, shouldFreeData, forSplitfile ? ctx.extraInsertsSplitfileHeaderBlock : ctx.extraInsertsSingleBlock, cryptoAlgorithm, forceCryptoKey);
        if(logMINOR)
          Logger.minor(this, "Inserting data: "+dataPutter+" for "+this);
        Metadata meta = makeMetadata(archiveType, dataPutter.getURI(context), hashes);
        RandomAccessBucket metadataBucket;
        try {
          metadataBucket = meta.toBucket(context.getBucketFactory(persistent));
        } catch (IOException e) {
          Logger.error(this, "Caught "+e, e);
          throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null);
        } catch (MetadataUnresolvedException e) {
          // Impossible, we're not inserting a manifest.
          Logger.error(this, "Caught "+e, e);
          throw new InsertException(InsertExceptionMode.INTERNAL_ERROR, "Got MetadataUnresolvedException in SingleFileInserter: "+e.toString(), null);
        }
        ClientPutState metaPutter = createInserter(parent, metadataBucket, (short) -1, ctx, mcb, true, (int)origSize, -1, true, context, true, false);
        if(logMINOR)
          Logger.minor(this, "Inserting metadata: "+metaPutter+" for "+this);
        mcb.addURIGenerator(metaPutter);
        mcb.add(dataPutter);
        cb.onTransition(this, mcb, context);
        Logger.minor(this, ""+mcb+" : data "+dataPutter+" meta "+metaPutter);
        mcb.arm(context);
        dataPutter.schedule(context);
        if(ctx.earlyEncode && metaPutter instanceof SingleBlockInserter)
          ((SingleBlockInserter)metaPutter).getBlock(context, true);
        metaPutter.schedule(context);
        if(!isUSK)
          cb.onBlockSetFinished(this, context);
        // Deleting origHashes is fine, we are done with them.
      }
      synchronized(this) {
          started = true;
      }
      if(persistent) {
        block.nullData();
        block = null;
      }
      return;
    }
    // Otherwise the file is too big to fit into one block
    // We therefore must make a splitfile
    // Job of SplitHandler: when the splitinserter has the metadata,
    // insert it. Then when the splitinserter has finished, and the
    // metadata insert has finished too, tell the master callback.
    LockableRandomAccessBuffer dataRAF;
    try {
      dataRAF = data.toRandomAccessBuffer();
    } catch (IOException e) {
      throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null);
    }
    if(reportMetadataOnly) {
      SplitFileInserter sfi = new SplitFileInserter(persistent, parent, cb,
              dataRAF, shouldFreeData, ctx, context, origSize, bestCodec,
              block.clientMetadata, metadata, archiveType, cryptoAlgorithm, forceCryptoKey,


      data.free();
      data = newData;
      }
    } catch (IOException e) {
      Logger.error(this, "Caught "+e+" while copying non-persistent data", e);
      throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null);
    }
    // Note that SegmentedBCB *does* support splitting, so we don't need to do anything to the data
    // if it doesn't fit in a single block.
    return data;
  }

      oneBlockCompressedSize = SSKBlock.MAX_COMPRESSED_DATA_LENGTH;
    } else if(type.equals("CHK")) {
      blockSize = CHKBlock.DATA_LENGTH;
      oneBlockCompressedSize = CHKBlock.MAX_COMPRESSED_DATA_LENGTH;
    } else {
      throw new InsertException(InsertExceptionMode.INVALID_URI, "Unknown key type: "+type, null);
    }
   
    // We always want SHA256, even for small files.
    long wantHashes = 0;
    CompatibilityMode cmode = ctx.getCompatibilityMode();
    boolean atLeast1254 = (cmode == CompatibilityMode.COMPAT_CURRENT || cmode.ordinal() >= CompatibilityMode.COMPAT_1255.ordinal());
    if(atLeast1254) {
      // We verify this. We want it for *all* files.
      wantHashes |= HashType.SHA256.bitmask;
      // FIXME: If the user requests it, calculate the others for small files.
      // FIXME maybe the thresholds should be configurable.
      if(data.size() >= 1024*1024 && !metadata) {
        // SHA1 is common and MD5 is cheap.
        wantHashes |= HashType.SHA1.bitmask;
        wantHashes |= HashType.MD5.bitmask;
      }
      if(data.size() >= 4*1024*1024 && !metadata) {
        // Useful for cross-network, and cheap.
        wantHashes |= HashType.ED2K.bitmask;
        // Very widely supported for cross-network.
        wantHashes |= HashType.TTH.bitmask;
        // For completeness.
        wantHashes |= HashType.SHA512.bitmask;
      }
    }
    boolean tryCompress = (origSize > blockSize) && (!ctx.dontCompress) && (!dontCompress);
    if(tryCompress) {
      InsertCompressor.start(context, this, origData, oneBlockCompressedSize, context.getBucketFactory(persistent), persistent, wantHashes, !atLeast1254);
    } else {
      if(logMINOR) Logger.minor(this, "Not compressing "+origData+" size = "+origSize+" block size = "+blockSize);
      HashResult[] hashes = null;
      if(wantHashes != 0) {
        // Need to get the hashes anyway
        NullOutputStream nos = new NullOutputStream();
        MultiHashOutputStream hasher = new MultiHashOutputStream(nos, wantHashes);
        try {
          BucketTools.copyTo(data, hasher, data.size());
        } catch (IOException e) {
          throw new InsertException(InsertExceptionMode.BUCKET_ERROR, "I/O error generating hashes", e, null);
        }
        hashes = hasher.getResults();
      }
      final CompressionOutput output = new CompressionOutput(data, null, hashes);
      context.getJobRunner(persistent).queueNormalOrDrop(new PersistentJob() {
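The fragment above breaks off inside the queued PersistentJob, but the hash-selection logic before it is self-contained: SHA256 is always requested when the compatibility mode is current or at least COMPAT_1255, SHA1 and MD5 are added for non-metadata data of at least 1 MiB, and ED2K, TTH and SHA512 are added from 4 MiB. A compact, hypothetical restatement of those thresholds (the helper class and method are made up; freenet.crypt is assumed as the package for HashType):

import freenet.crypt.HashType; // package assumed from the fred source tree

// Hypothetical helper restating the thresholds used in the fragment above.
final class WantedHashes {
  static long forSize(long dataSize, boolean metadata) {
    long wantHashes = HashType.SHA256.bitmask;   // always wanted on 1255+ compatibility modes
    if(dataSize >= 1024*1024 && !metadata) {
      wantHashes |= HashType.SHA1.bitmask;       // common
      wantHashes |= HashType.MD5.bitmask;        // cheap
    }
    if(dataSize >= 4*1024*1024 && !metadata) {
      wantHashes |= HashType.ED2K.bitmask;       // useful cross-network
      wantHashes |= HashType.TTH.bitmask;        // widely supported cross-network
      wantHashes |= HashType.SHA512.bitmask;     // for completeness
    }
    return wantHashes;
  }
}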

    if(uri.getKeyType().equals("USK")) {
      try {
        return new USKInserter(parent, data, compressionCodec, uri, ctx, cb, isMetadata, sourceLength, token,
          addToParent, this.token, context, freeData, persistent, realTimeFlag, forSplitfile ? ctx.extraInsertsSplitfileHeaderBlock : ctx.extraInsertsSingleBlock, cryptoAlgorithm, forceCryptoKey);
      } catch (MalformedURLException e) {
        throw new InsertException(InsertExceptionMode.INVALID_URI, e, null);
      }
    } else {
      SingleBlockInserter sbi =
        new SingleBlockInserter(parent, data, compressionCodec, uri, ctx, realTimeFlag, cb, isMetadata, sourceLength, token,
            addToParent, false, this.token, context, persistent, freeData, forSplitfile ? ctx.extraInsertsSplitfileHeaderBlock : ctx.extraInsertsSingleBlock, cryptoAlgorithm, forceCryptoKey);

    }
    if(freeData) {
      block.free();
    }
    // Must call onFailure so that we get removeFrom()'ed
    cb.onFailure(new InsertException(InsertExceptionMode.CANCELLED), this, context);
  }

      fail(e, context);
    }

    @Override
    public void onMetadata(Metadata meta, ClientPutState state, ClientContext context) {
      InsertException e = null;
      if(logMINOR) Logger.minor(this, "Got metadata for "+this+" from "+state);
      synchronized(this) {
        if(finished) return;
        if(reportMetadataOnly) {
          if(state != sfi) {
            Logger.error(this, "Got metadata from unknown object "+state+" when expecting to report metadata");
            return;
          }
          metaInsertSuccess = true;
        } else if(state == metadataPutter) {
          Logger.error(this, "Got metadata for metadata");
          e = new InsertException(InsertExceptionMode.INTERNAL_ERROR, "Did not expect to get metadata for metadata inserter", null);
        } else if(state != sfi) {
          Logger.error(this, "Got metadata from unknown state "+state+" sfi="+sfi+" metadataPutter="+metadataPutter+" on "+this+" persistent="+persistent, new Exception("debug"));
          e = new InsertException(InsertExceptionMode.INTERNAL_ERROR, "Got metadata from unknown state", null);
        } else {
          // Already started the metadata putter? If so, we've got the metadata twice.
          if(metadataPutter != null) return;
          if(metaInsertSuccess) return;
        }
      }
      if(reportMetadataOnly) {
        cb.onMetadata(meta, this, context);
        return;
      }
      if(e != null) {
        onFailure(e, state, context);
        return;
      }
     
      byte[] metaBytes;
      try {
        metaBytes = meta.writeToByteArray();
      } catch (MetadataUnresolvedException e1) {
        Logger.error(this, "Impossible: "+e1, e1);
        fail((InsertException)new InsertException(InsertExceptionMode.INTERNAL_ERROR, "MetadataUnresolvedException in SingleFileInserter.SplitHandler: "+e1, null).initCause(e1), context);
        return;
      }
     
      String metaPutterTargetFilename = targetFilename;
     
      if(targetFilename != null) {
       
        if(metaBytes.length <= Short.MAX_VALUE) {
          HashMap<String, Object> hm = new HashMap<String, Object>();
          hm.put(targetFilename, meta);
          meta = Metadata.mkRedirectionManifestWithMetadata(hm);
          metaPutterTargetFilename = null;
          try {
            metaBytes = meta.writeToByteArray();
          } catch (MetadataUnresolvedException e1) {
            Logger.error(this, "Impossible (2): "+e1, e1);
            fail((InsertException)new InsertException(InsertExceptionMode.INTERNAL_ERROR, "MetadataUnresolvedException in SingleFileInserter.SplitHandler(2): "+e1, null).initCause(e1), context);
            return;
          }
        }
      }
     
      RandomAccessBucket metadataBucket;
      try {
        metadataBucket = BucketTools.makeImmutableBucket(context.getBucketFactory(persistent), metaBytes);
      } catch (IOException e1) {
        InsertException ex = new InsertException(InsertExceptionMode.BUCKET_ERROR, e1, null);
        fail(ex, context);
        return;
      }
      ClientMetadata m = meta.getClientMetadata();
      CompatibilityMode cmode = ctx.getCompatibilityMode();
      if(!(cmode == CompatibilityMode.COMPAT_CURRENT || cmode.ordinal() >= CompatibilityMode.COMPAT_1255.ordinal()))
        m = null;
      if(metadataThreshold > 0 && metaBytes.length < metadataThreshold) {
        // FIXME what to do about m ???
        // I.e. do the other layers of metadata already include the content type?
        // It's probably already included in the splitfile, but need to check that, and test it.
        synchronized(this) {
          metaInsertSuccess = true;
        }
        cb.onMetadata(metadataBucket, state, context);
        return;
      }
      InsertBlock newBlock = new InsertBlock(metadataBucket, m, block.desiredURI);
      synchronized(this) {
          // Only the bottom layer in a multi-level splitfile pyramid has randomised keys. The rest are unpredictable anyway, and this ensures we only need to supply one key when reinserting.
          metadataPutter = new SingleFileInserter(parent, this, newBlock, true, ctx, realTimeFlag, false, false, token, archiveType, true, metaPutterTargetFilename, true, persistent, origDataLength, origCompressedDataLength, origHashes, cryptoAlgorithm, forceCryptoKey, metadataThreshold);
          if(origHashes != null) {
              // It gets passed on, and the last one deletes it.
              SingleFileInserter.this.origHashes = null;
          }
          // If EarlyEncode, then start the metadata insert ASAP, to get the key.
          // Otherwise, wait until the data is fetchable (to improve persistence).
          if(logMINOR)
              Logger.minor(this, "Created metadata putter for "+this+" : "+metadataPutter+" bucket "+metadataBucket+" size "+metadataBucket.size());
          if(!(ctx.earlyEncode || splitInsertSuccess)) return;
      }
      if(logMINOR) Logger.minor(this, "Putting metadata on "+metadataPutter+" from "+sfi+" ("+((SplitFileInserter)sfi).getLength()+ ')');
      if(!startMetadata(context)) {
        Logger.error(this, "onMetadata() yet unable to start metadata due to not having all URIs?!?!");
        fail(new InsertException(InsertExceptionMode.INTERNAL_ERROR, "onMetadata() yet unable to start metadata due to not having all URIs", null), context);
        return;
      }
      synchronized(this) {
        if(splitInsertSuccess && sfi != null) {
          sfi = null;

  public void cancel(ClientContext context) {
    for(MySendableInsert inserter: inserters) {
      if(inserter != null)
        inserter.cancel(context);
    }
    parent.onFailure(new InsertException(InsertExceptionMode.CANCELLED), this, context);
  }

      wasFatal = fatal;
    }
    if(success) {
      parent.onSuccess(this, context);
    } else if(wasFatal)
      parent.onFailure(new InsertException(InsertExceptionMode.FATAL_ERRORS_IN_BLOCKS, errors, null), this, context);
    else
      parent.onFailure(new InsertException(InsertExceptionMode.TOO_MANY_RETRIES_IN_BLOCKS, errors, null), this, context);
  }

  }

    @Override
    public void onResume(ClientContext context) throws InsertException {
        // TODO binary blob inserter isn't persistent yet, right?
        throw new InsertException(InsertExceptionMode.INTERNAL_ERROR, "Persistence not supported yet", null);
    }

    public void onFailure(LowLevelPutException e, SendableRequestItem keyNum, ClientContext context) {
      synchronized(BinaryBlobInserter.this) {
        if(inserters[blockNum] == null) return;
      }
      if(parent.isCancelled()) {
        fail(new InsertException(InsertExceptionMode.CANCELLED), true, context);
        return;
      }
      logMINOR = Logger.shouldLog(LogLevel.MINOR, BinaryBlobInserter.this);
      switch(e.code) {
      case LowLevelPutException.COLLISION:
        fail(new InsertException(InsertExceptionMode.COLLISION), false, context);
        break;
      case LowLevelPutException.INTERNAL_ERROR:
        errors.inc(InsertExceptionMode.INTERNAL_ERROR);
        break;
      case LowLevelPutException.REJECTED_OVERLOAD:
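The fragment above breaks off inside a switch over LowLevelPutException failure codes; the pattern it illustrates is the translation of low-level put failures into InsertException modes that are reported upward (a key collision becomes COLLISION, an internal error is counted in the error tracker, REJECTED_OVERLOAD is normally retried). A minimal, hypothetical sketch of that translation, using only the constants visible in the fragment (the helper class and method are made up, and the freenet.node package for LowLevelPutException is an assumption):

import freenet.client.InsertException;
import freenet.client.InsertException.InsertExceptionMode;
import freenet.node.LowLevelPutException; // package assumed from the fred source tree

// Hypothetical helper, not part of fred: maps the low-level codes seen in the
// fragment above onto an InsertException a client-layer callback could receive.
final class PutFailureMapper {
  static InsertException toInsertException(LowLevelPutException e) {
    switch(e.code) {
    case LowLevelPutException.COLLISION:
      // Reported with mode COLLISION, as in the fragment above.
      return new InsertException(InsertExceptionMode.COLLISION);
    case LowLevelPutException.INTERNAL_ERROR:
      // The fragment counts these per block; here we simply wrap the failure.
      return new InsertException(InsertExceptionMode.INTERNAL_ERROR, "Low-level internal error: "+e, null);
    case LowLevelPutException.REJECTED_OVERLOAD:
    default:
      // Overload is normally retried rather than failed immediately; this sketch
      // reports it as an internal error purely for illustration.
      return new InsertException(InsertExceptionMode.INTERNAL_ERROR, "Unhandled low-level failure: "+e, null);
    }
  }
}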
