Examples of LogEvent
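All of these snippets are drawn from the Azureus (Vuze) code base. As a quick orientation, the sketch below collects the LogEvent constructor shapes that actually occur in the examples on this page; the wrapper class, the placeholder arguments, and the choice of LogIDs.LOGGER are illustrative assumptions rather than code from the original sources.

    import org.gudy.azureus2.core3.logging.LogEvent;
    import org.gudy.azureus2.core3.logging.LogIDs;
    import org.gudy.azureus2.core3.logging.Logger;

    class LogEventPatterns {
      // Illustrative log ID; real call sites pick whichever LogIDs constant fits.
      private static final LogIDs LOGID = LogIDs.LOGGER;

      static void examples(Object download, String name, Throwable cause) {
        // Plain informational event.
        Logger.log(new LogEvent(LOGID, "simple message"));

        // Event with an explicit type (LT_ERROR, LT_WARNING, LT_INFORMATION).
        Logger.log(new LogEvent(LOGID, LogEvent.LT_ERROR, "something failed"));

        // Event carrying the Throwable that caused it.
        Logger.log(new LogEvent(LOGID, "operation failed", cause));

        // Event scoped to a torrent or download so log viewers can filter by it.
        Logger.log(new LogEvent(download, LOGID, "per-download message"));

        // Guard message construction when logging is disabled.
        if (Logger.isEnabled()) {
          Logger.log(new LogEvent(LOGID, "saving state for '" + name + "'"));
        }
      }
    }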


Examples of org.gudy.azureus2.core3.logging.LogEvent
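GUI drag-and-drop initialisation: any Throwable thrown during setup is caught and logged through the (LogID, text, Throwable) constructor, which attaches the error to the event.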

            event.data = eventData;
          }
        });
      }
    } catch (Throwable t) {
      Logger.log(new LogEvent(LogIDs.GUI, "failed to init drag-n-drop", t));
    }
  }

Examples of org.gudy.azureus2.core3.logging.LogEvent
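Security bootstrap: if the Bouncy Castle provider fails to initialise, an LT_ERROR event is logged and startup continues on to installSecurityManager().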

    try{
      SESecurityManagerBC.initialise();
     
    }catch( Throwable e ){
     
      Logger.log(new LogEvent(LOGID, LogEvent.LT_ERROR,
          "Bouncy Castle not available"));
    }
   
    installSecurityManager();
   

Examples of org.gudy.azureus2.core3.logging.LogEvent
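Cancelling a file download: the event text is localised through MessageText.getString(); the original author's note questioning the logger ID is left in place.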

            case REPORT_TYPE_CANCEL:
              if (null != downloader) {
                downloader.cancel();

                //KN: correct logger id?
                Logger.log(new LogEvent(LogIDs.LOGGER, MessageText.getString(
                    "FileDownload.canceled", new String[] {
                      getShortURL(decoded_url)
                    })));
              }
              break;

Examples of org.gudy.azureus2.core3.logging.LogEvent
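Saving download state: the torrent is passed as the event's first argument so the entry is tied to that download, and the Logger.isEnabled() guard skips the string concatenation when logging is off.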

      try {
        // System.out.println( "writing download state for '" + new String(torrent.getName()));

        if (Logger.isEnabled())
          Logger.log(new LogEvent(torrent, LOGID, "Saving state for download '"
              + TorrentUtils.getLocalisedName(torrent) + "'"));

        torrent.setAdditionalMapProperty( ATTRIBUTE_KEY, attributes );
       
        TorrentUtils.writeToFile(torrent, true);

      } catch (Throwable e) {
        Logger.log(new LogEvent(torrent, LOGID, "Saving state", e));
      }
    } else {

      // System.out.println( "not writing download state for '" + new String(torrent.getName()));
    }

Examples of org.gudy.azureus2.core3.logging.LogEvent
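Rejecting a peer-source change: when FLAG_ALLOW_PERMITTED_PEER_SOURCE_CHANGES is not set, the denied attempt is logged against the torrent and the method returns.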

    String  peerSource,
    boolean  enabled )
  {
    if ( !getFlag( FLAG_ALLOW_PERMITTED_PEER_SOURCE_CHANGES )){
     
      Logger.log(new LogEvent(torrent, LOGID, "Attempt to modify permitted peer sources denied as disabled '"
              + TorrentUtils.getLocalisedName(torrent) + "'"));

      return;
    }
   

Examples of org.gudy.azureus2.core3.logging.LogEvent
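From TrackerStatus.updateSingleHash(): each event is scoped to the affected download via TorrentUtils.getDownloadManager(hash), recording why a scrape was cancelled or queued, and hashes with nearby scrape times are batched into a group scrape.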

    {     
      //LGLogger.log( "updateSingleHash():: force=" + force + ", async=" +async+ ", url=" +scrapeURL+ ", hash=" +ByteFormatter.nicePrint(hash, true) );
   
      if ( scrapeURL == null ){
        if (Logger.isEnabled()) {
          Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID,
            "TrackerStatus: scrape cancelled.. url null"));
        }
     
        return;
      }
   
      try {
        ArrayList responsesToUpdate = new ArrayList();

        TRTrackerScraperResponseImpl response;
   
        try{
          hashes_mon.enter();
      
          response = (TRTrackerScraperResponseImpl)hashes.get( hash );
       
        }finally{
       
          hashes_mon.exit();
        }
 
        if ( response == null ){
         
          response = addHash(hash);
        }
       
        long lMainNextScrapeStartTime = response.getNextScrapeStartTime();
 
        if( !force && lMainNextScrapeStartTime > SystemTime.getCurrentTime() ) {
          if (Logger.isEnabled()) {
            Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID,
              "TrackerStatus: scrape cancelled.. not forced and still "
                  + (lMainNextScrapeStartTime - SystemTime.getCurrentTime())
                  + "ms"));
          }
          return;
        }
   
          // Set status id to SCRAPING, but leave status string until we actually
          // do the scrape
       
        response.setStatus(TRTrackerScraperResponse.ST_SCRAPING,
          MessageText.getString(SS + "scraping.queued"));
        if (Logger.isEnabled()) {
          Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID,
            "TrackerStatus: setting to scraping"));
        }

        responsesToUpdate.add(response);
     
          // Go through hashes and pick out other scrapes that are "close to" wanting a new scrape.
     
        if (!bSingleHashScrapes){
         
          try{
            hashes_mon.enter();
           
            Iterator iterHashes = hashes.values().iterator();
           
              // if we hit trackers with excessive scrapes they respond in varying fashions - from no reply
              // to returning 414 to whatever. Rather than hit trackers with large payloads that they then
              // reject, we limit each request to GROUP_SCRAPES_LIMIT hashes in one go
           
            while( iterHashes.hasNext() && responsesToUpdate.size() < GROUP_SCRAPES_LIMIT ){
             
              TRTrackerScraperResponseImpl r = (TRTrackerScraperResponseImpl)iterHashes.next();
             
              if ( !r.getHash().equals( hash )) {
               
                long lTimeDiff = Math.abs(lMainNextScrapeStartTime - r.getNextScrapeStartTime());
               
                if (lTimeDiff <= GROUP_SCRAPES_MS && r.getStatus() != TRTrackerScraperResponse.ST_SCRAPING) {
                 
                  r.setStatus(TRTrackerScraperResponse.ST_SCRAPING,
                      MessageText.getString(SS + "scraping.queued"));
                  if (Logger.isEnabled()) {
                    Logger.log(new LogEvent(TorrentUtils.getDownloadManager(r.getHash()), LOGID,
                      "TrackerStatus: setting to scraping via group scrape"));
                  }
                 
                  responsesToUpdate.add(r);
                }

Examples of org.gudy.azureus2.core3.logging.LogEvent
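Queueing scrapes onto a thread pool: a single summary event records the scrape URL, how many of the tracked hashes are included, and the queue size.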

              runScrapesSupport( responses, force );
            }
          });
       
        if (Logger.isEnabled()) {
          Logger.log(new LogEvent(LOGID, "TrackerStatus: queuing '" + scrapeURL
              + "', for " + responses.size() + " of " + hashes.size() + " hashes"
              + ", single_hash_scrapes: " + (bSingleHashScrapes ? "Y" : "N")
              + ", queue size=" + thread_pool.getQueueSize()));
        }
      }else{

Examples of org.gudy.azureus2.core3.logging.LogEvent
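Evidently TrackerStatus.runScrapesSupport(), invoked by the previous snippet: events are logged at each stage of a scrape, including the request URL, the decoded response, min_request_interval handling, warnings when a tracker mishandles multi-hash scrapes, and LT_ERROR events carrying the failure detail.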

      ArrayList   responses,
      boolean   force )
    {
    try {
      if (Logger.isEnabled()) {
        Logger.log(new LogEvent(LOGID, "TrackerStatus: scraping '" + scrapeURL
            + "', for " + responses.size() + " of " + hashes.size() + " hashes"
            + ", single_hash_scrapes: " + (bSingleHashScrapes ? "Y" : "N")));
      }

      boolean original_bSingleHashScrapes = bSingleHashScrapes;

      boolean disable_all_scrapes = !COConfigurationManager
          .getBooleanParameter("Tracker Client Scrape Enable");
      boolean disable_stopped_scrapes = !COConfigurationManager
          .getBooleanParameter("Tracker Client Scrape Stopped Enable");
     
      byte[]  scrape_reply = null;
     
      try {
        // if URL already includes a query component then just append our
        // params

        HashWrapper one_of_the_hashes = null;
        TRTrackerScraperResponseImpl one_of_the_responses = null;

        char first_separator = scrapeURL.indexOf('?') == -1 ? '?' : '&';

        String info_hash = "";

        String flags = "";
       
        List hashesForUDP = new ArrayList();
       
        for (int i = 0; i < responses.size(); i++) {
          TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) responses.get(i);

          HashWrapper hash = response.getHash();

          if (Logger.isEnabled())
            Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID,
                "TrackerStatus: scraping, single_hash_scrapes = "
                    + bSingleHashScrapes));

          if (!scraper.isNetworkEnabled(hash, tracker_url)) {

            response.setNextScrapeStartTime(SystemTime.getCurrentTime()
                + FAULTY_SCRAPE_RETRY_INTERVAL);

            response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText
                .getString(SS + "networkdisabled"));

            scraper.scrapeReceived(response);

          } else if ( !force && (
                disable_all_scrapes ||
                (disable_stopped_scrapes && !scraper.isTorrentRunning(hash)))){

            response.setNextScrapeStartTime(SystemTime.getCurrentTime()
                + FAULTY_SCRAPE_RETRY_INTERVAL);

            response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText
                .getString(SS + "disabled"));

            scraper.scrapeReceived(response);

          } else {

            response.setStatus(TRTrackerScraperResponse.ST_SCRAPING,
                MessageText.getString(SS + "scraping"));

            // technically we haven't received a scrape yet, but we need
            // to notify listeners (the ones that display status)
            scraper.scrapeReceived(response);

            // the client-id stuff RELIES on info_hash being the FIRST
            // parameter added by us to the URL, so don't change it!

            info_hash += ((one_of_the_hashes != null) ? '&' : first_separator)
                + "info_hash=";

            info_hash += URLEncoder.encode(
                new String(hash.getBytes(), Constants.BYTE_ENCODING),
                Constants.BYTE_ENCODING).replaceAll("\\+", "%20");

            Object[]  extensions = scraper.getExtensions(hash);
           
            if ( extensions != null ){
             
              if ( extensions[0] != null ){
               
                info_hash += (String)extensions[0];
              }
             
              flags += (Character)extensions[1];
             
            }else{
             
              flags += TRTrackerScraperClientResolver.FL_NONE;
            }
           
            one_of_the_responses = response;
            one_of_the_hashes = hash;
           
            // 28 + 16 + 70*20 -> IPv4/UDP packet size of 1444, which should get through most links unfragmented
            if(hashesForUDP.size() < 70)
              hashesForUDP.add(hash);
          }
        } // for responses

        if (one_of_the_hashes == null)
          return;

        String  request = scrapeURL + info_hash;
       
        if ( az_tracker ){
         
          String  port_details = TRTrackerUtils.getPortsForURL();
         
          request += port_details;
         
          request += "&azsf=" + flags + "&azver=" + TRTrackerAnnouncer.AZ_TRACKER_VERSION_CURRENT;
        }
       
        URL reqUrl = new URL( request );

        if (Logger.isEnabled())
          Logger.log(new LogEvent(LOGID,
              "Accessing scrape interface using url : " + reqUrl));

        ByteArrayOutputStream message = new ByteArrayOutputStream();

        long scrapeStartTime = SystemTime.getCurrentTime();

        URL  redirect_url = null;
       
        String protocol = reqUrl.getProtocol();
       
          URL udpScrapeURL = null;
         
          boolean auto_probe = false;
         
          if (protocol.equalsIgnoreCase("udp")){
           
            if ( udpScrapeEnabled ){
         
              udpScrapeURL = reqUrl;
             
            }else{
             
              throw( new IOException( "UDP Tracker protocol disabled" ));
             
            }
          }else if ( protocol.equalsIgnoreCase("http") &&
              !az_tracker &&
              scrapeCount % autoUDPscrapeEvery == 0 &&
              udpProbeEnabled && udpScrapeEnabled ){
           
            udpScrapeURL = new URL(reqUrl.toString().replaceFirst("^http", "udp"));
           
            auto_probe = true;
          }
         
          try{
              // set context in case authentication dialog is required
           
          TorrentUtils.setTLSTorrentHash(one_of_the_hashes);

            if ( udpScrapeURL != null){
             
              boolean success = scrapeUDP( reqUrl, message, hashesForUDP, !auto_probe );
             
              if((!success || message.size() == 0) && !protocol.equalsIgnoreCase("udp"))
              { // automatic UDP probe failed, use HTTP again
                udpScrapeURL = null;
                message.reset();
                if(autoUDPscrapeEvery < 16)
                  autoUDPscrapeEvery <<= 1;
              if (Logger.isEnabled())
                Logger.log(new LogEvent(LOGID, LogEvent.LT_INFORMATION, "redirection of http scrape ["+scrapeURL+"] to udp failed, will retry in "+autoUDPscrapeEvery+" scrapes"));
              } else if(success && !protocol.equalsIgnoreCase("udp"))
              {
              if (Logger.isEnabled())
                Logger.log(new LogEvent(LOGID, LogEvent.LT_INFORMATION, "redirection of http scrape ["+scrapeURL+"] to udp successful"));
                autoUDPscrapeEvery = 1;
                TRTrackerUtils.setUDPProbeResult( reqUrl, true );
              }
               
            }
           
            scrapeCount++;
           
            if(udpScrapeURL == null)
              redirect_url = scrapeHTTP(reqUrl, message);
        }finally{
         
          TorrentUtils.setTLSTorrentHash( null );
        }
       
        scrape_reply = message.toByteArray();
       
        Map map = BDecoder.decode( scrape_reply );
               
        boolean  this_is_az_tracker = map.get( "aztracker" ) != null;
       
        if ( az_tracker != this_is_az_tracker ){
           
          az_tracker  = this_is_az_tracker;
         
          TRTrackerUtils.setAZTracker( tracker_url, az_tracker );
        }
       
        Map mapFiles = (Map) map.get("files");

        if (Logger.isEnabled())
          Logger.log(new LogEvent(LOGID, "Response from scrape interface "
              + scrapeURL + ": "
              + ((mapFiles == null) ? "null" : "" + mapFiles.size())
              + " returned"));

        int iMinRequestInterval = 0;
        if (map != null) {
          /* "The spec":
           * files
           *   infohash
           *   complete
           *   incomplete
           *   downloaded
           *   name
           *  flags
           *    min_request_interval
           *  failure reason
           */
          Map mapFlags = (Map) map.get("flags");
          if (mapFlags != null) {
            Long longScrapeValue = (Long) mapFlags
                .get("min_request_interval");
            if (longScrapeValue != null)
              iMinRequestInterval = longScrapeValue.intValue();
            // Tracker owners want this log entry
            if (Logger.isEnabled())
              Logger.log(new LogEvent(LOGID,
                "Received min_request_interval of " + iMinRequestInterval));
          }
        }

        if (mapFiles == null || mapFiles.size() == 0) {

          // azureus extension here to handle "failure reason" returned for
          // scrapes

          byte[] failure_reason_bytes = map == null ? null : (byte[]) map
              .get("failure reason");

          if (failure_reason_bytes != null) {
            long nextScrapeTime = SystemTime.getCurrentTime()
                + ((iMinRequestInterval == 0) ? FAULTY_SCRAPE_RETRY_INTERVAL
                    : iMinRequestInterval * 1000);

            for (int i = 0; i < responses.size(); i++) {

              TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) responses
                  .get(i);

              response.setNextScrapeStartTime(nextScrapeTime);

              response.setStatus(TRTrackerScraperResponse.ST_ERROR,
                  MessageText.getString(SS + "error")
                      + new String(failure_reason_bytes,
                          Constants.DEFAULT_ENCODING));

              // notify listeners

              scraper.scrapeReceived(response);
            }

          } else {
            if (responses.size() > 1) {
              // multi were requested, 0 returned. Therefore, multi not
              // supported
              bSingleHashScrapes = true;
              if (Logger.isEnabled())
                Logger.log(new LogEvent(LOGID, LogEvent.LT_WARNING, scrapeURL
                    + " doesn't properly support " + "multi-hash scrapes"));

              for (int i = 0; i < responses.size(); i++) {
                TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) responses
                    .get(i);

                response.setStatus(TRTrackerScraperResponse.ST_ERROR,
                    MessageText.getString(SS + "error")
                        + MessageText.getString(SSErr + "invalid"));
                // notify listeners
                scraper.scrapeReceived(response);
              }
            } else {
              long nextScrapeTime = SystemTime.getCurrentTime()
                  + ((iMinRequestInterval == 0) ? NOHASH_RETRY_INTERVAL
                      : iMinRequestInterval * 1000);
              // 1 was requested, 0 returned. Therefore, hash not found.
              TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) responses
                  .get(0);
              response.setNextScrapeStartTime(nextScrapeTime);
              response.setStatus(TRTrackerScraperResponse.ST_ERROR,
                  MessageText.getString(SS + "error")
                      + MessageText.getString(SSErr + "nohash"));
              // notify listeners
              scraper.scrapeReceived(response);
            }
          }

          return;
        }

        /*
         * If we requested multiple hashes but only one was returned, revert
         * to single-hash scrapes, but continue on to process the one hash that
         * was returned (it may be a random one from the list)
         */
        if (!bSingleHashScrapes && responses.size() > 1
            && mapFiles.size() == 1) {
          bSingleHashScrapes = true;
          if (Logger.isEnabled())
            Logger.log(new LogEvent(LOGID, LogEvent.LT_WARNING, scrapeURL
                + " only returned " + mapFiles.size()
                + " hash scrape(s), but we asked for " + responses.size()));
        }

        for (int i = 0; i < responses.size(); i++) {
          TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) responses
              .get(i);

          // LGLogger.log( "decoding response #" +i+ ": " +
          // ByteFormatter.nicePrint( response.getHash(), true ) );

          // retrieve the scrape data for the relevant infohash
          Map scrapeMap = (Map) mapFiles.get(new String(response.getHash().getBytes(),
              Constants.BYTE_ENCODING));

          if (scrapeMap == null) {
            // some trackers that return only 1 hash return a random one!
            if (responses.size() == 1 || mapFiles.size() != 1) {

              response.setNextScrapeStartTime(SystemTime.getCurrentTime()
                  + NOHASH_RETRY_INTERVAL);

              response.setStatus(TRTrackerScraperResponse.ST_ERROR,
                  MessageText.getString(SS + "error")
                      + MessageText.getString(SSErr + "nohash"));
              // notify listeners
              scraper.scrapeReceived(response);
            } else if (!disable_stopped_scrapes || scraper.isTorrentRunning(response.getHash())) {
              // This tracker doesn't support multiple hash requests.
              // revert status to what it was

              response.revertStatus();

              if (response.getStatus() == TRTrackerScraperResponse.ST_SCRAPING) {

                // System.out.println("Hash " +
                // ByteFormatter.nicePrint(response.getHash(), true) + "
                // mysteriously reverted to ST_SCRAPING!");

                // response.setStatus(TRTrackerScraperResponse.ST_ONLINE, "");

                response.setNextScrapeStartTime(SystemTime.getCurrentTime()
                    + FAULTY_SCRAPE_RETRY_INTERVAL);

                response.setStatus(TRTrackerScraperResponse.ST_ERROR,
                    MessageText.getString(SS + "error")
                        + MessageText.getString(SSErr + "invalid"));

              } else {

                // force single-hash scrapes here

                bSingleHashScrapes = true;

                // only set the next retry time if this is the first
                // single-hash failure

                if (original_bSingleHashScrapes) {

                  response.setNextScrapeStartTime(SystemTime.getCurrentTime()
                      + FAULTY_SCRAPE_RETRY_INTERVAL);
                }

              }
              // notify listeners
              scraper.scrapeReceived(response);

              // if this was the first scrape request in the list,
              // TrackerChecker will attempt to scrape again because we didn't
              // reset the nextscrapestarttime. But the next time,
              // bSingleHashScrapes will be true and only 1 hash will be
              // requested, so there will be no infinite looping
            }
            // System.out.println("scrape: hash missing from reply");
          } else {
            // retrieve values
            int seeds = ((Long) scrapeMap.get("complete")).intValue();
            int peers = ((Long) scrapeMap.get("incomplete")).intValue();
            Long comp = (Long) scrapeMap.get("downloaded");
            int completed = comp == null ? -1 : comp.intValue();

            // make sure we don't use invalid replies
            if (seeds < 0 || peers < 0 || completed < -1) {
              if (Logger.isEnabled()) {
                HashWrapper hash = response.getHash();
                Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash),
                    LOGID, "Invalid scrape response from '" + reqUrl
                        + "': map = " + scrapeMap));
              }

              // We requested multiple hashes, but tracker didn't support
              // multiple hashes and returned 1 hash. However, that hash is
              // invalid because seeds or peers was < 0. So, exit. Scrape
              // manager will run scrapes for each individual hash.
              if (responses.size() > 1 && bSingleHashScrapes) {

                response.setStatus(TRTrackerScraperResponse.ST_ERROR,
                    MessageText.getString(SS + "error")
                        + MessageText.getString(SSErr + "invalid"));

                scraper.scrapeReceived(response);

                continue;
              }

              response.setNextScrapeStartTime(SystemTime.getCurrentTime()
                  + FAULTY_SCRAPE_RETRY_INTERVAL);
              response.setStatus(TRTrackerScraperResponse.ST_ERROR,
                  MessageText.getString(SS + "error")
                      + MessageText.getString(SSErr + "invalid")
                      + " "
                      + (seeds < 0 ? MessageText
                          .getString("MyTorrentsView.seeds")
                          + " == " + seeds + ". " : "")
                      + (peers < 0 ? MessageText
                          .getString("MyTorrentsView.peers")
                          + " == " + peers + ". " : "")
                      + (completed < 0 ? MessageText
                          .getString("MyTorrentsView.completed")
                          + " == " + completed + ". " : ""));

              scraper.scrapeReceived(response);

              continue;
            }

            int scrapeInterval = TRTrackerScraperResponseImpl
                .calcScrapeIntervalSecs(iMinRequestInterval, seeds);

            long nextScrapeTime = SystemTime.getCurrentTime()
                + (scrapeInterval * 1000);
            response.setNextScrapeStartTime(nextScrapeTime);

            // create the response
            response.setScrapeStartTime(scrapeStartTime);
            response.setSeeds(seeds);
            response.setPeers(peers);
            response.setCompleted(completed);
            response.setStatus(TRTrackerScraperResponse.ST_ONLINE,
                MessageText.getString(SS + "ok"));

            // notify listeners
            scraper.scrapeReceived(response);
           
            try{
              if ( responses.size() == 1 && redirect_url != null ){
               
                  // we only deal with redirects for single urls - if the tracker wants to
                  // redirect one of a group it has to force single-hash scrapes anyway
               
                String  redirect_str = redirect_url.toString();
               
                int s_pos =  redirect_str.indexOf( "/scrape" );
               
                if ( s_pos != -1 ){
                 
                  URL  new_url = new URL( redirect_str.substring(0,s_pos) +
                          "/announce" + redirect_str.substring(s_pos+7));
                 
                  if ( scraper.redirectTrackerUrl( response.getHash(), tracker_url, new_url )){
                   
                    removeHash( response.getHash());
                  }
                }
              }
            }catch( Throwable e ){
             
              Debug.printStackTrace(e);
            }
          }
        } // for responses

      } catch (NoClassDefFoundError ignoreSSL) { // javax/net/ssl/SSLSocket
        for (int i = 0; i < responses.size(); i++) {
          TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) responses
              .get(i);
          response.setNextScrapeStartTime(SystemTime.getCurrentTime()
              + FAULTY_SCRAPE_RETRY_INTERVAL);
          response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText
              .getString(SS + "error")
              + ignoreSSL.getMessage());
          // notify listeners
          scraper.scrapeReceived(response);
        }
      } catch (FileNotFoundException e) {
        for (int i = 0; i < responses.size(); i++) {
          TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) responses
              .get(i);
          response.setNextScrapeStartTime(SystemTime.getCurrentTime()
              + FAULTY_SCRAPE_RETRY_INTERVAL);
          response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText
              .getString(SS + "error")
              + MessageText.getString("DownloadManager.error.filenotfound"));
          // notify listeners
          scraper.scrapeReceived(response);
        }
      } catch (SocketException e) {
        setAllError(e);
      } catch (SocketTimeoutException e) {
        setAllError(e);
      } catch (UnknownHostException e) {
        setAllError(e);
      } catch (PRUDPPacketHandlerException e) {
        setAllError(e);
      } catch (BEncodingException e) {
        setAllError(e);
      } catch (Exception e) {
       
        // for Apache we can get error 414 - URL too long. The simplest
        // solution is to fall back to single-hash scraping

        String error_message = e.getMessage();
       
        if (error_message != null) {
          if (error_message.indexOf(" 500 ") >= 0
              || error_message.indexOf(" 400 ") >= 0
              || error_message.indexOf(" 403 ") >= 0
              || error_message.indexOf(" 404 ") >= 0
              || error_message.indexOf(" 501 ") >= 0) {
            // various errors that have a 99% chance of happening on
            // any other scrape request
            setAllError(e);
            return;
          }

          if (error_message.indexOf("414") != -1
              && !bSingleHashScrapes) {
            bSingleHashScrapes = true;
            // Skip setting up the response. We want to scrape again
            return;
          }
        }

        String msg = Debug.getNestedExceptionMessage(e);

        if ( scrape_reply != null ){

           String  trace_data;
          
           if ( scrape_reply.length <= 150 ){
            
             trace_data = new String(scrape_reply);
            
           }else{
            
             trace_data = new String(scrape_reply,0,150) + "...";
           }
          
           msg += " [" + trace_data + "]";
        }
       
        for (int i = 0; i < responses.size(); i++) {
          TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) responses
              .get(i);

          if (Logger.isEnabled()) {
            HashWrapper hash = response.getHash();
            Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID,
                LogEvent.LT_ERROR, "Error from scrape interface " + scrapeURL
                    + " : " + msg + " (" + e.getClass() + ")"));
          }

          response.setNextScrapeStartTime(SystemTime.getCurrentTime()

Examples of org.gudy.azureus2.core3.logging.LogEvent
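Apparently the error path that fails a whole batch of scrapes (compare setAllError in the previous snippet): an LT_WARNING event is logged per response, again scoped to the download via TorrentUtils.getDownloadManager().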

    for (int i = 0; i < values.length; i++) {
      TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) values[i];

      if (Logger.isEnabled()) {
        HashWrapper hash = response.getHash();
        Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID,
            LogEvent.LT_WARNING, "Error from scrape interface " + scrapeURL
                + " : " + msg));
        //e.printStackTrace();
      }

Examples of org.gudy.azureus2.core3.logging.LogEvent
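A scrape helper's exception handler: an LT_ERROR event is logged with the nested exception message (Debug.getNestedExceptionMessage) before the method returns null.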

          break;
        }
        } catch (Exception e) {
         
          if (Logger.isEnabled())
            Logger.log(new LogEvent(LOGID, LogEvent.LT_ERROR,
                "Error from scrape interface " + scrapeURL + " : "
                    + Debug.getNestedExceptionMessage(e)));

          return( null );
        }