Examples of IVideoPicture


Examples of com.xuggle.xuggler.IVideoPicture

      System.out.println(e.getMessage());
      e.printStackTrace(System.out);
    }

    long timeStamp = (now - firstTimeStamp)*1000; // convert to microseconds
    IVideoPicture outFrame = converter.toPicture(worksWithXugglerBufferedImage,
        timeStamp);

    outFrame.setQuality(0);
    int retval = outStreamCoder.encodeVideo(packet, outFrame, 0);
    if (retval < 0)
      throw new RuntimeException("could not encode video");
    if (packet.isComplete())
    {
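
The excerpt above assumes that a converter and a Xuggler-friendly BufferedImage already exist. A minimal sketch of that setup, assuming a hypothetical source image named sourceImage (Xuggler's converters expect BufferedImage.TYPE_3BYTE_BGR input):

import java.awt.image.BufferedImage;
import com.xuggle.xuggler.IPixelFormat;
import com.xuggle.xuggler.video.ConverterFactory;
import com.xuggle.xuggler.video.IConverter;

// sourceImage stands in for whatever image is being encoded
BufferedImage worksWithXugglerBufferedImage = ConverterFactory.convertToType(
    sourceImage, BufferedImage.TYPE_3BYTE_BGR);
IConverter converter = ConverterFactory.createConverter(
    worksWithXugglerBufferedImage, IPixelFormat.Type.YUV420P);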

Examples of com.xuggle.xuggler.IVideoPicture

     */
    IStreamCoder ic = null;
    IStreamCoder oc = null;
    IAudioResampler as = null;
    IVideoResampler vs = null;
    IVideoPicture inFrame = null;
    IVideoPicture reFrame = null;

    /**
     * We've already opened the files in #setupStreams(CommandLine). We just
     * keep reading packets from the input container until readNextPacket
     * returns a value < 0.
     */
    while (mIContainer.readNextPacket(iPacket) == 0)
    {
      /**
       * Find out which stream this packet belongs to.
       */
      int i = iPacket.getStreamIndex();
      int offset = 0;

      /**
       * Find out if this stream has a starting timestamp
       */
      IStream stream = mIContainer.getStream(i);
      long tsOffset = 0;
      if (stream.getStartTime() != Global.NO_PTS && stream.getStartTime() > 0
          && stream.getTimeBase() != null)
      {
        IRational defTimeBase = IRational.make(1,
            (int) Global.DEFAULT_PTS_PER_SECOND);
        tsOffset = defTimeBase.rescale(stream.getStartTime(), stream
            .getTimeBase());
      }
      /**
       * And look up the appropriate objects that are working on that stream.
       */
      ic = mICoders[i];
      oc = mOCoders[i];
      as = mASamplers[i];
      vs = mVSamplers[i];
      inFrame = mIVideoPictures[i];
      reFrame = mOVideoPictures[i];
      inSamples = mISamples[i];
      reSamples = mOSamples[i];

      if (oc == null)
        // we didn't set up this coder; ignore the packet
        continue;

      /**
       * Find out if the stream is audio or video.
       */
      ICodec.Type cType = ic.getCodecType();

      if (cType == ICodec.Type.CODEC_TYPE_AUDIO && mHasAudio)
      {
        /**
         * Decoding audio works by taking the data in the packet, and eating
         * chunks from it to create decoded raw data.
         *
         * However, there may be more data in a packet than is needed to get one
         * set of samples (or less), so you need to iterate through the bytes to
         * get that data.
         *
         * The following loop is the standard way of doing that.
         */
        while (offset < iPacket.getSize())
        {
          retval = ic.decodeAudio(inSamples, iPacket, offset);
          if (retval <= 0)
            throw new RuntimeException("could not decode audio.  stream: " + i);

          if (inSamples.getTimeStamp() != Global.NO_PTS)
            inSamples.setTimeStamp(inSamples.getTimeStamp() - tsOffset);

          log.trace("packet:{}; samples:{}; offset:{}", new Object[]
          {
              iPacket, inSamples, tsOffset
          });

          /**
           * If there was no error, decodeAudio returns the number of bytes it
           * consumed. We use that so the next time around the loop we get new
           * data.
           */
          offset += retval;
          int numSamplesConsumed = 0;
          /**
           * If as is not null, then we know a resample is needed, so we do
           * that resample now.
           */
          if (as != null && inSamples.getNumSamples() > 0)
          {
            retval = as.resample(reSamples, inSamples, inSamples
                .getNumSamples());

            outSamples = reSamples;
          }
          else
          {
            outSamples = inSamples;
          }

          /**
           * Call a hook that allows derived classes to alter the audio frame.
           */

          outSamples = alterAudioFrame(outSamples);

          /**
           * Now that we've resampled, it's time to encode the audio.
           *
           * This workflow is similar to decoding; you may have more, fewer, or
           * just enough audio samples available to fill a packet, so you must
           * iterate through them.
           *
           * Unfortunately (don't ask why) there is a slight difference between
           * encodeAudio and decodeAudio; encodeAudio returns the number of
           * samples consumed, NOT the number of bytes. This can be confusing,
           * and we encourage you to read the IAudioSamples documentation to
           * find out what the difference is.
           *
           * In any case, the following loop encodes the samples we have into
           * packets.
           */
          while (numSamplesConsumed < outSamples.getNumSamples())
          {
            retval = oc.encodeAudio(oPacket, outSamples, numSamplesConsumed);
            if (retval <= 0)
              throw new RuntimeException("Could not encode any audio: "
                  + retval);
            /**
             * Increment the number of samples consumed, so that the next time
             * through this loop we encode new audio
             */
            numSamplesConsumed += retval;
            log.trace("out packet:{}; samples:{}; offset:{}", new Object[]{
                oPacket, outSamples, tsOffset
            });

            writePacket(oPacket);
          }
        }

      }
      else if (cType == ICodec.Type.CODEC_TYPE_VIDEO && mHasVideo)
      {
        /**
         * This encoding workflow is pretty much the same as for the audio
         * above.
         *
         * The only major difference is that encodeVideo() always consumes one
         * frame (whereas encodeAudio() might only consume some of the samples
         * in an IAudioSamples buffer); it might not be able to output a packet
         * yet, but you can assume that it consumes the entire frame.
         */
        IVideoPicture outFrame = null;
        while (offset < iPacket.getSize())
        {
          retval = ic.decodeVideo(inFrame, iPacket, offset);
          if (retval <= 0)
            throw new RuntimeException("could not decode any video.  stream: "
                + i);

          log.trace("decoded vid ts: {}; pkts ts: {}", inFrame.getTimeStamp(),
              iPacket.getTimeStamp());
          if (inFrame.getTimeStamp() != Global.NO_PTS)
            inFrame.setTimeStamp(inFrame.getTimeStamp() - tsOffset);

          offset += retval;
          if (inFrame.isComplete())
          {
            if (vs != null)
            {
              retval = vs.resample(reFrame, inFrame);
              if (retval < 0)
                throw new RuntimeException("could not resample video");
              outFrame = reFrame;
            }
            else
            {
              outFrame = inFrame;
            }

            /**
             * Call a hook that allows derived classes to alter the video
             * frame.
             */

            outFrame = alterVideoFrame(outFrame);

            outFrame.setQuality(0);
            retval = oc.encodeVideo(oPacket, outFrame, 0);
            if (retval < 0)
              throw new RuntimeException("could not encode video");
            writePacket(oPacket);
          }
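
The tsOffset calculation in the example above rescales the stream's start time from the stream's own time base into Xuggler's default microsecond time base. A worked example, assuming a hypothetical stream whose time base is 1/90000 (common for MPEG transport streams) and whose start time is 180000 ticks:

import com.xuggle.xuggler.Global;
import com.xuggle.xuggler.IRational;

IRational defTimeBase = IRational.make(1, (int) Global.DEFAULT_PTS_PER_SECOND);
// 180000 ticks at 1/90000 seconds per tick is 2 seconds,
// i.e. 2,000,000 in the 1/1,000,000 default time base
long tsOffset = defTimeBase.rescale(180000L, IRational.make(1, 90000));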

Examples of com.xuggle.xuggler.IVideoPicture

                    continue;
            }

            if (packet.getStreamIndex() == videoStreamIndex) {
                // a picture to hold each frame decoded from this packet
                IVideoPicture picture = IVideoPicture.make(
                    coder.getPixelType(), coder.getWidth(), coder.getHeight());

                // offset of already-processed data within the packet
                int offset = 0;
                // fetch every frame from the packet
                while (offset < packet.getSize()) {
                    int bytesDecoded = coder.decodeVideo(picture, packet,
                        offset);

                    if (bytesDecoded < 0) {
                        videoSharingSession.reportError(new DecodingException(
                            IError.make(bytesDecoded).getDescription()));
                        return;
                    }

                    offset += bytesDecoded;

                    if (picture.isComplete()) {
                        sleepUntilRenderFrame(picture);
                        updatePlayer(picture);
                        statistic.renderedFrame();
                    }
                }
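
sleepUntilRenderFrame(picture) is not shown in this excerpt. A minimal sketch of what such a helper might do, assuming it paces rendering against the wall clock using the picture's microsecond timestamps (the project's real implementation may differ):

import com.xuggle.xuggler.Global;
import com.xuggle.xuggler.IVideoPicture;

private long firstTimestampUs = Global.NO_PTS;
private long playbackStartMs;

private void sleepUntilRenderFrame(IVideoPicture picture)
    throws InterruptedException {
  if (firstTimestampUs == Global.NO_PTS) {
    // first frame: anchor the stream clock to the wall clock
    firstTimestampUs = picture.getTimeStamp();
    playbackStartMs = System.currentTimeMillis();
    return;
  }
  long dueMs = playbackStartMs
      + (picture.getTimeStamp() - firstTimestampUs) / 1000;
  long delayMs = dueMs - System.currentTimeMillis();
  if (delayMs > 0)
    Thread.sleep(delayMs);
}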

Examples of com.xuggle.xuggler.IVideoPicture

                coder.getHeight(), coder.getPixelType());
            if (resampler == null)
                throw new RuntimeException("resampler not available");
        }

        IVideoPicture newPicture = IVideoPicture.make(pixelformat,
            coder.getWidth(), coder.getHeight());

        if (resampler.resample(newPicture, picture) < 0)
            throw new RuntimeException("could not resample the picture");
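
The creation of the resampler is truncated in this excerpt; it is typically built with IVideoResampler.make(). A sketch, assuming only the pixel format changes (hence identical input and output dimensions):

import com.xuggle.xuggler.IVideoResampler;

IVideoResampler resampler = IVideoResampler.make(
    coder.getWidth(), coder.getHeight(), pixelformat,           // output
    coder.getWidth(), coder.getHeight(), coder.getPixelType()); // input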

Examples of com.xuggle.xuggler.IVideoPicture

      int crossHatchH,
      int crossHatchYColor,
      int crossHatchUColor,
      int crossHatchVColor)
  {
    IVideoPicture frame = IVideoPicture.make(IPixelFormat.Type.YUV420P, w, h);
    if (frame == null)
      throw new OutOfMemoryError("could not allocate frame");

    // frame cannot be null past this point, so fill in its pixel data
    {
      AtomicReference<JNIReference> ref = new AtomicReference<JNIReference>(
          null);
      IBuffer data = null;
      try
      {
        data = frame.getData();
        int bufSize = frame.getSize();
        java.nio.ByteBuffer buffer = data.getByteBuffer(0, bufSize, ref);
        if (buffer != null)
        {
          // we have the raw data; now we set it to the specified YUV value.
          int lineLength = 0;
          int offset = 0;

          // first, fill the Y (luma) plane
          offset = 0;
          lineLength = frame.getDataLineSize(0);
          int sliceLen = lineLength * h;
          for (int i = offset; i < offset + sliceLen; i++)
          {
            int x = (i - offset) % lineLength;
            int y = (i - offset) / lineLength;
            if (crossHatchH > 0
                && crossHatchW > 0
                && (((x / crossHatchW) % 2 == 1 && (y / crossHatchH) % 2 == 0) || ((x / crossHatchW) % 2 == 0 && (y / crossHatchH) % 2 == 1)))
              buffer.put(i, (byte) crossHatchYColor);
            else
              buffer.put(i, (byte) yColor);
          }

          // now fill the U (chroma) plane
          offset = (frame.getDataLineSize(0) * h);
          lineLength = frame.getDataLineSize(1);
          sliceLen = lineLength * ((h + 1) / 2);
          for (int i = offset; i < offset + sliceLen; i++)
          {
            if (crossHatchH > 0 && crossHatchW > 0)
            {
              // map this chroma sample to its 2x2 block of luma coordinates
              int x = ((i - offset) % lineLength) * 2;
              int y = ((i - offset) / lineLength) * 2;

              int[] xCoords = new int[]
              {
                  x, x + 1, x, x + 1
              };
              int[] yCoords = new int[]
              {
                  y, y, y + 1, y + 1
              };
              int finalColor = 0;
              for (int j = 0; j < xCoords.length; j++)
              {
                int color = uColor;
                x = xCoords[j];
                y = yCoords[j];
                if (((x / crossHatchW) % 2 == 1 && (y / crossHatchH) % 2 == 0)
                    || ((x / crossHatchW) % 2 == 0 && (y / crossHatchH) % 2 == 1))
                {
                  color = crossHatchUColor;
                }
                finalColor += color;
              }
              finalColor /= xCoords.length;
              buffer.put(i, (byte) finalColor);
            }
            else
              buffer.put(i, (byte) uColor);
          }

          // and finally, fill the V (chroma) plane
          offset = (frame.getDataLineSize(0) * h)
              + (frame.getDataLineSize(1) * ((h + 1) / 2));
          lineLength = frame.getDataLineSize(2);
          sliceLen = lineLength * ((h + 1) / 2);
          for (int i = offset; i < offset + sliceLen; i++)
          {
            if (crossHatchH > 0 && crossHatchW > 0)
            {
              // map this chroma sample to its 2x2 block of luma coordinates
              int x = ((i - offset) % lineLength) * 2;
              int y = ((i - offset) / lineLength) * 2;

              int[] xCoords = new int[]
              {
                  x, x + 1, x, x + 1
              };
              int[] yCoords = new int[]
              {
                  y, y, y + 1, y + 1
              };
              int finalColor = 0;
              for (int j = 0; j < xCoords.length; j++)
              {
                int color = vColor;
                x = xCoords[j];
                y = yCoords[j];
                if (((x / crossHatchW) % 2 == 1 && (y / crossHatchH) % 2 == 0)
                    || ((x / crossHatchW) % 2 == 0 && (y / crossHatchH) % 2 == 1))
                  color = crossHatchVColor;
                finalColor += color;
              }
              finalColor /= xCoords.length;
              buffer.put(i, (byte) finalColor);
            }
            else
              buffer.put(i, (byte) vColor);
          }
        }
        // set it complete
        frame.setComplete(true, IPixelFormat.Type.YUV420P, w, h, pts);
      }
      finally
      {
        if (data != null)
          data.delete();
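
The offsets used above follow directly from the planar YUV420P layout: a full-resolution Y (luma) plane, followed by U and V (chroma) planes at half resolution in each dimension. The same arithmetic, pulled out as a sketch:

int ySize = frame.getDataLineSize(0) * h;             // Y plane: full height
int uSize = frame.getDataLineSize(1) * ((h + 1) / 2); // U plane: half height
int vSize = frame.getDataLineSize(2) * ((h + 1) / 2); // V plane: half height

int yOffset = 0;
int uOffset = ySize;         // U starts right after Y
int vOffset = ySize + uSize; // V starts right after U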


Examples of com.xuggle.xuggler.IVideoPicture

    if (null == stream)
      return;

    // convert the image to a picture and push it off to be encoded

    IVideoPicture picture = convertToPicture(streamIndex,
      image, MICROSECONDS.convert(timeStamp, timeUnit));

    try
    {
      encodeVideo(streamIndex, picture, image);
    }
    finally
    {
      if (picture != null)
        picture.delete();
    }
  }
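
convertToPicture(...) is not shown here. A minimal sketch of such a helper, assuming the target stream expects YUV420P (the real method also takes the stream index and presumably looks the pixel format up from that stream's coder):

import java.awt.image.BufferedImage;
import com.xuggle.xuggler.IPixelFormat;
import com.xuggle.xuggler.IVideoPicture;
import com.xuggle.xuggler.video.ConverterFactory;
import com.xuggle.xuggler.video.IConverter;

private IVideoPicture convertToPicture(BufferedImage image, long timeStampUs)
{
  IConverter converter = ConverterFactory.createConverter(
      image, IPixelFormat.Type.YUV420P);
  return converter.toPicture(image, timeStampUs);
}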

Examples of com.xuggle.xuggler.IVideoPicture

      if (packet.getStreamIndex() == videoStreamId)
      {
        // We allocate a new picture to get the data out of Xuggler

        IVideoPicture picture = IVideoPicture.make(videoCoder.getPixelType(),
            videoCoder.getWidth(), videoCoder.getHeight());

        int offset = 0;
        while(offset < packet.getSize())
        {
          // Now, we decode the video, checking for any errors.

          int bytesDecoded = videoCoder.decodeVideo(picture, packet, offset);
          if (bytesDecoded < 0)
            throw new RuntimeException("got error decoding video in: " + filename);
          offset += bytesDecoded;
         
          // Some decoders will consume data in a packet, but will not
          // be able to construct a full video picture yet.  Therefore
          // you should always check if you got a complete picture from
          // the decoder.

          if (picture.isComplete())
          {
            IVideoPicture newPic = picture;
           
            // If the resampler is not null, it means we didn't get the
            // video in BGR24 format and need to convert it into BGR24
            // format.

            if (resampler != null)
            {
              // we must resample
              newPic = IVideoPicture.make(
                resampler.getOutputPixelFormat(), picture.getWidth(),
                picture.getHeight());
              if (resampler.resample(newPic, picture) < 0)
                throw new RuntimeException(
                  "could not resample video from: " + filename);
            }

            if (newPic.getPixelType() != IPixelFormat.Type.BGR24)
              throw new RuntimeException(
                "could not decode video as BGR 24 bit data in: " + filename);

            // convert the BGR24 to a Java buffered image
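
The resampler used above is normally created once, right after the decoder is opened, and only when the decoder's native pixel format is not already BGR24. A sketch of that setup:

import com.xuggle.xuggler.IPixelFormat;
import com.xuggle.xuggler.IVideoResampler;

IVideoResampler resampler = null;
if (videoCoder.getPixelType() != IPixelFormat.Type.BGR24)
{
  // the decoder emits some other format; convert to BGR24 for Java imaging
  resampler = IVideoResampler.make(
      videoCoder.getWidth(), videoCoder.getHeight(), IPixelFormat.Type.BGR24,
      videoCoder.getWidth(), videoCoder.getHeight(), videoCoder.getPixelType());
  if (resampler == null)
    throw new RuntimeException("could not create color space resampler");
}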

Examples of com.xuggle.xuggler.IVideoPicture

      if (packet.getStreamIndex() == videoStreamId)
      {
        /*
         * We allocate a new picture to get the data out of Xuggler
         */
        IVideoPicture picture = IVideoPicture.make(videoCoder.getPixelType(),
            videoCoder.getWidth(), videoCoder.getHeight());

        int offset = 0;
        while(offset < packet.getSize())
        {
          /*
           * Now, we decode the video, checking for any errors.
           *
           */
          int bytesDecoded = videoCoder.decodeVideo(picture, packet, offset);
          if (bytesDecoded < 0)
            throw new RuntimeException("got error decoding video in: " + deviceName);
          offset += bytesDecoded;

          /*
           * Some decoders will consume data in a packet, but will not be able to construct
           * a full video picture yet.  Therefore you should always check if you
           * got a complete picture from the decoder
           */
          if (picture.isComplete())
          {
            IVideoPicture newPic = picture;
            /*
             * If the resampler is not null, that means we didn't get the video in BGR24 format and
             * need to convert it into BGR24 format.
             */
            if (resampler != null)
            {
              // we must resample
              newPic = IVideoPicture.make(resampler.getOutputPixelFormat(), picture.getWidth(), picture.getHeight());
              if (resampler.resample(newPic, picture) < 0)
                throw new RuntimeException("could not resample video from: " + deviceName);
            }
            if (newPic.getPixelType() != IPixelFormat.Type.BGR24)
              throw new RuntimeException("could not decode video as BGR 24 bit data in: " + deviceName);

            // Convert the BGR24 to a Java buffered image
            BufferedImage javaImage = Utils.videoPictureToImage(newPic);

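
Once the frame is a BufferedImage it can be handed to any Java imaging API. A minimal (hypothetical) way to display it with Swing:

import javax.swing.ImageIcon;
import javax.swing.JFrame;
import javax.swing.JLabel;

JFrame window = new JFrame("video");
JLabel view = new JLabel();
window.add(view);
window.setSize(videoCoder.getWidth(), videoCoder.getHeight());
window.setVisible(true);

// ... then, for each decoded frame inside the loop:
view.setIcon(new ImageIcon(javaImage));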

Examples of com.xuggle.xuggler.IVideoPicture

      if (packet.getStreamIndex() == videoStreamId)
      {
        /*
         * We allocate a new picture to get the data out of Xuggler
         */
        IVideoPicture picture = IVideoPicture.make(videoCoder.getPixelType(),
            videoCoder.getWidth(), videoCoder.getHeight());
       
        /*
         * Now, we decode the video, checking for any errors. Note that unlike
         * the earlier examples this call does not loop over an offset, so it
         * assumes each packet holds at most one frame's worth of data.
         */
        int bytesDecoded = videoCoder.decodeVideo(picture, packet, 0);
        if (bytesDecoded < 0)
          throw new RuntimeException("got error decoding video in: " + filename);

        /*
         * Some decoders will consume data in a packet, but will not be able to construct
         * a full video picture yet.  Therefore you should always check if you
         * got a complete picture from the decoder
         */
        if (picture.isComplete())
        {
          IVideoPicture newPic = picture;
          /*
           * If the resampler is not null, that means we didn't get the video in BGR24 format and
           * need to convert it into BGR24 format.
           */
          if (resampler != null)
          {
            // we must resample
            newPic = IVideoPicture.make(resampler.getOutputPixelFormat(), picture.getWidth(), picture.getHeight());
            if (resampler.resample(newPic, picture) < 0)
              throw new RuntimeException("could not resample video from: " + filename);
          }
          if (newPic.getPixelType() != IPixelFormat.Type.BGR24)
            throw new RuntimeException("could not decode video as BGR 24 bit data in: " + filename);

          long delay = millisecondsUntilTimeToDisplay(newPic);
          // If there is no audio stream, go ahead and hold up the main thread;
          // we'll end up caching fewer video pictures in memory that way.
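
millisecondsUntilTimeToDisplay(newPic) is not shown in this excerpt. In the Xuggler tutorial this code resembles, it compares the picture's microsecond timestamp against a wall-clock reference taken at the first frame; a sketch under that assumption:

import com.xuggle.xuggler.Global;
import com.xuggle.xuggler.IVideoPicture;

private long mFirstVideoTimestampInStream = Global.NO_PTS;
private long mSystemVideoClockStartTime = 0;

private long millisecondsUntilTimeToDisplay(IVideoPicture picture)
{
  if (mFirstVideoTimestampInStream == Global.NO_PTS)
  {
    // first frame: remember where the stream and the wall clock started
    mFirstVideoTimestampInStream = picture.getTimeStamp();
    mSystemVideoClockStartTime = System.currentTimeMillis();
    return 0;
  }
  long elapsedWallClockMs =
      System.currentTimeMillis() - mSystemVideoClockStartTime;
  long elapsedStreamMs =
      (picture.getTimeStamp() - mFirstVideoTimestampInStream) / 1000;
  return elapsedStreamMs - elapsedWallClockMs;
}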