
# HG changeset patch
# User Landry Breuil <landry@openbsd.org>
# Date 1763998694 0
# Node ID 568e210639f79dda7823455a3fded66735466344
# Parent  f23d592e80b9f929c85df96449b057503fd3a53c
Bug 1962139 - Adapt FFmpegVideoDecoder for ffmpeg8 r=media-playback-reviewers,alwu

Provide an IsKeyFrame() helper that handles the various libavcodec
versions, and use it where appropriate. Also, in ffmpeg 8 the byte
offset is found in the packet struct, no longer in the frame struct, so
read it into a variable local to DoDecode().
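
For reference, the two shims boil down to the sketch below (ffmpeg 8
ships libavcodec major version 62, hence the > 61 checks). IsKeyFrame()
matches the helper added in this patch; GetFrameOffset() is a
hypothetical name used only for illustration, since the patch instead
inlines the equivalent #if into a local variable in DoDecode():

    #include <libavcodec/avcodec.h>  // AVPacket, LIBAVCODEC_VERSION_MAJOR
    #include <libavutil/frame.h>     // AVFrame, AV_FRAME_FLAG_KEY

    // ffmpeg 8 removed AVFrame::key_frame; keyframe-ness is now the
    // AV_FRAME_FLAG_KEY bit in AVFrame::flags.
    static bool IsKeyFrame(const AVFrame* aFrame) {
    #if LIBAVCODEC_VERSION_MAJOR > 61
      return !!(aFrame->flags & AV_FRAME_FLAG_KEY);
    #else
      return !!aFrame->key_frame;
    #endif
    }

    // ffmpeg 8 also removed AVFrame::pkt_pos; the byte offset now has
    // to come from the AVPacket that was submitted to the decoder.
    static int64_t GetFrameOffset(const AVFrame* aFrame,
                                  const AVPacket* aPacket) {
    #if LIBAVCODEC_VERSION_MAJOR > 61
      return aPacket->pos;
    #else
      return aFrame->pkt_pos;
    #endif
    }

Wrapping the key-frame check in a helper avoids sprinkling version
#ifs across every CreateImage*() call site.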

Differential Revision: https://phabricator.services.mozilla.com/D272254


diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -1060,16 +1060,24 @@ void FFmpegVideoDecoder<LIBAV_VER>::Init
 static int64_t GetFramePts(const AVFrame* aFrame) {
 #if LIBAVCODEC_VERSION_MAJOR > 57
   return aFrame->pts;
 #else
   return aFrame->pkt_pts;
 #endif
 }
 
+static bool IsKeyFrame(const AVFrame* aFrame) {
+#if LIBAVCODEC_VERSION_MAJOR > 61
+  return !!(aFrame->flags & AV_FRAME_FLAG_KEY);
+#else
+  return !!aFrame->key_frame;
+#endif
+}
+
 #if LIBAVCODEC_VERSION_MAJOR >= 58
 void FFmpegVideoDecoder<LIBAV_VER>::DecodeStats::DecodeStart() {
   mDecodeStart = TimeStamp::Now();
 }
 
 bool FFmpegVideoDecoder<LIBAV_VER>::DecodeStats::IsDecodingSlow() const {
   return mDecodedFramesLate > mMaxLateDecodedFrames;
 }
@@ -1268,16 +1276,22 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER
     // Release unused VA-API surfaces before avcodec_receive_frame() as
     // ffmpeg recycles VASurface for HW decoding.
     if (mVideoFramePool) {
       mVideoFramePool->ReleaseUnusedVAAPIFrames();
     }
 #  endif
 
     int res = mLib->avcodec_receive_frame(mCodecContext, mFrame);
+    int64_t fpos =
+#  if LIBAVCODEC_VERSION_MAJOR > 61
+        packet->pos;
+#  else
+        mFrame->pkt_pos;
+#  endif
     if (res == int(AVERROR_EOF)) {
       if (MaybeQueueDrain(aResults)) {
         FFMPEG_LOG("  Output buffer shortage.");
         return NS_ERROR_NOT_AVAILABLE;
       }
       FFMPEG_LOG("  End of stream.");
       return NS_ERROR_DOM_MEDIA_END_OF_STREAM;
     }
@@ -1303,53 +1317,52 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER
         PROFILER_MARKER_TEXT("FFmpegVideoDecoder::DoDecode", MEDIA_PLAYBACK, {},
                              "Fallback to SW decode");
         FFMPEG_LOG("  HW decoding is slow, switching back to SW decode");
         return MediaResult(
             NS_ERROR_DOM_MEDIA_DECODE_ERR,
             RESULT_DETAIL("HW decoding is slow, switching back to SW decode"));
       }
       if (mUsingV4L2) {
-        rv = CreateImageV4L2(mFrame->pkt_pos, GetFramePts(mFrame),
-                             Duration(mFrame), aResults);
+        rv = CreateImageV4L2(fpos, GetFramePts(mFrame), Duration(mFrame),
+                             aResults);
       } else {
-        rv = CreateImageVAAPI(mFrame->pkt_pos, GetFramePts(mFrame),
-                              Duration(mFrame), aResults);
+        rv = CreateImageVAAPI(fpos, GetFramePts(mFrame), Duration(mFrame),
+                              aResults);
       }
 
       // If VA-API/V4L2 playback failed, just quit. Decoder is going to be
       // restarted without hardware acceleration
       if (NS_FAILED(rv)) {
         // Explicitly remove dmabuf surface pool as it's configured
         // for VA-API/V4L2 support.
         mVideoFramePool = nullptr;
         return rv;
       }
 #    elif defined(MOZ_ENABLE_D3D11VA)
       mDecodeStats.UpdateDecodeTimes(Duration(mFrame));
-      rv = CreateImageD3D11(mFrame->pkt_pos, GetFramePts(mFrame),
-                            Duration(mFrame), aResults);
+      rv = CreateImageD3D11(fpos, GetFramePts(mFrame), Duration(mFrame),
+                            aResults);
 #    elif defined(MOZ_WIDGET_ANDROID)
       InputInfo info(aSample);
       info.mTimecode = -1;
       TakeInputInfo(mFrame, info);
       mDecodeStats.UpdateDecodeTimes(info.mDuration);
-      rv = CreateImageMediaCodec(mFrame->pkt_pos, GetFramePts(mFrame),
-                                 info.mTimecode, info.mDuration, aResults);
+      rv = CreateImageMediaCodec(fpos, GetFramePts(mFrame), info.mTimecode,
+                                 info.mDuration, aResults);
 #    else
       mDecodeStats.UpdateDecodeTimes(Duration(mFrame));
       return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                          RESULT_DETAIL("No HW decoding implementation!"));
 #    endif
     } else
 #  endif
     {
       mDecodeStats.UpdateDecodeTimes(Duration(mFrame));
-      rv = CreateImage(mFrame->pkt_pos, GetFramePts(mFrame), Duration(mFrame),
-                       aResults);
+      rv = CreateImage(fpos, GetFramePts(mFrame), Duration(mFrame), aResults);
     }
     if (NS_FAILED(rv)) {
       return rv;
     }
 
     RecordFrame(aSample, aResults.LastElement());
     if (aGotFrame) {
       *aGotFrame = true;
@@ -1699,17 +1712,17 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER
       !requiresCopy) {
     RefPtr<ImageBufferWrapper> wrapper = static_cast<ImageBufferWrapper*>(
         mLib->av_buffer_get_opaque(mFrame->buf[0]));
     MOZ_ASSERT(wrapper);
     FFMPEG_LOGV("Create a video data from a shmem image=%p", wrapper.get());
     v = VideoData::CreateFromImage(
         mInfo.mDisplay, aOffset, TimeUnit::FromMicroseconds(aPts),
         TimeUnit::FromMicroseconds(aDuration), wrapper->AsImage(),
-        !!mFrame->key_frame, TimeUnit::FromMicroseconds(-1));
+        IsKeyFrame(mFrame), TimeUnit::FromMicroseconds(-1));
   }
 #endif
 #if defined(MOZ_WIDGET_GTK) && defined(MOZ_USE_HWDECODE)
   if (mUploadSWDecodeToDMABuf) {
     MOZ_DIAGNOSTIC_ASSERT(!v);
     if (!mVideoFramePool) {
       mVideoFramePool = MakeUnique<VideoFramePool<LIBAV_VER>>(10);
     }
@@ -1737,17 +1750,17 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER
                 ? ColorSpace2ToString(mInfo.mColorPrimaries.value())
                 : "unknown",
             mInfo.mTransferFunction
                 ? TransferFunctionToString(mInfo.mTransferFunction.value())
                 : "unknown");
         v = VideoData::CreateFromImage(
             mInfo.mDisplay, aOffset, TimeUnit::FromMicroseconds(aPts),
             TimeUnit::FromMicroseconds(aDuration), surface->GetAsImage(),
-            !!mFrame->key_frame, TimeUnit::FromMicroseconds(-1));
+            IsKeyFrame(mFrame), TimeUnit::FromMicroseconds(-1));
       } else {
         FFMPEG_LOG("Failed to uploaded video data to DMABuf");
       }
     } else {
       FFMPEG_LOG("Failed to convert PlanarYCbCrData");
     }
   }
 #endif
@@ -1757,17 +1770,17 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER
       if (NS_FAILED(ret.Code())) {
         FFMPEG_LOG("%s: %s", __func__, ret.Message().get());
         return ret;
       }
     }
     Result<already_AddRefed<VideoData>, MediaResult> r =
         VideoData::CreateAndCopyData(
             mInfo, mImageContainer, aOffset, TimeUnit::FromMicroseconds(aPts),
-            TimeUnit::FromMicroseconds(aDuration), b, !!mFrame->key_frame,
+            TimeUnit::FromMicroseconds(aDuration), b, IsKeyFrame(mFrame),
             TimeUnit::FromMicroseconds(mFrame->pkt_dts),
             mInfo.ScaledImageRect(mFrame->width, mFrame->height),
             mImageAllocator);
     if (r.isErr()) {
       return r.unwrapErr();
     }
     v = r.unwrap();
   }
@@ -1835,21 +1848,20 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER
              aPts, mFrame->pkt_dts, aDuration,
              YUVColorSpaceToString(GetFrameColorSpace()),
              mInfo.mColorPrimaries
                  ? ColorSpace2ToString(mInfo.mColorPrimaries.value())
                  : "unknown",
              mInfo.mTransferFunction
                  ? TransferFunctionToString(mInfo.mTransferFunction.value())
                  : "unknown");
-
   RefPtr<VideoData> vp = VideoData::CreateFromImage(
       mInfo.mDisplay, aOffset, TimeUnit::FromMicroseconds(aPts),
       TimeUnit::FromMicroseconds(aDuration), surface->GetAsImage(),
-      !!mFrame->key_frame, TimeUnit::FromMicroseconds(mFrame->pkt_dts));
+      IsKeyFrame(mFrame), TimeUnit::FromMicroseconds(mFrame->pkt_dts));
 
   if (!vp) {
     return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                        RESULT_DETAIL("VAAPI image allocation error"));
   }
 
   aResults.AppendElement(std::move(vp));
   return NS_OK;
@@ -1888,17 +1900,17 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER
                        RESULT_DETAIL("V4L2 dmabuf allocation error"));
   }
   surface->SetYUVColorSpace(GetFrameColorSpace());
   surface->SetColorRange(GetFrameColorRange());
 
   RefPtr<VideoData> vp = VideoData::CreateFromImage(
       mInfo.mDisplay, aOffset, TimeUnit::FromMicroseconds(aPts),
       TimeUnit::FromMicroseconds(aDuration), surface->GetAsImage(),
-      !!mFrame->key_frame, TimeUnit::FromMicroseconds(mFrame->pkt_dts));
+      IsKeyFrame(mFrame), TimeUnit::FromMicroseconds(mFrame->pkt_dts));
 
   if (!vp) {
     return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                        RESULT_DETAIL("V4L2 image creation error"));
   }
 
   aResults.AppendElement(std::move(vp));
   return NS_OK;
@@ -2342,17 +2354,17 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER
     nsPrintfCString msg("Failed to create a D3D image");
     FFMPEG_LOG("%s", msg.get());
     return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, msg);
   }
   MOZ_ASSERT(image);
 
   RefPtr<VideoData> v = VideoData::CreateFromImage(
       mInfo.mDisplay, aOffset, TimeUnit::FromMicroseconds(aPts),
-      TimeUnit::FromMicroseconds(aDuration), image, !!mFrame->key_frame,
+      TimeUnit::FromMicroseconds(aDuration), image, IsKeyFrame(mFrame),
       TimeUnit::FromMicroseconds(mFrame->pkt_dts));
   if (!v) {
     nsPrintfCString msg("D3D image allocation error");
     FFMPEG_LOG("%s", msg.get());
     return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, msg);
   }
   aResults.AppendElement(std::move(v));
   return NS_OK;

