In libavcodec/codec_list.c, add:
&ff_h264_qsv_decoder,
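A quick way to confirm the decoder really made it into the build is to look it up by name. A minimal standalone sketch (the include path assumes the Chromium-style third_party/ffmpeg layout used throughout this article):
extern "C" {
#include "third_party/ffmpeg/libavcodec/avcodec.h"
}
#include <cstdio>
int main() {
  avcodec_register_all();  // Deprecated no-op on FFmpeg >= 4.0.
  const AVCodec* codec = avcodec_find_decoder_by_name("h264_qsv");
  std::printf("h264_qsv decoder: %s\n", codec ? "available" : "missing");
  return codec ? 0 : 1;
}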
In ffmpeg_generate.gni, add:
"libavcodec/h264idct.c",
"libavcodec/h264qpel.c",
"libavcodec/startcode.c",
"libavcodec/h264_mp4toannexb_bsf.c",
]
}
ffmpeg_c_sources += [
"libavcodec/qsvenc_h264.c",
"libavcodec/qsvenc.c",
"libavcodec/qsv.c",
"libavcodec/qsvdec.c",
"libavcodec/qsvdec_h2645.c",
]
In libavcodec/bsf_list.c:
static const AVBitStreamFilter * const bitstream_filters[] = {
&ff_h264_mp4toannexb_bsf,
&ff_null_bsf,
NULL };
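The h264_mp4toannexb filter rewrites AVCC (length-prefixed) H.264 into Annex B start-code form, which is what h264_qsv consumes; WebRTC's RTP depacketizer already produces Annex B, so the filter mainly matters when feeding container data. A usage sketch against the AVBSFContext API (assumes FFmpeg >= 3.2; on FFmpeg >= 4.3 the declarations moved to libavcodec/bsf.h):
extern "C" {
#include "third_party/ffmpeg/libavcodec/avcodec.h"
}
// Converts one AVCC packet to Annex B in place; error handling trimmed.
int ConvertToAnnexB(const AVCodecParameters* par, AVPacket* pkt) {
  const AVBitStreamFilter* filter = av_bsf_get_by_name("h264_mp4toannexb");
  AVBSFContext* bsf = nullptr;
  if (!filter || av_bsf_alloc(filter, &bsf) < 0)
    return -1;
  avcodec_parameters_copy(bsf->par_in, par);
  if (av_bsf_init(bsf) < 0 || av_bsf_send_packet(bsf, pkt) < 0) {
    av_bsf_free(&bsf);
    return -1;
  }
  int ret = av_bsf_receive_packet(bsf, pkt);
  av_bsf_free(&bsf);
  return ret;
}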
Modify the win-msvc/x64/config.h configuration:
#define CONFIG_H264_QSV_DECODER 1
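Depending on the FFmpeg snapshot, the QSV decoder pulls in further configuration switches. The exact set should be taken from a native configure run rather than guessed; the following companions are plausible but unverified for this tree:
#define CONFIG_QSV 1
#define CONFIG_QSVDEC 1
#define CONFIG_H264_MP4TOANNEXB_BSF 1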
The implementation of h264_decoder_impl_ffmpeg.cc:
#include "modules/video_coding/codecs/h264/h264_decoder_impl_ffmpeg.h"
#include <algorithm>
#include <limits>
extern "C" {
#include "third_party/ffmpeg/libavcodec/avcodec.h"
#include "third_party/ffmpeg/libavformat/avformat.h"
#include "third_party/ffmpeg/libavutil/imgutils.h"
#include "third_party/ffmpeg/libavutil/opt.h"
} // extern "C"
#include "base/checks.h"
#include "base/criticalsection.h"
#include "base/keep_ref_until_done.h"
#include "base/logging.h"
#include "system_wrappers/include/metrics.h"
#include "libyuv/convert.h"
namespace webrtc {
namespace {
#define PRINT_TIME_DECODE_DELAY 0
const AVPixelFormat kPixelFormat = AV_PIX_FMT_YUV420P;
const size_t kYPlaneIndex = 0;
const size_t kUPlaneIndex = 1;
const size_t kVPlaneIndex = 2;
// Used by histograms. Values of entries should not be changed.
enum H264DecoderImplEvent {
kH264DecoderEventInit = 0,
kH264DecoderEventError = 1,
kH264DecoderEventMax = 16,
};
#if defined(WEBRTC_INITIALIZE_FFMPEG)
rtc::CriticalSection ffmpeg_init_lock;
bool ffmpeg_initialized = false;
// Called by FFmpeg to do mutex operations if initialized using
// |InitializeFFmpeg|.
int LockManagerOperation(void** lock, AVLockOp op)
EXCLUSIVE_LOCK_FUNCTION() UNLOCK_FUNCTION() {
switch (op) {
case AV_LOCK_CREATE:
*lock = new rtc::CriticalSection();
return 0;
case AV_LOCK_OBTAIN:
static_cast<rtc::CriticalSection*>(*lock)->Enter();
return 0;
case AV_LOCK_RELEASE:
static_cast<rtc::CriticalSection*>(*lock)->Leave();
return 0;
case AV_LOCK_DESTROY:
delete static_cast<rtc::CriticalSection*>(*lock);
*lock = nullptr;
return 0;
}
RTC_NOTREACHED() << "Unrecognized AVLockOp.";
return -1;
}
void InitializeFFmpeg() {
LOG_F(LS_INFO);
rtc::CritScope cs(&ffmpeg_init_lock);
if (!ffmpeg_initialized) {
if (av_lockmgr_register(LockManagerOperation) < 0) {
RTC_NOTREACHED() << "av_lockmgr_register failed.";
return;
}
av_register_all();
ffmpeg_initialized = true;
}
}
#endif // defined(WEBRTC_INITIALIZE_FFMPEG)
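// Editor's note: av_lockmgr_register() and av_register_all() were deprecated
// in FFmpeg 4.0 and removed in later releases. Against a newer FFmpeg this
// whole WEBRTC_INITIALIZE_FFMPEG block becomes unnecessary, since the
// library is thread-safe by default.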
} // namespace
int H264DecoderImplFfmpeg::AVGetBuffer2(
AVCodecContext* context, AVFrame* av_frame, int flags) {
// Set in |InitDecode|.
H264DecoderImplFfmpeg* decoder = static_cast<H264DecoderImplFfmpeg*>(context->opaque);
// DCHECK values set in |InitDecode|.
RTC_DCHECK(decoder);
RTC_DCHECK_EQ(context->pix_fmt, kPixelFormat);
// Necessary capability to be allowed to provide our own buffers.
RTC_DCHECK(context->codec->capabilities & AV_CODEC_CAP_DR1);
// |av_frame->width| and |av_frame->height| are set by FFmpeg. These are the
// actual image's dimensions and may be different from |context->width| and
// |context->coded_width| due to reordering.
int width = av_frame->width;
int height = av_frame->height;
// See |lowres|, if used the decoder scales the image by 1/2^(lowres). This
// has implications on which resolutions are valid, but we don't use it.
RTC_CHECK_EQ(context->lowres, 0);
// Adjust the |width| and |height| to values acceptable by the decoder.
// Without this, FFmpeg may overflow the buffer. If modified, |width| and/or
// |height| are larger than the actual image and the image has to be cropped
// (top-left corner) after decoding to avoid visible borders to the right and
// bottom of the actual image.
avcodec_align_dimensions(context, &width, &height);
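// Example: for a 1920x1080 stream avcodec_align_dimensions() typically
// returns 1920x1088; the 8 extra bottom rows exist only in the buffer and
// are cropped before the frame reaches the callback (see Decode()).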
RTC_CHECK_GE(width, 0);
RTC_CHECK_GE(height, 0);
int ret = av_image_check_size(static_cast<unsigned int>(width),
                              static_cast<unsigned int>(height), 0, nullptr);
if (ret < 0) {
LOG(LS_ERROR) << "Invalid picture size " << width << "x" << height;
decoder->ReportError();
return ret;
}
// The video frame is stored in |video_frame|. |av_frame| is FFmpeg's version
// of a video frame and will be set up to reference |video_frame|'s buffers.
VideoFrame* video_frame = new VideoFrame();
// FFmpeg expects the initial allocation to be zero-initialized according to
// http://crbug.com/390941. Our pool is set up to zero-initialize new buffers.
video_frame->set_video_frame_buffer(
decoder->pool_.CreateBuffer(width, height));
// DCHECK that we have a continuous buffer as is required.
RTC_DCHECK_EQ(video_frame->buffer(kUPlane),
video_frame->buffer(kYPlane) + video_frame->allocated_size(kYPlane));
RTC_DCHECK_EQ(video_frame->buffer(kVPlane),
video_frame->buffer(kUPlane) + video_frame->allocated_size(kUPlane));
int total_size = video_frame->allocated_size(kYPlane) +
video_frame->allocated_size(kUPlane) +
video_frame->allocated_size(kVPlane);
av_frame->format = context->pix_fmt;
av_frame->reordered_opaque = context->reordered_opaque;
// Set |av_frame| members as required by FFmpeg.
av_frame->data[kYPlaneIndex] = video_frame->buffer(kYPlane);
av_frame->linesize[kYPlaneIndex] = video_frame->stride(kYPlane);
av_frame->data[kUPlaneIndex] = video_frame->buffer(kUPlane);
av_frame->linesize[kUPlaneIndex] = video_frame->stride(kUPlane);
av_frame->data[kVPlaneIndex] = video_frame->buffer(kVPlane);
av_frame->linesize[kVPlaneIndex] = video_frame->stride(kVPlane);
RTC_DCHECK_EQ(av_frame->extended_data, av_frame->data);
av_frame->buf[0] = av_buffer_create(av_frame->data[kYPlaneIndex],
total_size,
AVFreeBuffer2,
static_cast<void*>(video_frame),
0);
RTC_CHECK(av_frame->buf[0]);
return 0;
}
void H264DecoderImplFfmpeg::AVFreeBuffer2(void* opaque, uint8_t* data) {
// The buffer pool recycles the buffer used by |video_frame| when there are no
// more references to it. |video_frame| is a thin buffer holder and is not
// recycled.
VideoFrame* video_frame = static_cast<VideoFrame*>(opaque);
delete video_frame;
}
H264DecoderImplFfmpeg::H264DecoderImplFfmpeg(bool is_hw) : pool_(true),
decoded_image_callback_(nullptr),
has_reported_init_(false),
has_reported_error_(false),
clock_(Clock::GetRealTimeClock()),
isFirstFrame(true),
is_hw_(is_hw) {
start_time_ = clock_->TimeInMilliseconds();
}
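// Editor's note: the counters used below (decode_cnt_, decoded_cnt_,
// idr_cnt_, discard_cnt_) are assumed to be zero-initialized in the class
// declaration, which is not shown in this article.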
H264DecoderImplFfmpeg::~H264DecoderImplFfmpeg() {
Release();
int64_t deltaTimeSec = (clock_->TimeInMilliseconds() - start_time_)/1000;
LOG(LS_INFO) << "discard_cnt_:" << discard_cnt_
<< ", decode_cnt_:" << decode_cnt_
<< ", idr_cnt_:" << idr_cnt_
<< ", decoded_cnt_:" << decoded_cnt_
<< ", deltaTimeSec:" << deltaTimeSec
<< ", average framte rate:" << (deltaTimeSec ? (decoded_cnt_/deltaTimeSec) : decoded_cnt_);
}
void H264DecoderImplFfmpeg::PrintDecoderSettings(const VideoCodec* codec_settings, const AVCodecContext* codec_ctx) {
LOG(LS_INFO) << " ";
LOG(LS_INFO) << "#############################################################";
LOG(LS_INFO) << "# Decoder Parameter Setting: #";
LOG(LS_INFO) << "#############################################################";
LOG(LS_INFO) << "codec name :" << codec_ctx->codec->name;
LOG(LS_INFO) << "codec type :" << codec_ctx->codec_type;
LOG(LS_INFO) << "codec id :" << codec_ctx->codec_id;
LOG(LS_INFO) << "codec_settings.width :" << codec_settings->width;
LOG(LS_INFO) << "codec_settings.height :" << codec_settings->height;
LOG(LS_INFO) << "codec_settings.startBitrate :" << codec_settings->startBitrate;
LOG(LS_INFO) << "codec_settings.maxBitrate :" << codec_settings->maxBitrate;
LOG(LS_INFO) << "codec_settings.minBitrate :" << codec_settings->minBitrate;
LOG(LS_INFO) << "codec_settings.targetBitrate :" << codec_settings->targetBitrate;
LOG(LS_INFO) << "codec_settings.maxFramerate :" << static_cast(codec_settings->maxFramerate);
LOG(LS_INFO) << "------------------------------------------------------------ ";
LOG(LS_INFO) << "codec_ctx.width :" << codec_ctx->width;
LOG(LS_INFO) << "codec_ctx.height :" << codec_ctx->height;
LOG(LS_INFO) << "codec_ctx.pix_fmt :" << codec_ctx->pix_fmt;
LOG(LS_INFO) << "codec_ctx.flags :" << static_cast(codec_ctx->flags);
LOG(LS_INFO) << "codec_ctx.bit_rate :" << codec_ctx->bit_rate;
LOG(LS_INFO) << "#############################################################";
}
int32_t H264DecoderImplFfmpeg::InitHwDecode(const VideoCodec* codec_settings) {
AVCodec* codec = avcodec_find_decoder_by_name("h264_qsv");
if (!codec) {
// This is an indication that FFmpeg has not been initialized or it has not
// been compiled/initialized with the correct set of codecs.
LOG(LS_ERROR) << "FFmpeg H.264 HW decoder not found.";
Release();
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;
}
LOG(LS_INFO) << "Found decoder codec name " << codec->name;
av_context_.reset(avcodec_alloc_context3(codec));
if (codec_settings) {
av_context_->coded_width = codec_settings->width;
av_context_->coded_height = codec_settings->height;
}
av_context_->pix_fmt = AV_PIX_FMT_NV12;
av_opt_set(av_context_->priv_data, "async_depth", "1", 0);
int res = avcodec_open2(av_context_.get(), codec, nullptr);
if (res < 0) {
LOG(LS_ERROR) << "avcodec_open2 error: " << res;
Release();
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;
}
PrintDecoderSettings(codec_settings, av_context_.get());
av_frame_.reset(av_frame_alloc());
return WEBRTC_VIDEO_CODEC_OK;
}
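// Editor's note: even when h264_qsv is compiled in, avcodec_open2() can
// still fail at runtime on machines without an Intel GPU or the Media SDK
// runtime, so callers should be prepared to fall back to the software
// decoder (is_hw == false); see the sketch at the end of the article.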
int32_t H264DecoderImplFfmpeg::InitDecode(const VideoCodec* codec_settings,
int32_t number_of_cores) {
LOG_F(LS_INFO);
ReportInit();
isFirstFrame = true;
if (codec_settings &&
codec_settings->codecType != kVideoCodecH264) {
ReportError();
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
// FFmpeg must have been initialized (with |av_lockmgr_register| and
// |av_register_all|) before we proceed. |InitializeFFmpeg| does this, which
// makes sense for WebRTC standalone. In other cases, such as Chromium, FFmpeg
// is initialized externally and calling |InitializeFFmpeg| would be
// thread-unsafe and result in FFmpeg being initialized twice, which could
// break other FFmpeg usage. See the |rtc_initialize_ffmpeg| flag.
#if defined(WEBRTC_INITIALIZE_FFMPEG)
// Make sure FFmpeg has been initialized. Subsequent |InitializeFFmpeg| calls
// do nothing.
InitializeFFmpeg();
#endif
// Release necessary in case of re-initializing.
int32_t ret = Release();
if (ret != WEBRTC_VIDEO_CODEC_OK) {
ReportError();
return ret;
}
RTC_DCHECK(!av_context_);
if (is_hw_) {
return InitHwDecode(codec_settings);
}
// Initialize AVCodecContext.
av_context_.reset(avcodec_alloc_context3(nullptr));
av_context_->codec_type = AVMEDIA_TYPE_VIDEO;
av_context_->codec_id = AV_CODEC_ID_H264;
if (codec_settings) {
av_context_->coded_width = codec_settings->width;
av_context_->coded_height = codec_settings->height;
}
av_context_->pix_fmt = kPixelFormat;
av_context_->extradata = nullptr;
av_context_->extradata_size = 0;
// If this is ever increased, look at |av_context_->thread_safe_callbacks| and
// make it possible to disable the thread checker in the frame buffer pool.
av_context_->thread_count = av_cpu_count() + 1;
av_context_->thread_type = FF_THREAD_SLICE;
// Function used by FFmpeg to get buffers to store decoded frames in.
av_context_->get_buffer2 = AVGetBuffer2;
// |get_buffer2| is called with the context, there |opaque| can be used to get
// a pointer |this|.
av_context_->opaque = this;
// Use ref counted frames (av_frame_unref).
av_context_->refcounted_frames = 1; // true
AVCodec* codec = avcodec_find_decoder(av_context_->codec_id);
if (!codec) {
// This is an indication that FFmpeg has not been initialized or it has not
// been compiled/initialized with the correct set of codecs.
LOG(LS_ERROR) << "FFmpeg H.264 decoder not found.";
Release();
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;
}
int res = avcodec_open2(av_context_.get(), codec, nullptr);
if (res < 0) {
LOG(LS_ERROR) << "avcodec_open2 error: " << res;
Release();
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;
}
PrintDecoderSettings(codec_settings, av_context_.get());
av_frame_.reset(av_frame_alloc());
return WEBRTC_VIDEO_CODEC_OK;
}
int32_t H264DecoderImplFfmpeg::Release() {
avcodec_close(av_context_.get());
av_context_.reset();
av_frame_.reset();
return WEBRTC_VIDEO_CODEC_OK;
}
int32_t H264DecoderImplFfmpeg::RegisterDecodeCompleteCallback(
DecodedImageCallback* callback) {
decoded_image_callback_ = callback;
return WEBRTC_VIDEO_CODEC_OK;
}
int32_t H264DecoderImplFfmpeg::Decode(const EncodedImage& input_image,
bool /*missing_frames*/,
const RTPFragmentationHeader* /*fragmentation*/,
const CodecSpecificInfo* codec_specific_info,
int64_t /*render_time_ms*/) {
if (!IsInitialized()) {
ReportError();
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
if (!decoded_image_callback_) {
LOG(LS_WARNING) << "InitDecode() has been called, but a callback function "
"has not been set with RegisterDecodeCompleteCallback()";
ReportError();
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
if (!input_image._buffer || !input_image._length) {
ReportError();
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (codec_specific_info &&
codec_specific_info->codecType != kVideoCodecH264) {
ReportError();
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if ((input_image._frameType != kVideoFrameKey) && isFirstFrame) {
LOG_F(LS_WARNING) << " first frame must be an IDR frame";
++discard_cnt_;
return WEBRTC_VIDEO_CODEC_ERROR;
}
if (input_image._frameType == kVideoFrameKey) {
++idr_cnt_;
}
isFirstFrame = false;
#if PRINT_TIME_DECODE_DELAY
int64_t h264_decode_start_time = clock_->TimeInMilliseconds();
#endif
// FFmpeg requires padding due to some optimized bitstream readers reading 32
// or 64 bits at once and could read over the end. See avcodec_decode_video2.
RTC_CHECK_GE(input_image._size, input_image._length +
EncodedImage::GetBufferPaddingBytes(kVideoCodecH264));
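// EncodedImage::GetBufferPaddingBytes(kVideoCodecH264) mirrors FFmpeg's
// AV_INPUT_BUFFER_PADDING_SIZE (32 bytes in older releases, 64 in newer
// ones), which is why the capacity check above must hold.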
// "If the first 23 bits of the additional bytes are not 0, then damaged MPEG
// bitstreams could cause overread and segfault." See
// AV_INPUT_BUFFER_PADDING_SIZE. We'll zero the entire padding just in case.
memset(input_image._buffer + input_image._length,
0,
EncodedImage::GetBufferPaddingBytes(kVideoCodecH264));
AVPacket packet;
av_init_packet(&packet);
packet.data = input_image._buffer;
if (input_image._length >
static_cast<size_t>(std::numeric_limits<int>::max())) {
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;
}
packet.size = static_cast<int>(input_image._length);
av_context_->reordered_opaque = input_image.ntp_time_ms_ * 1000; // ms -> us
decode_cnt_++;
int frame_decoded = 0;
RTC_CHECK(av_frame_.get());
int result = avcodec_decode_video2(av_context_.get(),
av_frame_.get(),
&frame_decoded,
&packet);
if (result < 0) {
LOG(LS_ERROR) << "avcodec_decode_video2 error: " << result;
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;
}
// |result| is number of bytes used, which should be all of them.
if (result != packet.size) {
LOG(LS_ERROR) << "avcodec_decode_video2 consumed " << result << " bytes "
"when " << packet.size << " bytes were expected.";
ReportError();
return WEBRTC_VIDEO_CODEC_ERROR;
}
if (!frame_decoded) {
LOG(LS_WARNING) << "avcodec_decode_video2 successful but no frame was "
"decoded.";
return WEBRTC_VIDEO_CODEC_OK;
}
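// Note: with h264_qsv the early return above is expected for the first
// packets while the QSV pipeline fills its internal queue (about two
// frames; see the latency discussion at the end of the article).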
decoded_cnt_++;
#if PRINT_TIME_DECODE_DELAY
int64_t h264_decode_end_time = clock_->TimeInMilliseconds();
int64_t h264_decode_use_time = h264_decode_end_time - h264_decode_start_time;
LOG(LS_INFO) << "Decode: hardware enable: " << is_hw_ << " use_time_ms:" << h264_decode_use_time;
#endif
if (is_hw_) {
if (!temp_frame_) {
temp_frame_.reset(av_frame_alloc());
if (!temp_frame_) {
LOG(LS_ERROR) << "Could not allocate video frame";
return WEBRTC_VIDEO_CODEC_ERROR;
}
temp_frame_->format = AV_PIX_FMT_YUV420P;  // I420 destination for the NV12 conversion below.
temp_frame_->width = av_frame_->width;
temp_frame_->height = av_frame_->height;
int ret = av_frame_get_buffer(temp_frame_.get(), 32);
if (ret < 0) {
LOG(LS_ERROR) << "Could not allocate the video frame data";
return WEBRTC_VIDEO_CODEC_ERROR;
}
}
// Convert NV12 (Y plane + interleaved UV plane) to I420. The destination
// pointers and strides must come from |temp_frame_|, not |av_frame_|,
// whose buffers may use a different alignment.
int ret = libyuv::NV12ToI420(
    av_frame_->data[0], av_frame_->linesize[0],
    av_frame_->data[1], av_frame_->linesize[1],
    temp_frame_->data[0], temp_frame_->linesize[0],
    temp_frame_->data[1], temp_frame_->linesize[1],
    temp_frame_->data[2], temp_frame_->linesize[2],
    av_frame_->width, av_frame_->height);
LOG(LS_VERBOSE) << "Decoded Video Frame. input_image._length[" << input_image._length
<< "], input_image._size[" << input_image._size
<< "], decode number[" << decoded_cnt_
<< "], timestamp[" << input_image._timeStamp
<< "], temp_frame width[" << temp_frame_->width
<< "], temp_frame height[" << temp_frame_->height
<< "], temp_frame strideY[" << temp_frame_->linesize[0]
<< "], temp_frame strideU[" << temp_frame_->linesize[1]
<< "], AVFrame width[" << av_frame_->width
<< "], AVFrame height[" << av_frame_->height
<< "], AVFrame lines[0][" << av_frame_->linesize[0]
<< "], AVFrame lines[1][" << av_frame_->linesize[1] << "].";
decoded_frame_.CreateEmptyFrame(av_frame_->width, av_frame_->height,
                                av_frame_->width, av_frame_->width/2,
                                av_frame_->width/2);
uint8_t *dst_y = decoded_frame_.buffer(kYPlane);
uint8_t *src_y = temp_frame_->data[kYPlane];
uint8_t *dst_u = decoded_frame_.buffer(kUPlane);
uint8_t *src_u = temp_frame_->data[kUPlane];
uint8_t *dst_v = decoded_frame_.buffer(kVPlane);
uint8_t *src_v = temp_frame_->data[kVPlane];
// |temp_frame_|'s rows may be padded (av_frame_get_buffer() aligns the
// strides), so copy plane by plane honoring its linesize instead of
// assuming stride == width.
for (int i = 0; i < av_frame_->height; ++i)
  memcpy(dst_y + i * av_frame_->width,
         src_y + i * temp_frame_->linesize[0], av_frame_->width);
for (int i = 0; i < av_frame_->height / 2; ++i) {
  memcpy(dst_u + i * av_frame_->width / 2,
         src_u + i * temp_frame_->linesize[1], av_frame_->width / 2);
  memcpy(dst_v + i * av_frame_->width / 2,
         src_v + i * temp_frame_->linesize[2], av_frame_->width / 2);
}
decoded_frame_.set_timestamp(input_image._timeStamp);
decoded_frame_.SetIncomingTimeMs(input_image._incomingTimeMs);
decoded_frame_.SetFrameCnt(decode_cnt_);
ret = decoded_image_callback_->Decoded(decoded_frame_);
// Stop referencing it, possibly freeing |video_frame|.
av_frame_unref(av_frame_.get());
if (ret) {
LOG(LS_WARNING) << "DecodedImageCallback::Decoded returned " << ret;
return ret;
}
return WEBRTC_VIDEO_CODEC_OK;
} // end of is_hw_
// Obtain the |video_frame| containing the decoded image.
VideoFrame* video_frame = static_cast<VideoFrame*>(
    av_buffer_get_opaque(av_frame_->buf[0]));
RTC_DCHECK(video_frame);
RTC_CHECK_EQ(av_frame_->data[kYPlane], video_frame->buffer(kYPlane));
RTC_CHECK_EQ(av_frame_->data[kUPlane], video_frame->buffer(kUPlane));
RTC_CHECK_EQ(av_frame_->data[kVPlane], video_frame->buffer(kVPlane));
video_frame->set_timestamp(input_image._timeStamp);
video_frame->SetIncomingTimeMs(input_image._incomingTimeMs);
LOG(LS_VERBOSE) << "Decoded Video Frame. input_image._length[" << input_image._length
<< "], input_image._size[" << input_image._size
<< "], decode number[" << decode_cnt_
<< "], timestamp[" << input_image._timeStamp
<< "], pointer[" << (void*)(video_frame->video_frame_buffer()->DataY())
<< "],video frame width[" << video_frame->width()
<< "],video frame height[" << video_frame->height()
<< "],video frame strideY[" << video_frame->stride(kYPlane)
<< "],video frame strideU[" << video_frame->stride(kUPlane)
<< "],AVFrame width[" << av_frame_->width
<< "],AVFrame height[" << av_frame_->height
<< "],AVFrame lines[0][" << av_frame_->linesize[0]
<< "],AVFrame lines[1][" << av_frame_->linesize[1] << "].";
int32_t ret = 0;
// The decoded image may be larger than what is supposed to be visible, see
// |AVGetBuffer2|'s use of |avcodec_align_dimensions|. This crops the image
// without copying the underlying buffer.
rtc::scoped_refptr<VideoFrameBuffer> buf = video_frame->video_frame_buffer();
if((av_frame_->width != buf->width()) || (av_frame_->height != buf->height())) {
decoded_frame_.CreateEmptyFrame(av_frame_->width, av_frame_->height,
                                av_frame_->width, av_frame_->width/2,
                                av_frame_->width/2);
uint8_t *dst_y = decoded_frame_.buffer(kYPlane);
uint8_t *src_y = const_cast<uint8_t*>(video_frame->video_frame_buffer()->DataY());
uint8_t *dst_u = decoded_frame_.buffer(kUPlane);
uint8_t *src_u = const_cast<uint8_t*>(video_frame->video_frame_buffer()->DataU());
uint8_t *dst_v = decoded_frame_.buffer(kVPlane);
uint8_t *src_v = const_cast<uint8_t*>(video_frame->video_frame_buffer()->DataV());
if(av_frame_->width == buf->width()) {
memcpy(dst_y, src_y, av_frame_->width * av_frame_->height);
memcpy(dst_u, src_u, av_frame_->width * av_frame_->height/4);
memcpy(dst_v, src_v, av_frame_->width * av_frame_->height/4);
} else {
for(int i = 0; i < av_frame_->height; i++){
memcpy(dst_y, src_y, av_frame_->width);
dst_y += av_frame_->width;
src_y += buf->width();
}
for(int i = 0; i < av_frame_->height/2; i++){
memcpy(dst_u, src_u, av_frame_->width/2);
dst_u += av_frame_->width/2;
src_u += buf->width()/2;
}
for(int i = 0; i < av_frame_->height/2; i++){
memcpy(dst_v, src_v, av_frame_->width/2);
dst_v += av_frame_->width/2;
src_v += buf->width()/2;
}
}
decoded_frame_.set_timestamp(input_image._timeStamp);
decoded_frame_.SetIncomingTimeMs(input_image._incomingTimeMs);
decoded_frame_.SetFrameCnt(decode_cnt_);
ret = decoded_image_callback_->Decoded(decoded_frame_);
} else {
// Currently not reached: the decoded size always differs from the
// allocated buffer size because of dimension alignment.
LOG(LS_ERROR) << "Unexpected path: decoded frame did not need cropping.";
video_frame->SetFrameCnt(decode_cnt_);
ret = decoded_image_callback_->Decoded(*video_frame);
}
// Stop referencing it, possibly freeing |video_frame|.
av_frame_unref(av_frame_.get());
video_frame = nullptr;
if (ret) {
LOG(LS_WARNING) << "DecodedImageCallback::Decoded returned " << ret;
return ret;
}
return WEBRTC_VIDEO_CODEC_OK;
}
bool H264DecoderImplFfmpeg::IsInitialized() const {
return av_context_ != nullptr;
}
void H264DecoderImplFfmpeg::ReportInit() {
if (has_reported_init_)
return;
RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264DecoderImpl.Event",
kH264DecoderEventInit,
kH264DecoderEventMax);
has_reported_init_ = true;
}
void H264DecoderImplFfmpeg::ReportError() {
if (has_reported_error_)
return;
RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264DecoderImpl.Event",
kH264DecoderEventError,
kH264DecoderEventMax);
has_reported_error_ = true;
}
} // namespace webrtc
With QSV hardware decoding, the decoder buffers about two frames of video internally. At fps=15 a frame lasts roughly 66 ms, so two buffered frames translate to about 120-130 ms of extra delay. That is acceptable in a player, but in RTC it makes end-to-end latency noticeably worse; there may also be decoder parameters that reduce this buffering.
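If the added delay matters, two mitigations are worth trying. First, keep the decoder queue short: InitHwDecode() above already sets async_depth to 1, and setting AV_CODEC_FLAG_LOW_DELAY on the context is FFmpeg's generic hint to minimize internal buffering (whether qsvdec honors it depends on the FFmpeg version and driver). Second, fall back to software decoding when QSV is unavailable. A minimal sketch, reusing the constructor and InitDecode() shown above; the factory function itself is illustrative, not part of the original code:
#include <memory>
std::unique_ptr<webrtc::H264DecoderImplFfmpeg> CreateH264Decoder(
    const webrtc::VideoCodec* settings) {
  // Try the QSV path first.
  std::unique_ptr<webrtc::H264DecoderImplFfmpeg> decoder(
      new webrtc::H264DecoderImplFfmpeg(/*is_hw=*/true));
  if (decoder->InitDecode(settings, /*number_of_cores=*/1) ==
      WEBRTC_VIDEO_CODEC_OK) {
    return decoder;
  }
  // QSV unavailable: fall back to the software decoder.
  decoder.reset(new webrtc::H264DecoderImplFfmpeg(/*is_hw=*/false));
  decoder->InitDecode(settings, 1);
  return decoder;
}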