Heim > Fragen und Antworten > Hauptteil
iOS 上使用 ffmpeg 将硬件编码之后的音视频数据保存为 MP4,结果音频播放正常,但视频非常卡:音频播放完时,视频还有 3/4 没有播完,好几秒才跳一帧。打印过时间戳,没有发现问题;怀疑是下面计算 pts 的这行代码有问题,但不知道该怎么处理:pkt.pts = av_rescale_q(num_frame, vSt->codec->time_base, vSt->time_base);
若把这一行里的 num_frame 除以 5,视频播放就挺流畅,但音视频还是有少许错位。
保存MP4用的是 ffmpeg 中的av_interleaved_write_frame
等接口,具体代码在下面:
// 写视频数据
void h264_record::write_frame( const void* p, int len, uint32_t timestamp )
{
// printf("tttttttttttttttttt: %lld\n", timestamp);
CriticalSectionScoped lock(_recordVoipCrit);
if (!formatCtxt_ || videoStreamdIndex_ < 0 )
return;
AVStream *vSt = formatCtxt_->streams[videoStreamdIndex_];
AVStream *pst = formatCtxt_->streams[ audioStreamdIndex_ ];
// Init packet
AVPacket pkt;
av_init_packet( &pkt );
pkt.flags |= ( 0 == getVopType( p, len ) ) ? AV_PKT_FLAG_KEY : 0;
pkt.stream_index = vSt->index;
pkt.data = (uint8_t*)p;
pkt.size = len;
// Wait for key frame
if ( waitkey_ ) {
if ( 0 == ( pkt.flags & AV_PKT_FLAG_KEY ) )
return;
else
waitkey_ = 0;
}
pkt.dts = AV_NOPTS_VALUE;
pkt.pts = AV_NOPTS_VALUE;
if(baseH264TimeStamp_ == 0) {
baseH264TimeStamp_ = timestamp;
}
AVCodecContext *avccxt = vSt->codec;
float seconds = (float)(timestamp - baseH264TimeStamp_)/90000;
if (seconds < 0) {
WEBRTC_TRACE(cloopenwebrtc::kTraceError,
cloopenwebrtc::kTraceVideoCoding,
0,
"timestamp:%d baseH264TimeStamp_:%d seconds:%f\n",
timestamp, baseH264TimeStamp_, seconds);
return;
}
// 1/fps
float timebase = (float)avccxt->time_base.num/avccxt->time_base.den;
//算出这是第几帧, 不知道这个地方有没有问题
uint32_t num_frame = (uint32_t)(seconds / timebase);
if(num_frame !=0 && (num_frame <= lastVideoFrameNum_)) {
num_frame = lastVideoFrameNum_ + 1;
}
pkt.pts = av_rescale_q(num_frame, vSt->codec->time_base, vSt->time_base);
lastVideoFrameNum_ = num_frame;
int ret = av_interleaved_write_frame(formatCtxt_, &pkt);
if(ret != 0) {
}
}
VideoStream 创建设置:
// Create and configure the H.264 video stream on the output context.
//   oc       - already-opened output AVFormatContext
//   codec_id - requested codec id (this recorder always writes H.264,
//              so the parameter is currently ignored)
//   data     - 34 bytes of Annex-B extradata: 4-byte start code + SPS/PPS
// Returns the new stream, or NULL on failure.
static AVStream* add_video_stream(AVFormatContext *oc, enum AVCodecID codec_id, unsigned char *data)
{
static const int kExtradataSize = 34; // caller hands us exactly 34 bytes
AVStream *formatSt = avformat_new_stream(oc, NULL);
if (!formatSt) {
WEBRTC_TRACE(cloopenwebrtc::kTraceError,
cloopenwebrtc::kTraceVideoCoding,
0,
"Could not allocate stream\n");
return NULL;
}
formatSt->id = 0; // single video stream; oc->nb_streams-1 would also work
// Minimal fake codec descriptor, only used to seed video defaults.
AVCodec codec= {0};
codec.type= AVMEDIA_TYPE_VIDEO;
AVCodecContext *context = formatSt->codec;
avcodec_get_context_defaults3( context, &codec );
context->codec_type = AVMEDIA_TYPE_VIDEO;
context->codec_id = AV_CODEC_ID_H264;
context->pix_fmt = AV_PIX_FMT_YUV420P;
// Stream clock matches the 90 kHz RTP clock; the codec time base is the
// nominal frame duration (15 fps).  NOTE: the MP4 muxer may overwrite
// formatSt->time_base in avformat_write_header(), so writers must not
// assume it stays 1/90000.
formatSt->time_base.num = 1;
formatSt->time_base.den = 90000;
context->time_base.num = 1;
context->time_base.den = 15;
// BUG FIX: extradata must be allocated with the av_* allocator
// (libavcodec releases it with av_free(); pairing plain malloc with
// av_free is undefined) and must carry FF_INPUT_BUFFER_PADDING_SIZE
// zeroed trailing bytes as required by the decoder contract.
context->extradata = (uint8_t*)av_mallocz(kExtradataSize + FF_INPUT_BUFFER_PADDING_SIZE);
if (!context->extradata) {
WEBRTC_TRACE(cloopenwebrtc::kTraceError,
cloopenwebrtc::kTraceVideoCoding,
0,
"Could not allocate extradata\n");
return NULL;
}
memcpy(context->extradata, data, kExtradataSize); // SPS/PPS payload
context->extradata_size = kExtradataSize;
// Parse the SPS (skip the 4-byte start code) for the coded dimensions.
// BUG FIX: only kExtradataSize-4 bytes remain after the start code; the
// old hard-coded length of 34 read 4 bytes past the end of the buffer.
SequenceParameterSet spsSet;
memset(&spsSet, 0, sizeof(SequenceParameterSet));
spsSet.Parse(context->extradata + 4, kExtradataSize - 4);
// Dimensions are derived from macroblock counts (16 px per MB); this
// ignores any SPS cropping — assumed acceptable for this capture path.
context->width = (spsSet.pic_width_in_mbs_minus1+1)*16;
context->height = (spsSet.pic_height_in_map_units_minus1 +1)*16;
// Some formats (MP4 included) want stream headers out-of-band.
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
context->flags |= CODEC_FLAG_GLOBAL_HEADER;
return formatSt;
}
这是我第一次使用 ffmpeg,代码的基本流程已经了解。这个问题在网上找了一周多,github 上相关的代码也搜过了,实在找不出问题所在,希望了解的朋友能帮帮忙,谢谢!