How to cut video using the FFmpeg C API

How can I cut a video with the FFmpeg C API, for example from 00:10:00 to 00:20:00? Which functions do I need to use?

I use this function to convert a video:

int convert(char *file) {
    AVFrame *frame;
    AVPacket inPacket, outPacket;

    if(avio_open(&outFormatContext->pb, file, AVIO_FLAG_WRITE) < 0) {
        fprintf(stderr, "convert(): cannot open out file\n");
        return 0;
    }

    avformat_write_header(outFormatContext, NULL);
    frame = avcodec_alloc_frame();
    av_init_packet(&inPacket);

    // read packets from the input, re-encode the video packets and write them out
    while(av_read_frame(inFormatContext, &inPacket) >= 0) {
        if(inPacket.stream_index == inVideoStreamIndex) {
            avcodec_decode_video2(inCodecContext, frame, &frameFinished, &inPacket);
            if(frameFinished) {
                av_init_packet(&outPacket);
                avcodec_encode_video2(outCodecContext, &outPacket, frame, &outputed);
                if(outputed) {
                    if (av_write_frame(outFormatContext, &outPacket) != 0) {
                        fprintf(stderr, "convert(): error while writing video frame\n");
                        return 0;
                    }
                }
                av_free_packet(&outPacket);
            }
        }
    }

    av_write_trailer(outFormatContext);
    av_free_packet(&inPacket);

    return 1;
}

If you only want to cut the video, you don't need to re-encode it. So I assume you want to cut and re-encode for some reason. In that case, based on your code:

Note that you must have access to the video's AVStream* structure... I named it inVideoStream here.
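If you don't already keep that pointer around, a minimal sketch of one way to get it is below. It assumes inFormatContext has already been opened with avformat_open_input() and avformat_find_stream_info(), and the names inVideoStreamIndex / inVideoStream are just the ones your code already uses:

// Sketch: locate the video stream in the already-opened input (inFormatContext assumed valid).
int inVideoStreamIndex = av_find_best_stream(inFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if (inVideoStreamIndex < 0) {
    fprintf(stderr, "no video stream found\n");
    return 0;
}
AVStream *inVideoStream = inFormatContext->streams[inVideoStreamIndex];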

int convert_and_cut(char *file, float starttime, float endtime) {
    AVFrame *frame;
    AVPacket inPacket, outPacket;

    if(avio_open(&outFormatContext->pb, file, AVIO_FLAG_WRITE) < 0) {
        fprintf(stderr, "convert(): cannot open out file\n");
        return 0;
    }

    // seek to the start time you wish.
    // BEGIN
    AVRational default_timebase;
    default_timebase.num = 1;
    default_timebase.den = AV_TIME_BASE;

    // suppose you have access to the "inVideoStream" of course
    int64_t starttime_int64 = av_rescale_q((int64_t)( starttime * AV_TIME_BASE ), default_timebase, inVideoStream->time_base);
    int64_t endtime_int64 = av_rescale_q((int64_t)( endtime * AV_TIME_BASE ), default_timebase, inVideoStream->time_base);

    if(avformat_seek_file(inFormatContext, inVideoStreamIndex, INT64_MIN, starttime_int64, INT64_MAX, 0) < 0) {
        // error... do something...
        return 0; // usually 0 is used for success in C, but I am following your code.
    }

    avcodec_flush_buffers( inVideoStream->codec );
    // END

    avformat_write_header(outFormatContext, NULL);
    frame = avcodec_alloc_frame();
    av_init_packet(&inPacket);

    // you used avformat_seek_file() to seek CLOSE to the point you want... in order to give precision to your seek,
    // just go on reading the packets and checking the packets PTS (presentation timestamp)
    while(av_read_frame(inFormatContext, &inPacket) >= 0) {
        if(inPacket.stream_index == inVideoStreamIndex) {
            avcodec_decode_video2(inCodecContext, frame, &frameFinished, &inPacket);
            // this line guarantees you are getting what you really want.
            if(frameFinished && frame->pkt_pts >= starttime_int64 && frame->pkt_pts <= endtime_int64) {
                av_init_packet(&outPacket);
                avcodec_encode_video2(outCodecContext, &outPacket, frame, &outputed);

                if(outputed) {
                    if (av_write_frame(outFormatContext, &outPacket) != 0) {
                        fprintf(stderr, "convert(): error while writing video frame\n");
                        return 0;
                    }
                }
                av_free_packet(&outPacket);
            }

            // exit the loop if you got the frames you want.
            if(frame->pkt_pts > endtime_int64) {
                break;
            }
        }
    }

    av_write_trailer(outFormatContext);
    av_free_packet(&inPacket);

    return 1;
}
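For the range in the question (00:10:00 to 00:20:00), the times are simply passed in as seconds. A call might look like this sketch, assuming the same surrounding setup as your convert() code and an output name of your choosing (out.mp4 is just a placeholder):

// 00:10:00 -> 600 s, 00:20:00 -> 1200 s; convert_and_cut() returns 1 on success, 0 on failure.
if (!convert_and_cut("out.mp4", 600.0f, 1200.0f)) {
    fprintf(stderr, "cutting failed\n");
}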

As Wagner Patriota says, "if you just want to cut the video, you don't need to re-encode it". Here is code based on ffmpeg's remuxing.c example, with which you don't need to re-encode the video.

#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           tag,
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}

int cut_video(double from_seconds, double end_seconds, const char* in_filename, const char* out_filename) {
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    int ret, i;

    av_register_all();

    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
        fprintf(stderr, "Could not open input file '%s'", in_filename);
        goto end;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto end;
    }

    av_dump_format(ifmt_ctx, 0, in_filename, 0);

    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    ofmt = ofmt_ctx->oformat;

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *in_stream = ifmt_ctx->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
    av_dump_format(ofmt_ctx, 0, out_filename, 1);

    if (!(ofmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto end;
        }
    }

    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto end;
    }

    // int indexs[8] = {0};
    // int64_t start_from = 8*AV_TIME_BASE;
    ret = av_seek_frame(ifmt_ctx, -1, from_seconds*AV_TIME_BASE, AVSEEK_FLAG_ANY);
    if (ret < 0) {
        fprintf(stderr, "Error seek\n");
        goto end;
    }

    int64_t *dts_start_from = malloc(sizeof(int64_t) * ifmt_ctx->nb_streams);
    memset(dts_start_from, 0, sizeof(int64_t) * ifmt_ctx->nb_streams);
    int64_t *pts_start_from = malloc(sizeof(int64_t) * ifmt_ctx->nb_streams);
    memset(pts_start_from, 0, sizeof(int64_t) * ifmt_ctx->nb_streams);

    while (1) {
        AVStream *in_stream, *out_stream;

        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
            break;

        in_stream  = ifmt_ctx->streams[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];

        log_packet(ifmt_ctx, &pkt, "in");

        if (av_q2d(in_stream->time_base) * pkt.pts > end_seconds) {
            av_free_packet(&pkt);
            break;
        }

        if (dts_start_from[pkt.stream_index] == 0) {
            dts_start_from[pkt.stream_index] = pkt.dts;
            printf("dts_start_from: %s\n", av_ts2str(dts_start_from[pkt.stream_index]));
        }
        if (pts_start_from[pkt.stream_index] == 0) {
            pts_start_from[pkt.stream_index] = pkt.pts;
            printf("pts_start_from: %s\n", av_ts2str(pts_start_from[pkt.stream_index]));
        }

        /* copy packet */
        pkt.pts = av_rescale_q_rnd(pkt.pts - pts_start_from[pkt.stream_index], in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.dts = av_rescale_q_rnd(pkt.dts - dts_start_from[pkt.stream_index], in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        if (pkt.pts < 0) {
            pkt.pts = 0;
        }
        if (pkt.dts < 0) {
            pkt.dts = 0;
        }
        pkt.duration = (int)av_rescale_q((int64_t)pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        log_packet(ofmt_ctx, &pkt, "out");
        printf("\n");

        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);
    }
    free(dts_start_from);
    free(pts_start_from);

    av_write_trailer(ofmt_ctx);
end:

    avformat_close_input(&ifmt_ctx);

    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        return 1;
    }

    return 0;
}
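A minimal driver for the 00:10:00 to 00:20:00 range in the question might look like the sketch below; the file names in.mp4 and out.mp4 are placeholders, and you link against libavformat, libavcodec and libavutil (e.g. gcc cut_video.c -o cut_video -lavformat -lavcodec -lavutil):

int main(void) {
    // 00:10:00 = 600 s, 00:20:00 = 1200 s; cut_video() returns 0 on success.
    return cut_video(600.0, 1200.0, "in.mp4", "out.mp4");
}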