FFmpeg 4.2.2
muxing.c
/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libavformat API example.
 *
 * Output a media file in any supported libavformat format. The default
 * codecs are used.
 * @example muxing.c
 */
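/*
 * Overview of the flow implemented by main() below:
 *   1. avformat_alloc_output_context2() picks the container from the file name;
 *   2. add_stream() creates one audio and one video stream with the container's
 *      default codec IDs and configures an encoder context for each;
 *   3. open_audio()/open_video() open the encoders and allocate frames;
 *   4. avformat_write_header() writes the container header;
 *   5. the main loop alternates write_audio_frame()/write_video_frame() until
 *      STREAM_DURATION seconds have been produced;
 *   6. av_write_trailer() finalizes the file.
 */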

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>

#define STREAM_DURATION   10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

#define SCALE_FLAGS SWS_BICUBIC

// a wrapper around a single output AVStream
typedef struct OutputStream {
    AVStream *st;
    AVCodecContext *enc;

    /* pts of the next frame that will be generated */
    int64_t next_pts;
    int samples_count;

    AVFrame *frame;
    AVFrame *tmp_frame;

    float t, tincr, tincr2;

    struct SwsContext *sws_ctx;
    struct SwrContext *swr_ctx;
} OutputStream;

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}

static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
    /* rescale output packet timestamp values from codec to stream timebase */
    av_packet_rescale_ts(pkt, *time_base, st->time_base);
    pkt->stream_index = st->index;

    /* Write the compressed frame to the media file. */
    log_packet(fmt_ctx, pkt);
    return av_interleaved_write_frame(fmt_ctx, pkt);
}
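/* For example, if the muxer chose a 1/90000 stream time base (as the MPEG-PS
 * muxer does) while the video encoder runs at 1/25, a packet with pts 3 is
 * rewritten to pts 3 * 90000 / 25 = 10800 so that it still means "0.12 s". */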

/* Add an output stream. */
static void add_stream(OutputStream *ost, AVFormatContext *oc,
                       AVCodec **codec,
                       enum AVCodecID codec_id)
{
    AVCodecContext *c;
    int i;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    ost->st->id = oc->nb_streams-1;
    c = avcodec_alloc_context3(*codec);
    if (!c) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = c;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        c->sample_fmt  = (*codec)->sample_fmts ?
            (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        c->bit_rate    = 64000;
        c->sample_rate = 44100;
        if ((*codec)->supported_samplerates) {
            c->sample_rate = (*codec)->supported_samplerates[0];
            for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                if ((*codec)->supported_samplerates[i] == 44100)
                    c->sample_rate = 44100;
            }
        }
        c->channels       = av_get_channel_layout_nb_channels(c->channel_layout);
        c->channel_layout = AV_CH_LAYOUT_STEREO;
        if ((*codec)->channel_layouts) {
            c->channel_layout = (*codec)->channel_layouts[0];
            for (i = 0; (*codec)->channel_layouts[i]; i++) {
                if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
                    c->channel_layout = AV_CH_LAYOUT_STEREO;
            }
        }
        c->channels        = av_get_channel_layout_nb_channels(c->channel_layout);
        ost->st->time_base = (AVRational){ 1, c->sample_rate };
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;

        c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        c->width    = 352;
        c->height   = 288;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
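        /* Here, with STREAM_FRAME_RATE 25, the time base is 1/25 s: the frame
         * with pts == 50 is presented at the 2-second mark, and get_video_frame()
         * below advances next_pts by exactly 1 per generated frame. */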
        ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
        c->time_base       = ost->st->time_base;

        c->gop_size      = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt       = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B-frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}

/**************************************************************/
/* audio output */

static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                  uint64_t channel_layout,
                                  int sample_rate, int nb_samples)
{
    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame) {
        fprintf(stderr, "Error allocating an audio frame\n");
        exit(1);
    }

    frame->format = sample_fmt;
    frame->channel_layout = channel_layout;
    frame->sample_rate = sample_rate;
    frame->nb_samples = nb_samples;

    if (nb_samples) {
        ret = av_frame_get_buffer(frame, 0);
        if (ret < 0) {
            fprintf(stderr, "Error allocating an audio buffer\n");
            exit(1);
        }
    }

    return frame;
}

static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *c;
    int nb_samples;
    int ret;
    AVDictionary *opt = NULL;

    c = ost->enc;

    /* open it */
    av_dict_copy(&opt, opt_arg, 0);
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* init signal generator */
    ost->t     = 0;
    ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
        nb_samples = 10000;
    else
        nb_samples = c->frame_size;

    ost->frame     = alloc_audio_frame(c->sample_fmt, c->channel_layout,
                                       c->sample_rate, nb_samples);
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
                                       c->sample_rate, nb_samples);

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }

    /* create resampler context */
    ost->swr_ctx = swr_alloc();
    if (!ost->swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        exit(1);
    }

    /* set options */
    av_opt_set_int       (ost->swr_ctx, "in_channel_count",  c->channels,       0);
    av_opt_set_int       (ost->swr_ctx, "in_sample_rate",    c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt",     AV_SAMPLE_FMT_S16, 0);
    av_opt_set_int       (ost->swr_ctx, "out_channel_count", c->channels,       0);
    av_opt_set_int       (ost->swr_ctx, "out_sample_rate",   c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt",    c->sample_fmt,     0);

    /* initialize the resampling context */
    if ((ret = swr_init(ost->swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        exit(1);
    }
}
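/* Note: get_audio_frame() below synthesizes interleaved signed 16-bit samples
 * into ost->tmp_frame; write_audio_frame() then feeds them through the
 * SwrContext configured above to fill ost->frame in the encoder's own sample
 * format. Input and output sample rates are identical, so no resampling takes
 * place, only the sample-format conversion. */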

/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels. */
static AVFrame *get_audio_frame(OutputStream *ost)
{
    AVFrame *frame = ost->tmp_frame;
    int j, i, v;
    int16_t *q = (int16_t*)frame->data[0];

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, ost->enc->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

    for (j = 0; j < frame->nb_samples; j++) {
        v = (int)(sin(ost->t) * 10000);
        for (i = 0; i < ost->enc->channels; i++)
            *q++ = v;
        ost->t     += ost->tincr;
        ost->tincr += ost->tincr2;
    }

    frame->pts = ost->next_pts;
    ost->next_pts += frame->nb_samples;

    return frame;
}
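/* The generator evaluates sin(t) where t advances by tincr per sample and
 * tincr itself grows by tincr2 per sample: with tincr starting at
 * 2*pi*110/sample_rate the tone starts at 110 Hz, and because tincr2 adds
 * 2*pi*110/sample_rate^2 per sample the frequency climbs by roughly 110 Hz
 * for every second of generated audio. */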

/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    AVFrame *frame;
    int ret;
    int got_packet;
    int dst_nb_samples;

    av_init_packet(&pkt);
    c = ost->enc;

    frame = get_audio_frame(ost);

    if (frame) {
        /* convert samples from native format to destination codec format, using the resampler */
        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
                                        c->sample_rate, c->sample_rate, AV_ROUND_UP);
        av_assert0(dst_nb_samples == frame->nb_samples);

        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);

        /* convert to destination format */
        ret = swr_convert(ost->swr_ctx,
                          ost->frame->data, dst_nb_samples,
                          (const uint8_t **)frame->data, frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            exit(1);
        }
        frame = ost->frame;

        frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
        ost->samples_count += dst_nb_samples;
    }

    ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (got_packet) {
        ret = write_frame(oc, &c->time_base, ost->st, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error while writing audio frame: %s\n",
                    av_err2str(ret));
            exit(1);
        }
    }

    return (frame || got_packet) ? 0 : 1;
}

/**************************************************************/
/* video output */

static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    int ret;

    picture = av_frame_alloc();
    if (!picture)
        return NULL;

    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;

    /* allocate the buffers for the frame data */
    ret = av_frame_get_buffer(picture, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }

    return picture;
}

static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    int ret;
    AVCodecContext *c = ost->enc;
    AVDictionary *opt = NULL;

    av_dict_copy(&opt, opt_arg, 0);

    /* open the codec */
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index,
                           int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

static AVFrame *get_video_frame(OutputStream *ost)
{
    AVCodecContext *c = ost->enc;

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, c->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally; make sure we do not overwrite it here */
    if (av_frame_make_writable(ost->frame) < 0)
        exit(1);

    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        /* as we only generate a YUV420P picture, we must convert it
         * to the codec pixel format if needed */
        if (!ost->sws_ctx) {
            ost->sws_ctx = sws_getContext(c->width, c->height,
                                          AV_PIX_FMT_YUV420P,
                                          c->width, c->height,
                                          c->pix_fmt,
                                          SCALE_FLAGS, NULL, NULL, NULL);
            if (!ost->sws_ctx) {
                fprintf(stderr,
                        "Could not initialize the conversion context\n");
                exit(1);
            }
        }
        fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
        sws_scale(ost->sws_ctx, (const uint8_t * const *) ost->tmp_frame->data,
                  ost->tmp_frame->linesize, 0, c->height, ost->frame->data,
                  ost->frame->linesize);
    } else {
        fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
    }

    ost->frame->pts = ost->next_pts++;

    return ost->frame;
}

/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    int ret;
    AVCodecContext *c;
    AVFrame *frame;
    int got_packet = 0;
    AVPacket pkt = { 0 };

    c = ost->enc;

    frame = get_video_frame(ost);

    av_init_packet(&pkt);

    /* encode the image */
    ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (got_packet) {
        ret = write_frame(oc, &c->time_base, ost->st, &pkt);
    } else {
        ret = 0;
    }

    if (ret < 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    return (frame || got_packet) ? 0 : 1;
}
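/* Note: avcodec_encode_video2()/avcodec_encode_audio2() are deprecated in this
 * FFmpeg release in favor of the send/receive API. A minimal sketch of the
 * replacement pattern, reusing the same AVCodecContext *c, AVFrame *frame and
 * write_frame() helper, would look roughly like this (one sent frame can yield
 * zero or more packets):
 *
 *     ret = avcodec_send_frame(c, frame);            // frame == NULL flushes
 *     while (ret >= 0) {
 *         AVPacket pkt = { 0 };
 *         ret = avcodec_receive_packet(c, &pkt);
 *         if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 *             break;
 *         else if (ret < 0)
 *             exit(1);
 *         write_frame(oc, &c->time_base, ost->st, &pkt);
 *         av_packet_unref(&pkt);
 *     }
 */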

static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_free_context(&ost->enc);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmp_frame);
    sws_freeContext(ost->sws_ctx);
    swr_free(&ost->swr_ctx);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;
    int i;

    if (argc < 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
    for (i = 2; i+1 < argc; i+=2) {
        if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
            av_dict_set(&opt, argv[i]+1, argv[i+1], 0);
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);

    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

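    /* Encode until both streams have produced STREAM_DURATION seconds of data.
     * On every iteration, av_compare_ts() picks whichever stream's next pts is
     * further behind in absolute time, so audio and video packets stay roughly
     * interleaved in the output file. */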
    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
            (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                                            audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !write_audio_frame(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_closep(&oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}