Updated FFmpeg

CQTexperiment
Christopher Snowhill 2019-10-15 19:15:52 -07:00
parent 52a63dc5bc
commit a1ec4dba40
58 changed files with 1392 additions and 1081 deletions

View File

@ -548,6 +548,7 @@
83C7282922BC8C1500678B4A /* mixing.c in Sources */ = {isa = PBXBuildFile; fileRef = 83C7282522BC8C1400678B4A /* mixing.c */; };
83C7282A22BC8C1500678B4A /* plugins.c in Sources */ = {isa = PBXBuildFile; fileRef = 83C7282622BC8C1400678B4A /* plugins.c */; };
83CD428A1F787879000F77BE /* libswresample.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 83CD42851F787878000F77BE /* libswresample.a */; };
83D2F58E2356B266007646ED /* libopus.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 83D2F58A2356B266007646ED /* libopus.a */; };
83D731101A7394BF00CA1366 /* g7221.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 83D730EB1A738EB300CA1366 /* g7221.framework */; };
83D731111A7394D300CA1366 /* g7221.framework in CopyFiles */ = {isa = PBXBuildFile; fileRef = 83D730EB1A738EB300CA1366 /* g7221.framework */; settings = {ATTRIBUTES = (CodeSignOnCopy, RemoveHeadersOnCopy, ); }; };
83D731891A749D1500CA1366 /* g719.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 83D7313E1A74968A00CA1366 /* g719.framework */; };
@ -1231,6 +1232,7 @@
83C7282522BC8C1400678B4A /* mixing.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mixing.c; sourceTree = "<group>"; };
83C7282622BC8C1400678B4A /* plugins.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = plugins.c; sourceTree = "<group>"; };
83CD42851F787878000F77BE /* libswresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libswresample.a; path = ../../ThirdParty/ffmpeg/lib/libswresample.a; sourceTree = "<group>"; };
83D2F58A2356B266007646ED /* libopus.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libopus.a; path = ../../ThirdParty/ffmpeg/lib/libopus.a; sourceTree = "<group>"; };
83D730E51A738EB200CA1366 /* g7221.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = g7221.xcodeproj; path = ../g7221/g7221.xcodeproj; sourceTree = "<group>"; };
83D731381A74968900CA1366 /* g719.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = g719.xcodeproj; path = ../g719/g719.xcodeproj; sourceTree = "<group>"; };
83D7318B1A749EEE00CA1366 /* g719_decoder.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = g719_decoder.c; sourceTree = "<group>"; };
@ -1264,6 +1266,7 @@
838BDB711D3B1F990022CA6F /* CoreFoundation.framework in Frameworks */,
838BDB6A1D3AF7140022CA6F /* libiconv.tbd in Frameworks */,
838BDB681D3AF70D0022CA6F /* libz.tbd in Frameworks */,
83D2F58E2356B266007646ED /* libopus.a in Frameworks */,
83CD428A1F787879000F77BE /* libswresample.a in Frameworks */,
83D731891A749D1500CA1366 /* g719.framework in Frameworks */,
83D731101A7394BF00CA1366 /* g7221.framework in Frameworks */,
@ -1324,6 +1327,7 @@
836F6B3B18BDB8880095E648 /* Frameworks */ = {
isa = PBXGroup;
children = (
83D2F58A2356B266007646ED /* libopus.a */,
838BDB7E1D3B1FD10022CA6F /* Cocoa.framework */,
838BDB7C1D3B1FCC0022CA6F /* CoreVideo.framework */,
838BDB7A1D3B1FC20022CA6F /* CoreMedia.framework */,

View File

@ -20,6 +20,7 @@
8352D49D1CDDB8C0009D16AA /* CoreMedia.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8352D49C1CDDB8C0009D16AA /* CoreMedia.framework */; };
8352D49F1CDDB8D7009D16AA /* CoreVideo.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8352D49E1CDDB8D7009D16AA /* CoreVideo.framework */; };
83CD428C1F7878A0000F77BE /* libswresample.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 83CD428B1F78789F000F77BE /* libswresample.a */; };
83D2F5892356B210007646ED /* libopus.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 83D2F5882356B210007646ED /* libopus.a */; };
8D5B49B4048680CD000E48DA /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1058C7ADFEA557BF11CA2CBB /* Cocoa.framework */; };
B09E942F0D747F410064F138 /* FFMPEGDecoder.m in Sources */ = {isa = PBXBuildFile; fileRef = B09E942E0D747F410064F138 /* FFMPEGDecoder.m */; };
/* End PBXBuildFile section */
@ -43,6 +44,8 @@
8352D49E1CDDB8D7009D16AA /* CoreVideo.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreVideo.framework; path = System/Library/Frameworks/CoreVideo.framework; sourceTree = SDKROOT; };
8384913818081F6C00E7332D /* Logging.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Logging.h; path = ../../Utils/Logging.h; sourceTree = "<group>"; };
83CD428B1F78789F000F77BE /* libswresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libswresample.a; path = ../../ThirdParty/ffmpeg/lib/libswresample.a; sourceTree = "<group>"; };
83D2F5862356B1BE007646ED /* libopus.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libopus.a; path = ../../../../../../../usr/local/Cellar/opus/1.3.1/lib/libopus.a; sourceTree = "<group>"; };
83D2F5882356B210007646ED /* libopus.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libopus.a; path = ../../ThirdParty/ffmpeg/lib/libopus.a; sourceTree = "<group>"; };
8D5B49B6048680CD000E48DA /* FFMPEG.bundle */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = FFMPEG.bundle; sourceTree = BUILT_PRODUCTS_DIR; };
8D5B49B7048680CD000E48DA /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
B09E942D0D747F410064F138 /* FFMPEGDecoder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FFMPEGDecoder.h; sourceTree = "<group>"; };
@ -66,6 +69,7 @@
8352D49B1CDDB8B2009D16AA /* VideoToolbox.framework in Frameworks */,
8352D48F1CDDB023009D16AA /* CoreFoundation.framework in Frameworks */,
8D5B49B4048680CD000E48DA /* Cocoa.framework in Frameworks */,
83D2F5892356B210007646ED /* libopus.a in Frameworks */,
8352D48C1CDDAEDD009D16AA /* libavformat.a in Frameworks */,
8352D48D1CDDAEDD009D16AA /* libavutil.a in Frameworks */,
8352D48B1CDDAEDD009D16AA /* libavcodec.a in Frameworks */,
@ -86,6 +90,7 @@
089C1671FE841209C02AAC07 /* Frameworks and Libraries */,
19C28FB8FE9D52D311CA2CBB /* Products */,
B09E95F50D74A3ED0064F138 /* Frameworks-Info.plist */,
83D2F5852356B1BD007646ED /* Frameworks */,
);
name = FFMPEG;
sourceTree = "<group>";
@ -165,6 +170,15 @@
name = "Other Sources";
sourceTree = "<group>";
};
83D2F5852356B1BD007646ED /* Frameworks */ = {
isa = PBXGroup;
children = (
83D2F5882356B210007646ED /* libopus.a */,
83D2F5862356B1BE007646ED /* libopus.a */,
);
name = Frameworks;
sourceTree = "<group>";
};
/* End PBXGroup section */
/* Begin PBXNativeTarget section */
@ -252,6 +266,10 @@
GCC_PREFIX_HEADER = FFMPEG_Prefix.pch;
INFOPLIST_FILE = Info.plist;
INSTALL_PATH = "$(HOME)/Library/Bundles";
LIBRARY_SEARCH_PATHS = (
"$(inherited)",
/usr/local/Cellar/opus/1.3.1/lib,
);
PRODUCT_BUNDLE_IDENTIFIER = org.cogx.ffmpeg;
PRODUCT_NAME = FFMPEG;
SDKROOT = macosx;
@ -272,6 +290,10 @@
GCC_PREFIX_HEADER = FFMPEG_Prefix.pch;
INFOPLIST_FILE = Info.plist;
INSTALL_PATH = "$(HOME)/Library/Bundles";
LIBRARY_SEARCH_PATHS = (
"$(inherited)",
/usr/local/Cellar/opus/1.3.1/lib,
);
PRODUCT_BUNDLE_IDENTIFIER = org.cogx.ffmpeg;
PRODUCT_NAME = FFMPEG;
SDKROOT = macosx;

View File

@ -7,11 +7,12 @@
--disable-swscale --disable-network --disable-swscale-alpha --disable-vdpau\
--disable-dxva2 --disable-everything --enable-hwaccels\
--enable-swresample\
--enable-libopus\
--enable-parser=ac3,mpegaudio,xma,vorbis,opus\
--enable-demuxer=ac3,asf,xwma,mov,oma,ogg,tak,dsf,wav,aac,dts,dtshd,mp3,bink,flac,msf,xmv,caf,ape,smacker,pcm_s8,spdif,mpc,mpc8,rm\
--enable-decoder=ac3,wmapro,wmav1,wmav2,wmavoice,wmalossless,xma1,xma2,dca,tak,dsd_lsbf,dsd_lsbf_planar,dsd_mbf,dsd_msbf_planar,aac,atrac3,atrac3p,mp3float,bink,binkaudio_dct,binkaudio_rdft,flac,pcm_s16be,pcm_s16be_planar,pcm_s16le,pcm_s16le_planar,vorbis,ape,adpcm_ima_qt,smackaud,opus,pcm_s8,pcm_s8_planar,mpc7,mpc8,cook\
--enable-decoder=ac3,wmapro,wmav1,wmav2,wmavoice,wmalossless,xma1,xma2,dca,tak,dsd_lsbf,dsd_lsbf_planar,dsd_mbf,dsd_msbf_planar,aac,atrac3,atrac3p,mp3float,bink,binkaudio_dct,binkaudio_rdft,flac,pcm_s16be,pcm_s16be_planar,pcm_s16le,pcm_s16le_planar,vorbis,ape,adpcm_ima_qt,smackaud,libopus,pcm_s8,pcm_s8_planar,mpc7,mpc8,alac,adpcm_ima_dk3,adpcm_ima_dk4,cook\
--disable-parser=mpeg4video,h263\
--disable-decoder=mpeg2video,h263,h264,mpeg1video,mpeg2video,mpeg4,hevc,vp9\
--disable-version3
make -j8
make -j$(sysctl -n hw.logicalcpu)
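
As a quick sanity check of the new configuration, a small C program can verify that the Opus codec ID now resolves to the libopus wrapper rather than the native decoder dropped from --enable-decoder. This is only a sketch; it assumes the headers and static libraries built by this script (ThirdParty/ffmpeg) are on the include and link paths.

    #include <stdio.h>
    #include <libavcodec/avcodec.h>

    int main(void)
    {
        /* with --enable-libopus and "libopus" in --enable-decoder, the
           generic lookup by codec ID should land on the libopus wrapper */
        AVCodec *by_id   = avcodec_find_decoder(AV_CODEC_ID_OPUS);
        AVCodec *by_name = avcodec_find_decoder_by_name("libopus");
        printf("decoder for AV_CODEC_ID_OPUS: %s\n", by_id ? by_id->name : "(none)");
        printf("libopus wrapper present: %s\n", by_name ? "yes" : "no");
        return 0;
    }
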

View File

@ -409,6 +409,7 @@ enum AVCodecID {
AV_CODEC_ID_DXV,
AV_CODEC_ID_SCREENPRESSO,
AV_CODEC_ID_RSCC,
AV_CODEC_ID_AVS2,
AV_CODEC_ID_Y41P = 0x8000,
AV_CODEC_ID_AVRP,
@ -446,6 +447,17 @@ enum AVCodecID {
AV_CODEC_ID_SVG,
AV_CODEC_ID_GDV,
AV_CODEC_ID_FITS,
AV_CODEC_ID_IMM4,
AV_CODEC_ID_PROSUMER,
AV_CODEC_ID_MWSC,
AV_CODEC_ID_WCMV,
AV_CODEC_ID_RASC,
AV_CODEC_ID_HYMT,
AV_CODEC_ID_ARBC,
AV_CODEC_ID_AGM,
AV_CODEC_ID_LSCR,
AV_CODEC_ID_VP4,
AV_CODEC_ID_IMM5,
/* various PCM "codecs" */
AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
@ -485,6 +497,7 @@ enum AVCodecID {
AV_CODEC_ID_PCM_S64BE,
AV_CODEC_ID_PCM_F16LE,
AV_CODEC_ID_PCM_F24LE,
AV_CODEC_ID_PCM_VIDC,
/* various ADPCM codecs */
AV_CODEC_ID_ADPCM_IMA_QT = 0x11000,
@ -529,6 +542,7 @@ enum AVCodecID {
AV_CODEC_ID_ADPCM_AICA,
AV_CODEC_ID_ADPCM_IMA_DAT4,
AV_CODEC_ID_ADPCM_MTAF,
AV_CODEC_ID_ADPCM_AGM,
/* AMR */
AV_CODEC_ID_AMR_NB = 0x12000,
@ -615,6 +629,7 @@ enum AVCodecID {
AV_CODEC_ID_PAF_AUDIO,
AV_CODEC_ID_ON2AVC,
AV_CODEC_ID_DSS_SP,
AV_CODEC_ID_CODEC2,
AV_CODEC_ID_FFWAVESYNTH = 0x15800,
AV_CODEC_ID_SONIC,
@ -635,6 +650,10 @@ enum AVCodecID {
AV_CODEC_ID_DOLBY_E,
AV_CODEC_ID_APTX,
AV_CODEC_ID_APTX_HD,
AV_CODEC_ID_SBC,
AV_CODEC_ID_ATRAC9,
AV_CODEC_ID_HCOM,
AV_CODEC_ID_ACELP_KELVIN,
/* subtitle codecs */
AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
@ -663,12 +682,15 @@ enum AVCodecID {
AV_CODEC_ID_PJS,
AV_CODEC_ID_ASS,
AV_CODEC_ID_HDMV_TEXT_SUBTITLE,
AV_CODEC_ID_TTML,
AV_CODEC_ID_ARIB_CAPTION,
/* other specific kind of codecs (generally used for attachments) */
AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs.
AV_CODEC_ID_TTF = 0x18000,
AV_CODEC_ID_SCTE_35, ///< Contain timestamp estimated through PCR of program stream.
AV_CODEC_ID_EPG,
AV_CODEC_ID_BINTEXT = 0x18800,
AV_CODEC_ID_XBIN,
AV_CODEC_ID_IDF,
@ -841,6 +863,11 @@ typedef struct RcOverride{
* Use qpel MC.
*/
#define AV_CODEC_FLAG_QPEL (1 << 4)
/**
* Don't output frames whose parameters differ from first
* decoded frame in stream.
*/
#define AV_CODEC_FLAG_DROPCHANGED (1 << 5)
/**
* Use internal 2pass ratecontrol in first pass mode.
*/
@ -1060,6 +1087,13 @@ typedef struct RcOverride{
*/
#define AV_CODEC_CAP_HYBRID (1 << 19)
/**
* This codec takes the reordered_opaque field from input AVFrames
* and returns it in the corresponding field in AVCodecContext after
* encoding.
*/
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE (1 << 20)
/**
* Pan Scan area.
* This specifies the area which should be displayed.
@ -1099,17 +1133,29 @@ typedef struct AVCPBProperties {
* Maximum bitrate of the stream, in bits per second.
* Zero if unknown or unspecified.
*/
#if FF_API_UNSANITIZED_BITRATES
int max_bitrate;
#else
int64_t max_bitrate;
#endif
/**
* Minimum bitrate of the stream, in bits per second.
* Zero if unknown or unspecified.
*/
#if FF_API_UNSANITIZED_BITRATES
int min_bitrate;
#else
int64_t min_bitrate;
#endif
/**
* Average bitrate of the stream, in bits per second.
* Zero if unknown or unspecified.
*/
#if FF_API_UNSANITIZED_BITRATES
int avg_bitrate;
#else
int64_t avg_bitrate;
#endif
/**
* The size of the buffer to which the ratecontrol is applied, in bits.
@ -1310,7 +1356,7 @@ enum AVPacketSideDataType {
AV_PKT_DATA_METADATA_UPDATE,
/**
* MPEGTS stream ID, this is required to pass the stream ID
* MPEGTS stream ID as uint8_t, this is required to pass the stream ID
* information from the demuxer to the corresponding muxer.
*/
AV_PKT_DATA_MPEGTS_STREAM_ID,
@ -1342,6 +1388,25 @@ enum AVPacketSideDataType {
*/
AV_PKT_DATA_A53_CC,
/**
* This side data is encryption initialization data.
* The format is not part of ABI, use av_encryption_init_info_* methods to
* access.
*/
AV_PKT_DATA_ENCRYPTION_INIT_INFO,
/**
* This side data contains encryption info for how to decrypt the packet.
* The format is not part of ABI, use av_encryption_info_* methods to access.
*/
AV_PKT_DATA_ENCRYPTION_INFO,
/**
* Active Format Description data consisting of a single byte as specified
* in ETSI TS 101 154 using AVActiveFormatDescription enum.
*/
AV_PKT_DATA_AFD,
/**
* The number of side data types.
* This is not part of the public API/ABI in the sense that it may
@ -1597,6 +1662,7 @@ typedef struct AVCodecContext {
* The allocated memory should be AV_INPUT_BUFFER_PADDING_SIZE bytes larger
* than extradata_size to avoid problems if it is read with the bitstream reader.
* The bytewise contents of extradata must not depend on the architecture or CPU endianness.
* Must be allocated with the av_malloc() family of functions.
* - encoding: Set/allocated/freed by libavcodec.
* - decoding: Set/allocated/freed by user.
*/
@ -1994,15 +2060,19 @@ typedef struct AVCodecContext {
/**
* custom intra quantization matrix
* - encoding: Set by user, can be NULL.
* - decoding: Set by libavcodec.
* Must be allocated with the av_malloc() family of functions, and will be freed in
* avcodec_free_context().
* - encoding: Set/allocated by user, freed by libavcodec. Can be NULL.
* - decoding: Set/allocated/freed by libavcodec.
*/
uint16_t *intra_matrix;
/**
* custom inter quantization matrix
* - encoding: Set by user, can be NULL.
* - decoding: Set by libavcodec.
* Must be allocated with the av_malloc() family of functions, and will be freed in
* avcodec_free_context().
* - encoding: Set/allocated by user, freed by libavcodec. Can be NULL.
* - decoding: Set/allocated/freed by libavcodec.
*/
uint16_t *inter_matrix;
@ -2646,7 +2716,10 @@ typedef struct AVCodecContext {
/**
* opaque 64-bit number (generally a PTS) that will be reordered and
* output in AVFrame.reordered_opaque
* - encoding: unused
* - encoding: Set by libavcodec to the reordered_opaque of the input
* frame corresponding to the last returned packet. Only
* supported by encoders with the
* AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE capability.
* - decoding: Set by user.
*/
int64_t reordered_opaque;
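
The new AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE capability and the new encoding semantics of reordered_opaque work together. A hedged sketch of the round trip; enc, frame, pkt and my_tag are illustrative caller-provided names.

    #include <libavcodec/avcodec.h>

    static int encode_with_tag(AVCodecContext *enc, AVFrame *frame,
                               AVPacket *pkt, int64_t my_tag)
    {
        int ret;
        int supported = enc->codec->capabilities & AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE;

        if (supported)
            frame->reordered_opaque = my_tag;        /* travels with this frame */
        if ((ret = avcodec_send_frame(enc, frame)) < 0)
            return ret;
        ret = avcodec_receive_packet(enc, pkt);
        if (ret >= 0 && supported) {
            /* set by libavcodec to the tag of the frame that produced pkt,
               even if the encoder reordered its input */
            int64_t tag_of_packet = enc->reordered_opaque;
            (void)tag_of_packet;
        }
        return ret;
    }
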
@ -2918,6 +2991,28 @@ typedef struct AVCodecContext {
#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE 3
#define FF_PROFILE_HEVC_REXT 4
#define FF_PROFILE_AV1_MAIN 0
#define FF_PROFILE_AV1_HIGH 1
#define FF_PROFILE_AV1_PROFESSIONAL 2
#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT 0xc0
#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT 0xc1
#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT 0xc2
#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS 0xc3
#define FF_PROFILE_MJPEG_JPEG_LS 0xf7
#define FF_PROFILE_SBC_MSBC 1
#define FF_PROFILE_PRORES_PROXY 0
#define FF_PROFILE_PRORES_LT 1
#define FF_PROFILE_PRORES_STANDARD 2
#define FF_PROFILE_PRORES_HQ 3
#define FF_PROFILE_PRORES_4444 4
#define FF_PROFILE_PRORES_XQ 5
#define FF_PROFILE_ARIB_PROFILE_A 0
#define FF_PROFILE_ARIB_PROFILE_C 1
/**
* level
* - encoding: Set by user.
@ -3069,6 +3164,7 @@ typedef struct AVCodecContext {
#define FF_SUB_CHARENC_MODE_DO_NOTHING -1 ///< do nothing (demuxer outputs a stream supposed to be already in UTF-8, or the codec is bitmap for instance)
#define FF_SUB_CHARENC_MODE_AUTOMATIC 0 ///< libavcodec will select the mode itself
#define FF_SUB_CHARENC_MODE_PRE_DECODER 1 ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv
#define FF_SUB_CHARENC_MODE_IGNORE 2 ///< neither convert the subtitles, nor check them for valid UTF-8
/**
* Skip processing alpha if supported by codec.
@ -3269,6 +3365,22 @@ typedef struct AVCodecContext {
* used as reference pictures).
*/
int extra_hw_frames;
/**
* The percentage of damaged samples to discard a frame.
*
* - decoding: set by user
* - encoding: unused
*/
int discard_damaged_percentage;
/**
* The number of samples per frame to maximally accept.
*
* - decoding: set by user
* - encoding: set by user
*/
int64_t max_samples;
} AVCodecContext;
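
Both new fields are plain decoder settings on AVCodecContext and can simply be assigned before avcodec_open2(); the values in this sketch are illustrative only, not recommendations.

    #include <libavcodec/avcodec.h>

    static void harden_decoder(AVCodecContext *dec)
    {
        dec->discard_damaged_percentage = 50;   /* drop frames that are >50% damaged */
        dec->max_samples = 8 * 48000;           /* reject frames longer than 8 s at 48 kHz */
    }
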
#if FF_API_CODEC_GET_SET
@ -4321,7 +4433,7 @@ int av_grow_packet(AVPacket *pkt, int grow_by);
* Initialize a reference-counted packet from av_malloc()ed data.
*
* @param pkt packet to be initialized. This function will set the data, size,
* buf and destruct fields, all others are left untouched.
* and buf fields, all others are left untouched.
* @param data Data allocated by av_malloc() to be used as packet data. If this
* function returns successfully, the data is owned by the underlying AVBuffer.
* The caller may not access the data through other means.
@ -4337,7 +4449,7 @@ int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size);
* @warning This is a hack - the packet memory allocation stuff is broken. The
* packet is allocated if it was not really allocated.
*
* @deprecated Use av_packet_ref
* @deprecated Use av_packet_ref or av_packet_make_refcounted
*/
attribute_deprecated
int av_dup_packet(AVPacket *pkt);
@ -4508,6 +4620,33 @@ void av_packet_move_ref(AVPacket *dst, AVPacket *src);
*/
int av_packet_copy_props(AVPacket *dst, const AVPacket *src);
/**
* Ensure the data described by a given packet is reference counted.
*
* @note This function does not ensure that the reference will be writable.
* Use av_packet_make_writable instead for that purpose.
*
* @see av_packet_ref
* @see av_packet_make_writable
*
* @param pkt packet whose data should be made reference counted.
*
* @return 0 on success, a negative AVERROR on error. On failure, the
* packet is unchanged.
*/
int av_packet_make_refcounted(AVPacket *pkt);
/**
* Create a writable reference for the data described by a given packet,
* avoiding data copy if possible.
*
* @param pkt Packet whose data should be made writable.
*
* @return 0 on success, a negative AVERROR on failure. On failure, the
* packet is unchanged.
*/
int av_packet_make_writable(AVPacket *pkt);
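
A hedged sketch of how the two new helpers are meant to be used together when a demuxed packet has to outlive the next av_read_frame() call and then be patched in place; fmt and pkt are caller-provided, and the byte flip is purely illustrative.

    #include <libavformat/avformat.h>

    static int keep_and_patch(AVFormatContext *fmt, AVPacket *pkt)
    {
        int ret = av_read_frame(fmt, pkt);
        if (ret < 0)
            return ret;
        /* pkt->buf may be NULL (data backed by demuxer-internal storage) */
        if ((ret = av_packet_make_refcounted(pkt)) < 0)
            return ret;
        /* the reference may still be shared, so get a writable one before editing */
        if ((ret = av_packet_make_writable(pkt)) < 0)
            return ret;
        if (pkt->size > 0)
            pkt->data[0] ^= 0xFF;
        return 0;
    }
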
/**
* Convert valid timing fields (timestamps / durations) in a packet from one
* timebase to another. Timestamps with unknown values (AV_NOPTS_VALUE) will be
@ -4800,6 +4939,9 @@ int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt);
* AVERROR_EOF: the decoder has been fully flushed, and there will be
* no more output frames
* AVERROR(EINVAL): codec not opened, or it is an encoder
* AVERROR_INPUT_CHANGED: current decoded frame has changed parameters
* with respect to first decoded frame. Applicable
* when flag AV_CODEC_FLAG_DROPCHANGED is set.
* other negative values: legitimate decoding errors
*/
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame);
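
A hedged sketch of a receive loop that opts into the new flag and distinguishes the new error code; dec and frame are assumed to be an opened decoder context (with the flag set before avcodec_open2()) and an allocated AVFrame.

    #include <libavcodec/avcodec.h>

    static int receive_all(AVCodecContext *dec, AVFrame *frame)
    {
        /* must be set before avcodec_open2():
           dec->flags |= AV_CODEC_FLAG_DROPCHANGED; */
        for (;;) {
            int ret = avcodec_receive_frame(dec, frame);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                return 0;                      /* need more input / fully flushed */
            if (ret == AVERROR_INPUT_CHANGED)
                continue;                      /* parameters differ from the first frame */
            if (ret < 0)
                return ret;                    /* genuine decoding error */
            /* ... use the frame here ... */
            av_frame_unref(frame);
        }
    }
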
@ -5711,6 +5853,7 @@ typedef struct AVBitStreamFilter {
int (*init)(AVBSFContext *ctx);
int (*filter)(AVBSFContext *ctx, AVPacket *pkt);
void (*close)(AVBSFContext *ctx);
void (*flush)(AVBSFContext *ctx);
} AVBitStreamFilter;
#if FF_API_OLD_BSF
@ -5837,6 +5980,11 @@ int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt);
*/
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt);
/**
* Reset the internal bitstream filter state / flush internal buffers.
*/
void av_bsf_flush(AVBSFContext *ctx);
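
The new flush entry point pairs naturally with a seek. A hedged sketch, assuming dec and bsf are an opened decoder and an initialized bitstream filter owned by the caller.

    #include <libavcodec/avcodec.h>

    static void on_seek(AVCodecContext *dec, AVBSFContext *bsf)
    {
        avcodec_flush_buffers(dec);  /* drop decoder state for the old position */
        av_bsf_flush(bsf);           /* drop packets the filter still has buffered */
    }
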
/**
* Free a bitstream filter context and everything associated with it; write NULL
* into the supplied pointer.

View File

@ -85,4 +85,17 @@ typedef struct MediaCodecBuffer AVMediaCodecBuffer;
*/
int av_mediacodec_release_buffer(AVMediaCodecBuffer *buffer, int render);
/**
* Release a MediaCodec buffer and render it at the given time to the surface
* that is associated with the decoder. The timestamp must be within one second
* of the current java/lang/System#nanoTime() (which is implemented using
* CLOCK_MONOTONIC on Android). See the Android MediaCodec documentation
* of android/media/MediaCodec#releaseOutputBuffer(int,long) for more details.
*
* @param buffer the buffer to render
* @param time timestamp in nanoseconds of when to render the buffer
* @return 0 on success, < 0 otherwise
*/
int av_mediacodec_render_buffer_at_time(AVMediaCodecBuffer *buffer, int64_t time);
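
A hedged sketch of how a caller might satisfy the "within one second of nanoTime()" requirement, taking the timestamp from CLOCK_MONOTONIC as the comment suggests; buffer and delay_ns come from the caller.

    #include <stdint.h>
    #include <time.h>
    #include <libavcodec/mediacodec.h>

    static int render_soon(AVMediaCodecBuffer *buffer, int64_t delay_ns)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        int64_t now_ns = (int64_t)ts.tv_sec * INT64_C(1000000000) + ts.tv_nsec;
        return av_mediacodec_render_buffer_at_time(buffer, now_ns + delay_ns);
    }
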
#endif /* AVCODEC_MEDIACODEC_H */

View File

@ -1,397 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_OLD_CODEC_IDS_H
#define AVCODEC_OLD_CODEC_IDS_H
/*
* This header exists to prevent new codec IDs from being accidentally added to
* the deprecated list.
* Do not include it directly. It will be removed on next major bump
*
* Do not add new items to this list. Use the AVCodecID enum instead.
*/
CODEC_ID_NONE = AV_CODEC_ID_NONE,
/* video codecs */
CODEC_ID_MPEG1VIDEO,
CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding
#if FF_API_XVMC
CODEC_ID_MPEG2VIDEO_XVMC,
#endif
CODEC_ID_H261,
CODEC_ID_H263,
CODEC_ID_RV10,
CODEC_ID_RV20,
CODEC_ID_MJPEG,
CODEC_ID_MJPEGB,
CODEC_ID_LJPEG,
CODEC_ID_SP5X,
CODEC_ID_JPEGLS,
CODEC_ID_MPEG4,
CODEC_ID_RAWVIDEO,
CODEC_ID_MSMPEG4V1,
CODEC_ID_MSMPEG4V2,
CODEC_ID_MSMPEG4V3,
CODEC_ID_WMV1,
CODEC_ID_WMV2,
CODEC_ID_H263P,
CODEC_ID_H263I,
CODEC_ID_FLV1,
CODEC_ID_SVQ1,
CODEC_ID_SVQ3,
CODEC_ID_DVVIDEO,
CODEC_ID_HUFFYUV,
CODEC_ID_CYUV,
CODEC_ID_H264,
CODEC_ID_INDEO3,
CODEC_ID_VP3,
CODEC_ID_THEORA,
CODEC_ID_ASV1,
CODEC_ID_ASV2,
CODEC_ID_FFV1,
CODEC_ID_4XM,
CODEC_ID_VCR1,
CODEC_ID_CLJR,
CODEC_ID_MDEC,
CODEC_ID_ROQ,
CODEC_ID_INTERPLAY_VIDEO,
CODEC_ID_XAN_WC3,
CODEC_ID_XAN_WC4,
CODEC_ID_RPZA,
CODEC_ID_CINEPAK,
CODEC_ID_WS_VQA,
CODEC_ID_MSRLE,
CODEC_ID_MSVIDEO1,
CODEC_ID_IDCIN,
CODEC_ID_8BPS,
CODEC_ID_SMC,
CODEC_ID_FLIC,
CODEC_ID_TRUEMOTION1,
CODEC_ID_VMDVIDEO,
CODEC_ID_MSZH,
CODEC_ID_ZLIB,
CODEC_ID_QTRLE,
CODEC_ID_TSCC,
CODEC_ID_ULTI,
CODEC_ID_QDRAW,
CODEC_ID_VIXL,
CODEC_ID_QPEG,
CODEC_ID_PNG,
CODEC_ID_PPM,
CODEC_ID_PBM,
CODEC_ID_PGM,
CODEC_ID_PGMYUV,
CODEC_ID_PAM,
CODEC_ID_FFVHUFF,
CODEC_ID_RV30,
CODEC_ID_RV40,
CODEC_ID_VC1,
CODEC_ID_WMV3,
CODEC_ID_LOCO,
CODEC_ID_WNV1,
CODEC_ID_AASC,
CODEC_ID_INDEO2,
CODEC_ID_FRAPS,
CODEC_ID_TRUEMOTION2,
CODEC_ID_BMP,
CODEC_ID_CSCD,
CODEC_ID_MMVIDEO,
CODEC_ID_ZMBV,
CODEC_ID_AVS,
CODEC_ID_SMACKVIDEO,
CODEC_ID_NUV,
CODEC_ID_KMVC,
CODEC_ID_FLASHSV,
CODEC_ID_CAVS,
CODEC_ID_JPEG2000,
CODEC_ID_VMNC,
CODEC_ID_VP5,
CODEC_ID_VP6,
CODEC_ID_VP6F,
CODEC_ID_TARGA,
CODEC_ID_DSICINVIDEO,
CODEC_ID_TIERTEXSEQVIDEO,
CODEC_ID_TIFF,
CODEC_ID_GIF,
CODEC_ID_DXA,
CODEC_ID_DNXHD,
CODEC_ID_THP,
CODEC_ID_SGI,
CODEC_ID_C93,
CODEC_ID_BETHSOFTVID,
CODEC_ID_PTX,
CODEC_ID_TXD,
CODEC_ID_VP6A,
CODEC_ID_AMV,
CODEC_ID_VB,
CODEC_ID_PCX,
CODEC_ID_SUNRAST,
CODEC_ID_INDEO4,
CODEC_ID_INDEO5,
CODEC_ID_MIMIC,
CODEC_ID_RL2,
CODEC_ID_ESCAPE124,
CODEC_ID_DIRAC,
CODEC_ID_BFI,
CODEC_ID_CMV,
CODEC_ID_MOTIONPIXELS,
CODEC_ID_TGV,
CODEC_ID_TGQ,
CODEC_ID_TQI,
CODEC_ID_AURA,
CODEC_ID_AURA2,
CODEC_ID_V210X,
CODEC_ID_TMV,
CODEC_ID_V210,
CODEC_ID_DPX,
CODEC_ID_MAD,
CODEC_ID_FRWU,
CODEC_ID_FLASHSV2,
CODEC_ID_CDGRAPHICS,
CODEC_ID_R210,
CODEC_ID_ANM,
CODEC_ID_BINKVIDEO,
CODEC_ID_IFF_ILBM,
CODEC_ID_IFF_BYTERUN1,
CODEC_ID_KGV1,
CODEC_ID_YOP,
CODEC_ID_VP8,
CODEC_ID_PICTOR,
CODEC_ID_ANSI,
CODEC_ID_A64_MULTI,
CODEC_ID_A64_MULTI5,
CODEC_ID_R10K,
CODEC_ID_MXPEG,
CODEC_ID_LAGARITH,
CODEC_ID_PRORES,
CODEC_ID_JV,
CODEC_ID_DFA,
CODEC_ID_WMV3IMAGE,
CODEC_ID_VC1IMAGE,
CODEC_ID_UTVIDEO,
CODEC_ID_BMV_VIDEO,
CODEC_ID_VBLE,
CODEC_ID_DXTORY,
CODEC_ID_V410,
CODEC_ID_XWD,
CODEC_ID_CDXL,
CODEC_ID_XBM,
CODEC_ID_ZEROCODEC,
CODEC_ID_MSS1,
CODEC_ID_MSA1,
CODEC_ID_TSCC2,
CODEC_ID_MTS2,
CODEC_ID_CLLC,
CODEC_ID_Y41P = MKBETAG('Y','4','1','P'),
CODEC_ID_ESCAPE130 = MKBETAG('E','1','3','0'),
CODEC_ID_EXR = MKBETAG('0','E','X','R'),
CODEC_ID_AVRP = MKBETAG('A','V','R','P'),
CODEC_ID_G2M = MKBETAG( 0 ,'G','2','M'),
CODEC_ID_AVUI = MKBETAG('A','V','U','I'),
CODEC_ID_AYUV = MKBETAG('A','Y','U','V'),
CODEC_ID_V308 = MKBETAG('V','3','0','8'),
CODEC_ID_V408 = MKBETAG('V','4','0','8'),
CODEC_ID_YUV4 = MKBETAG('Y','U','V','4'),
CODEC_ID_SANM = MKBETAG('S','A','N','M'),
CODEC_ID_PAF_VIDEO = MKBETAG('P','A','F','V'),
CODEC_ID_SNOW = AV_CODEC_ID_SNOW,
/* various PCM "codecs" */
CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
CODEC_ID_PCM_S16LE = 0x10000,
CODEC_ID_PCM_S16BE,
CODEC_ID_PCM_U16LE,
CODEC_ID_PCM_U16BE,
CODEC_ID_PCM_S8,
CODEC_ID_PCM_U8,
CODEC_ID_PCM_MULAW,
CODEC_ID_PCM_ALAW,
CODEC_ID_PCM_S32LE,
CODEC_ID_PCM_S32BE,
CODEC_ID_PCM_U32LE,
CODEC_ID_PCM_U32BE,
CODEC_ID_PCM_S24LE,
CODEC_ID_PCM_S24BE,
CODEC_ID_PCM_U24LE,
CODEC_ID_PCM_U24BE,
CODEC_ID_PCM_S24DAUD,
CODEC_ID_PCM_ZORK,
CODEC_ID_PCM_S16LE_PLANAR,
CODEC_ID_PCM_DVD,
CODEC_ID_PCM_F32BE,
CODEC_ID_PCM_F32LE,
CODEC_ID_PCM_F64BE,
CODEC_ID_PCM_F64LE,
CODEC_ID_PCM_BLURAY,
CODEC_ID_PCM_LXF,
CODEC_ID_S302M,
CODEC_ID_PCM_S8_PLANAR,
/* various ADPCM codecs */
CODEC_ID_ADPCM_IMA_QT = 0x11000,
CODEC_ID_ADPCM_IMA_WAV,
CODEC_ID_ADPCM_IMA_DK3,
CODEC_ID_ADPCM_IMA_DK4,
CODEC_ID_ADPCM_IMA_WS,
CODEC_ID_ADPCM_IMA_SMJPEG,
CODEC_ID_ADPCM_MS,
CODEC_ID_ADPCM_4XM,
CODEC_ID_ADPCM_XA,
CODEC_ID_ADPCM_ADX,
CODEC_ID_ADPCM_EA,
CODEC_ID_ADPCM_G726,
CODEC_ID_ADPCM_CT,
CODEC_ID_ADPCM_SWF,
CODEC_ID_ADPCM_YAMAHA,
CODEC_ID_ADPCM_SBPRO_4,
CODEC_ID_ADPCM_SBPRO_3,
CODEC_ID_ADPCM_SBPRO_2,
CODEC_ID_ADPCM_THP,
CODEC_ID_ADPCM_IMA_AMV,
CODEC_ID_ADPCM_EA_R1,
CODEC_ID_ADPCM_EA_R3,
CODEC_ID_ADPCM_EA_R2,
CODEC_ID_ADPCM_IMA_EA_SEAD,
CODEC_ID_ADPCM_IMA_EA_EACS,
CODEC_ID_ADPCM_EA_XAS,
CODEC_ID_ADPCM_EA_MAXIS_XA,
CODEC_ID_ADPCM_IMA_ISS,
CODEC_ID_ADPCM_G722,
CODEC_ID_ADPCM_IMA_APC,
CODEC_ID_VIMA = MKBETAG('V','I','M','A'),
/* AMR */
CODEC_ID_AMR_NB = 0x12000,
CODEC_ID_AMR_WB,
/* RealAudio codecs*/
CODEC_ID_RA_144 = 0x13000,
CODEC_ID_RA_288,
/* various DPCM codecs */
CODEC_ID_ROQ_DPCM = 0x14000,
CODEC_ID_INTERPLAY_DPCM,
CODEC_ID_XAN_DPCM,
CODEC_ID_SOL_DPCM,
/* audio codecs */
CODEC_ID_MP2 = 0x15000,
CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3
CODEC_ID_AAC,
CODEC_ID_AC3,
CODEC_ID_DTS,
CODEC_ID_VORBIS,
CODEC_ID_DVAUDIO,
CODEC_ID_WMAV1,
CODEC_ID_WMAV2,
CODEC_ID_MACE3,
CODEC_ID_MACE6,
CODEC_ID_VMDAUDIO,
CODEC_ID_FLAC,
CODEC_ID_MP3ADU,
CODEC_ID_MP3ON4,
CODEC_ID_SHORTEN,
CODEC_ID_ALAC,
CODEC_ID_WESTWOOD_SND1,
CODEC_ID_GSM, ///< as in Berlin toast format
CODEC_ID_QDM2,
CODEC_ID_COOK,
CODEC_ID_TRUESPEECH,
CODEC_ID_TTA,
CODEC_ID_SMACKAUDIO,
CODEC_ID_QCELP,
CODEC_ID_WAVPACK,
CODEC_ID_DSICINAUDIO,
CODEC_ID_IMC,
CODEC_ID_MUSEPACK7,
CODEC_ID_MLP,
CODEC_ID_GSM_MS, /* as found in WAV */
CODEC_ID_ATRAC3,
CODEC_ID_VOXWARE,
CODEC_ID_APE,
CODEC_ID_NELLYMOSER,
CODEC_ID_MUSEPACK8,
CODEC_ID_SPEEX,
CODEC_ID_WMAVOICE,
CODEC_ID_WMAPRO,
CODEC_ID_WMALOSSLESS,
CODEC_ID_ATRAC3P,
CODEC_ID_EAC3,
CODEC_ID_SIPR,
CODEC_ID_MP1,
CODEC_ID_TWINVQ,
CODEC_ID_TRUEHD,
CODEC_ID_MP4ALS,
CODEC_ID_ATRAC1,
CODEC_ID_BINKAUDIO_RDFT,
CODEC_ID_BINKAUDIO_DCT,
CODEC_ID_AAC_LATM,
CODEC_ID_QDMC,
CODEC_ID_CELT,
CODEC_ID_G723_1,
CODEC_ID_G729,
CODEC_ID_8SVX_EXP,
CODEC_ID_8SVX_FIB,
CODEC_ID_BMV_AUDIO,
CODEC_ID_RALF,
CODEC_ID_IAC,
CODEC_ID_ILBC,
CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'),
CODEC_ID_SONIC = MKBETAG('S','O','N','C'),
CODEC_ID_SONIC_LS = MKBETAG('S','O','N','L'),
CODEC_ID_PAF_AUDIO = MKBETAG('P','A','F','A'),
CODEC_ID_OPUS = MKBETAG('O','P','U','S'),
/* subtitle codecs */
CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
CODEC_ID_DVD_SUBTITLE = 0x17000,
CODEC_ID_DVB_SUBTITLE,
CODEC_ID_TEXT, ///< raw UTF-8 text
CODEC_ID_XSUB,
CODEC_ID_SSA,
CODEC_ID_MOV_TEXT,
CODEC_ID_HDMV_PGS_SUBTITLE,
CODEC_ID_DVB_TELETEXT,
CODEC_ID_SRT,
CODEC_ID_MICRODVD = MKBETAG('m','D','V','D'),
CODEC_ID_EIA_608 = MKBETAG('c','6','0','8'),
CODEC_ID_JACOSUB = MKBETAG('J','S','U','B'),
CODEC_ID_SAMI = MKBETAG('S','A','M','I'),
CODEC_ID_REALTEXT = MKBETAG('R','T','X','T'),
CODEC_ID_SUBVIEWER = MKBETAG('S','u','b','V'),
/* other specific kind of codecs (generally used for attachments) */
CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs.
CODEC_ID_TTF = 0x18000,
CODEC_ID_BINTEXT = MKBETAG('B','T','X','T'),
CODEC_ID_XBIN = MKBETAG('X','B','I','N'),
CODEC_ID_IDF = MKBETAG( 0 ,'I','D','F'),
CODEC_ID_OTF = MKBETAG( 0 ,'O','T','F'),
CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like CODEC_ID_NONE) but lavf should attempt to identify it
CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
* stream (only used by libavformat) */
CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
* stream (only used by libavformat) */
CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information.
#endif /* AVCODEC_OLD_CODEC_IDS_H */

View File

@ -1,230 +0,0 @@
/*
* VDA HW acceleration
*
* copyright (c) 2011 Sebastien Zwickert
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_VDA_H
#define AVCODEC_VDA_H
/**
* @file
* @ingroup lavc_codec_hwaccel_vda
* Public libavcodec VDA header.
*/
#include "libavcodec/avcodec.h"
#include <stdint.h>
// emmintrin.h is unable to compile with -std=c99 -Werror=missing-prototypes
// http://openradar.appspot.com/8026390
#undef __GNUC_STDC_INLINE__
#define Picture QuickdrawPicture
#include <VideoDecodeAcceleration/VDADecoder.h>
#undef Picture
#include "libavcodec/version.h"
// extra flags not defined in VDADecoder.h
enum {
kVDADecodeInfo_Asynchronous = 1UL << 0,
kVDADecodeInfo_FrameDropped = 1UL << 1
};
/**
* @defgroup lavc_codec_hwaccel_vda VDA
* @ingroup lavc_codec_hwaccel
*
* @{
*/
/**
* This structure is used to provide the necessary configurations and data
* to the VDA FFmpeg HWAccel implementation.
*
* The application must make it available as AVCodecContext.hwaccel_context.
*/
struct vda_context {
/**
* VDA decoder object.
*
* - encoding: unused
* - decoding: Set/Unset by libavcodec.
*/
VDADecoder decoder;
/**
* The Core Video pixel buffer that contains the current image data.
*
* encoding: unused
* decoding: Set by libavcodec. Unset by user.
*/
CVPixelBufferRef cv_buffer;
/**
* Use the hardware decoder in synchronous mode.
*
* encoding: unused
* decoding: Set by user.
*/
int use_sync_decoding;
/**
* The frame width.
*
* - encoding: unused
* - decoding: Set/Unset by user.
*/
int width;
/**
* The frame height.
*
* - encoding: unused
* - decoding: Set/Unset by user.
*/
int height;
/**
* The frame format.
*
* - encoding: unused
* - decoding: Set/Unset by user.
*/
int format;
/**
* The pixel format for output image buffers.
*
* - encoding: unused
* - decoding: Set/Unset by user.
*/
OSType cv_pix_fmt_type;
/**
* unused
*/
uint8_t *priv_bitstream;
/**
* unused
*/
int priv_bitstream_size;
/**
* unused
*/
int priv_allocated_size;
/**
* Use av_buffer to manage buffer.
* When the flag is set, the CVPixelBuffers returned by the decoder will
* be released automatically, so you have to retain them if necessary.
* Not setting this flag may cause memory leak.
*
* encoding: unused
* decoding: Set by user.
*/
int use_ref_buffer;
};
/** Create the video decoder. */
int ff_vda_create_decoder(struct vda_context *vda_ctx,
uint8_t *extradata,
int extradata_size);
/** Destroy the video decoder. */
int ff_vda_destroy_decoder(struct vda_context *vda_ctx);
/**
* This struct holds all the information that needs to be passed
* between the caller and libavcodec for initializing VDA decoding.
* Its size is not a part of the public ABI, it must be allocated with
* av_vda_alloc_context() and freed with av_free().
*/
typedef struct AVVDAContext {
/**
* VDA decoder object. Created and freed by the caller.
*/
VDADecoder decoder;
/**
* The output callback that must be passed to VDADecoderCreate.
* Set by av_vda_alloc_context().
*/
VDADecoderOutputCallback output_callback;
/**
* CVPixelBuffer Format Type that VDA will use for decoded frames; set by
* the caller.
*/
OSType cv_pix_fmt_type;
} AVVDAContext;
/**
* Allocate and initialize a VDA context.
*
* This function should be called from the get_format() callback when the caller
* selects the AV_PIX_FMT_VDA format. The caller must then create the decoder
* object (using the output callback provided by libavcodec) that will be used
* for VDA-accelerated decoding.
*
* When decoding with VDA is finished, the caller must destroy the decoder
* object and free the VDA context using av_free().
*
* @return the newly allocated context or NULL on failure
*/
AVVDAContext *av_vda_alloc_context(void);
/**
* This is a convenience function that creates and sets up the VDA context using
* an internal implementation.
*
* @param avctx the corresponding codec context
*
* @return >= 0 on success, a negative AVERROR code on failure
*/
int av_vda_default_init(AVCodecContext *avctx);
/**
* This is a convenience function that creates and sets up the VDA context using
* an internal implementation.
*
* @param avctx the corresponding codec context
* @param vdactx the VDA context to use
*
* @return >= 0 on success, a negative AVERROR code on failure
*/
int av_vda_default_init2(AVCodecContext *avctx, AVVDAContext *vdactx);
/**
* This function must be called to free the VDA context initialized with
* av_vda_default_init().
*
* @param avctx the corresponding codec context
*/
void av_vda_default_free(AVCodecContext *avctx);
/**
* @}
*/
#endif /* AVCODEC_VDA_H */

View File

@ -28,8 +28,8 @@
#include "libavutil/version.h"
#define LIBAVCODEC_VERSION_MAJOR 58
#define LIBAVCODEC_VERSION_MINOR 11
#define LIBAVCODEC_VERSION_MICRO 101
#define LIBAVCODEC_VERSION_MINOR 59
#define LIBAVCODEC_VERSION_MICRO 102
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
LIBAVCODEC_VERSION_MINOR, \
@ -132,6 +132,9 @@
#ifndef FF_API_NEXT
#define FF_API_NEXT (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_UNSANITIZED_BITRATES
#define FF_API_UNSANITIZED_BITRATES (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#endif /* AVCODEC_VERSION_H */

View File

@ -36,17 +36,15 @@
* into component streams, and the reverse process of muxing - writing supplied
* data in a specified container format. It also has an @ref lavf_io
* "I/O module" which supports a number of protocols for accessing the data (e.g.
* file, tcp, http and others). Before using lavf, you need to call
* av_register_all() to register all compiled muxers, demuxers and protocols.
* file, tcp, http and others).
* Unless you are absolutely sure you won't use libavformat's network
* capabilities, you should also call avformat_network_init().
*
* A supported input format is described by an AVInputFormat struct, conversely
* an output format is described by AVOutputFormat. You can iterate over all
* registered input/output formats using the av_iformat_next() /
* av_oformat_next() functions. The protocols layer is not part of the public
* API, so you can only get the names of supported protocols with the
* avio_enum_protocols() function.
* input/output formats using the av_demuxer_iterate / av_muxer_iterate() functions.
* The protocols layer is not part of the public API, so you can only get the names
* of supported protocols with the avio_enum_protocols() function.
*
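
A hedged sketch of the replacement for the removed registration and iteration calls: no av_register_all() is needed anymore, and the compiled-in demuxers can be walked with av_demuxer_iterate() (muxers analogously with av_muxer_iterate()).

    #include <stdio.h>
    #include <libavformat/avformat.h>

    int main(void)
    {
        void *opaque = NULL;
        const AVInputFormat *ifmt;
        while ((ifmt = av_demuxer_iterate(&opaque)))
            printf("demuxer: %s\n", ifmt->name);
        return 0;
    }
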
* Main lavf structure used for both muxing and demuxing is AVFormatContext,
* which exports all information about the file being read or written. As with
@ -177,8 +175,8 @@
* Otherwise, if AVPacket.buf is NULL, the packet data is backed by a
* static storage somewhere inside the demuxer and the packet is only valid
* until the next av_read_frame() call or closing the file. If the caller
* requires a longer lifetime, av_dup_packet() will make an av_malloc()ed copy
* of it.
* requires a longer lifetime, av_packet_make_refcounted() will ensure that
* the data is reference counted, copying the data if necessary.
* In both cases, the packet must be freed with av_packet_unref() when it is no
* longer needed.
*
@ -532,7 +530,16 @@ typedef struct AVOutputFormat {
* New public fields should be added right above.
*****************************************************************
*/
struct AVOutputFormat *next;
/**
* The ff_const59 define is not part of the public API and will
* be removed without further warning.
*/
#if FF_API_AVIOFORMAT
#define ff_const59
#else
#define ff_const59 const
#endif
ff_const59 struct AVOutputFormat *next;
/**
* size of private data so that it can be allocated in the wrapper
*/
@ -646,7 +653,7 @@ typedef struct AVInputFormat {
/**
* Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS,
* AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH,
* AVFMT_NOTIMESTAMPS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH,
* AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
*/
int flags;
@ -676,7 +683,7 @@ typedef struct AVInputFormat {
* New public fields should be added right above.
*****************************************************************
*/
struct AVInputFormat *next;
ff_const59 struct AVInputFormat *next;
/**
* Raw demuxers store their codec ID here.
@ -693,7 +700,7 @@ typedef struct AVInputFormat {
* The buffer provided is guaranteed to be AVPROBE_PADDING_SIZE bytes
* big so you do not have to check for that unless you need more.
*/
int (*read_probe)(AVProbeData *);
int (*read_probe)(const AVProbeData *);
/**
* Read the format header and initialize the AVFormatContext
@ -845,6 +852,8 @@ typedef struct AVStreamInternal AVStreamInternal;
#define AV_DISPOSITION_CAPTIONS 0x10000
#define AV_DISPOSITION_DESCRIPTIONS 0x20000
#define AV_DISPOSITION_METADATA 0x40000
#define AV_DISPOSITION_DEPENDENT 0x80000 ///< dependent audio stream (mix_type=0 in mpegts)
#define AV_DISPOSITION_STILL_IMAGE 0x100000 ///< still images in video stream (still_picture_flag=1 in mpegts)
/**
* Options for behavior on timestamp wrap detection.
@ -1101,6 +1110,13 @@ typedef struct AVStream {
*/
int stream_identifier;
/**
* Details of the MPEG-TS program which created this stream.
*/
int program_num;
int pmt_version;
int pmt_stream_idx;
int64_t interleaver_chunk_size;
int64_t interleaver_chunk_duration;
@ -1258,6 +1274,7 @@ typedef struct AVProgram {
int program_num;
int pmt_pid;
int pcr_pid;
int pmt_version;
/*****************************************************************
* All fields below this line are not part of the public API. They
@ -1336,14 +1353,14 @@ typedef struct AVFormatContext {
*
* Demuxing only, set by avformat_open_input().
*/
struct AVInputFormat *iformat;
ff_const59 struct AVInputFormat *iformat;
/**
* The output container format.
*
* Muxing only, must be set by the caller before avformat_write_header().
*/
struct AVOutputFormat *oformat;
ff_const59 struct AVOutputFormat *oformat;
/**
* Format private data. This is an AVOptions-enabled struct
@ -1473,7 +1490,9 @@ typedef struct AVFormatContext {
* This flag is mainly intended for testing.
*/
#define AVFMT_FLAG_BITEXACT 0x0400
#define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Enable RTP MP4A-LATM payload
#if FF_API_LAVF_MP4A_LATM
#define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Deprecated, does nothing.
#endif
#define AVFMT_FLAG_SORT_DTS 0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down)
#define AVFMT_FLAG_PRIV_OPT 0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted)
#if FF_API_LAVF_KEEPSIDE_FLAG
@ -1925,6 +1944,13 @@ typedef struct AVFormatContext {
* - decoding: set by user
*/
int max_streams;
/**
* Skip duration calculation in estimate_timings_from_pts.
* - encoding: unused
* - decoding: set by user
*/
int skip_estimate_duration_from_pts;
} AVFormatContext;
#if FF_API_FORMAT_GET_SET
@ -2192,7 +2218,7 @@ AVProgram *av_new_program(AVFormatContext *s, int id);
* @return >= 0 in case of success, a negative AVERROR code in case of
* failure
*/
int avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oformat,
int avformat_alloc_output_context2(AVFormatContext **ctx, ff_const59 AVOutputFormat *oformat,
const char *format_name, const char *filename);
/**
@ -2203,7 +2229,7 @@ int avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oforma
/**
* Find AVInputFormat based on the short name of the input format.
*/
AVInputFormat *av_find_input_format(const char *short_name);
ff_const59 AVInputFormat *av_find_input_format(const char *short_name);
/**
* Guess the file format.
@ -2212,7 +2238,7 @@ AVInputFormat *av_find_input_format(const char *short_name);
* @param is_opened Whether the file is already opened; determines whether
* demuxers with or without AVFMT_NOFILE are probed.
*/
AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened);
ff_const59 AVInputFormat *av_probe_input_format(ff_const59 AVProbeData *pd, int is_opened);
/**
* Guess the file format.
@ -2226,7 +2252,7 @@ AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened);
* If the score is <= AVPROBE_SCORE_MAX / 4 it is recommended
* to retry with a larger probe buffer.
*/
AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max);
ff_const59 AVInputFormat *av_probe_input_format2(ff_const59 AVProbeData *pd, int is_opened, int *score_max);
/**
* Guess the file format.
@ -2235,7 +2261,7 @@ AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score
* demuxers with or without AVFMT_NOFILE are probed.
* @param score_ret The score of the best detection.
*/
AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret);
ff_const59 AVInputFormat *av_probe_input_format3(ff_const59 AVProbeData *pd, int is_opened, int *score_ret);
/**
* Probe a bytestream to determine the input format. Each time a probe returns
@ -2253,14 +2279,14 @@ AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score
* the maximal score is AVPROBE_SCORE_MAX
* AVERROR code otherwise
*/
int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,
int av_probe_input_buffer2(AVIOContext *pb, ff_const59 AVInputFormat **fmt,
const char *url, void *logctx,
unsigned int offset, unsigned int max_probe_size);
/**
* Like av_probe_input_buffer2() but returns 0 on success
*/
int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
int av_probe_input_buffer(AVIOContext *pb, ff_const59 AVInputFormat **fmt,
const char *url, void *logctx,
unsigned int offset, unsigned int max_probe_size);
@ -2283,7 +2309,7 @@ int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
*
* @note If you want to use custom IO, preallocate the format context and set its pb field.
*/
int avformat_open_input(AVFormatContext **ps, const char *url, AVInputFormat *fmt, AVDictionary **options);
int avformat_open_input(AVFormatContext **ps, const char *url, ff_const59 AVInputFormat *fmt, AVDictionary **options);
attribute_deprecated
int av_demuxer_open(AVFormatContext *ic);
@ -2668,14 +2694,14 @@ int av_write_trailer(AVFormatContext *s);
* @param mime_type if non-NULL checks if mime_type matches with the
* MIME type of the registered formats
*/
AVOutputFormat *av_guess_format(const char *short_name,
ff_const59 AVOutputFormat *av_guess_format(const char *short_name,
const char *filename,
const char *mime_type);
/**
* Guess the codec ID based upon muxer and filename.
*/
enum AVCodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
enum AVCodecID av_guess_codec(ff_const59 AVOutputFormat *fmt, const char *short_name,
const char *filename, const char *mime_type,
enum AVMediaType type);

View File

@ -236,7 +236,7 @@ typedef struct AVIOContext {
int (*write_packet)(void *opaque, uint8_t *buf, int buf_size);
int64_t (*seek)(void *opaque, int64_t offset, int whence);
int64_t pos; /**< position in the file of the current buffer */
int eof_reached; /**< true if eof reached */
int eof_reached; /**< true if was unable to read due to error or eof */
int write_flag; /**< true if open for writing */
int max_packet_size;
unsigned long checksum;
@ -566,14 +566,34 @@ static av_always_inline int64_t avio_tell(AVIOContext *s)
int64_t avio_size(AVIOContext *s);
/**
* feof() equivalent for AVIOContext.
* @return non zero if and only if end of file
* Similar to feof() but also returns nonzero on read errors.
* @return non zero if and only if at end of file or a read error happened when reading.
*/
int avio_feof(AVIOContext *s);
/** @warning Writes up to 4 KiB per call */
/**
* Writes a formatted string to the context.
* @return number of bytes written, < 0 on error.
*/
int avio_printf(AVIOContext *s, const char *fmt, ...) av_printf_format(2, 3);
/**
* Write a NULL terminated array of strings to the context.
* Usually you don't need to use this function directly but its macro wrapper,
* avio_print.
*/
void avio_print_string_array(AVIOContext *s, const char *strings[]);
/**
* Write strings (const char *) to the context.
* This is a convenience macro around avio_print_string_array and it
* automatically creates the string array from the variable argument list.
* For simple string concatenations this function is more performant than using
* avio_printf since it does not need a temporary buffer.
*/
#define avio_print(s, ...) \
avio_print_string_array(s, (const char*[]){__VA_ARGS__, NULL})
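
A hedged sketch contrasting the two: avio_printf() goes through a format string (and a temporary buffer), while the avio_print() macro concatenates plain strings directly; pb is assumed to be an AVIOContext opened for writing.

    #include <libavformat/avio.h>

    static void write_entry(AVIOContext *pb, const char *title, int duration)
    {
        avio_printf(pb, "duration=%d\n", duration);  /* formatted output */
        avio_print(pb, "title=", title, "\n");       /* plain concatenation, no temp buffer */
    }
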
/**
* Force flushing of buffered data.
*

View File

@ -32,7 +32,7 @@
// Major bumping may affect Ticket5467, 5421, 5451(compatibility with Chromium)
// Also please add any ticket numbers that you believe might be affected here
#define LIBAVFORMAT_VERSION_MAJOR 58
#define LIBAVFORMAT_VERSION_MINOR 9
#define LIBAVFORMAT_VERSION_MINOR 33
#define LIBAVFORMAT_VERSION_MICRO 100
#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
@ -70,6 +70,9 @@
#ifndef FF_API_HLS_WRAP
#define FF_API_HLS_WRAP (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_HLS_USE_LOCALTIME
#define FF_API_HLS_USE_LOCALTIME (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_LAVF_KEEPSIDE_FLAG
#define FF_API_LAVF_KEEPSIDE_FLAG (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
@ -94,6 +97,15 @@
#ifndef FF_API_NEXT
#define FF_API_NEXT (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_DASH_MIN_SEG_DURATION
#define FF_API_DASH_MIN_SEG_DURATION (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_LAVF_MP4A_LATM
#define FF_API_LAVF_MP4A_LATM (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_AVIOFORMAT
#define FF_API_AVIOFORMAT (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_R_FRAME_RATE

View File

@ -1,6 +0,0 @@
#include "version.h"
#if FF_API_AUDIOCONVERT
#include "channel_layout.h"
#endif

View File

@ -66,7 +66,7 @@
#endif
/**
* Assert that floating point opperations can be executed.
* Assert that floating point operations can be executed.
*
* This will av_assert0() that the cpu is not in MMX state on X86
*/

View File

@ -274,16 +274,21 @@ char *av_strireplace(const char *str, const char *from, const char *to);
/**
* Thread safe basename.
* @param path the path, on DOS both \ and / are considered separators.
* @param path the string to parse, on DOS both \ and / are considered separators.
* @return pointer to the basename substring.
* If path does not contain a slash, the function returns a copy of path.
* If path is a NULL pointer or points to an empty string, a pointer
* to a string "." is returned.
*/
const char *av_basename(const char *path);
/**
* Thread safe dirname.
* @param path the path, on DOS both \ and / are considered separators.
* @return the path with the separator replaced by the string terminator or ".".
* @note the function may change the input string.
* @param path the string to parse, on DOS both \ and / are considered separators.
* @return A pointer to a string that's the parent directory of path.
* If path is a NULL pointer or points to an empty string, a pointer
* to a string "." is returned.
* @note the function may modify the contents of the path, so copies should be passed.
*/
const char *av_dirname(char *path);
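
A hedged sketch that respects the note above: av_dirname() is given a private copy because it cuts the string in place, while av_basename() can safely see the original.

    #include <stdio.h>
    #include <libavutil/avstring.h>
    #include <libavutil/mem.h>

    static void split_path(const char *path)
    {
        char *copy = av_strdup(path);
        if (!copy)
            return;
        printf("dir:  %s\n", av_dirname(copy));   /* modifies copy */
        printf("base: %s\n", av_basename(path));  /* leaves path untouched */
        av_free(copy);
    }
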
@ -400,6 +405,12 @@ int av_utf8_decode(int32_t *codep, const uint8_t **bufp, const uint8_t *buf_end,
*/
int av_match_list(const char *name, const char *list, char separator);
/**
* See libc sscanf manual for more information.
* Locale-independent sscanf implementation.
*/
int av_sscanf(const char *string, const char *format, ...);
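
A small hedged example of why the locale independence matters: "1.5" parses the same everywhere, even under locales whose scanf would expect a decimal comma.

    #include <libavutil/avstring.h>

    static double parse_ratio(const char *s)
    {
        double v = 0.0;
        return av_sscanf(s, "%lf", &v) == 1 ? v : -1.0;
    }
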
/**
* @}
*/

View File

@ -53,7 +53,7 @@
//rounded division & shift
#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b))
/* assume b>0 */
#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
#define ROUNDED_DIV(a,b) (((a)>=0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
/* Fast a/(1<<b) rounded toward +inf. Assume a>=0 and b>=0 */
#define AV_CEIL_RSHIFT(a,b) (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) \
: ((a) + (1<<(b)) - 1) >> (b))
@ -228,7 +228,7 @@ static av_always_inline av_const int av_clip_intp2_c(int a, int p)
*/
static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
{
if (a & ~((1<<p) - 1)) return -a >> 31 & ((1<<p) - 1);
if (a & ~((1<<p) - 1)) return (~a) >> 31 & ((1<<p) - 1);
else return a;
}
@ -240,7 +240,7 @@ static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
*/
static av_always_inline av_const unsigned av_mod_uintp2_c(unsigned a, unsigned p)
{
return a & ((1 << p) - 1);
return a & ((1U << p) - 1);
}
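
For reference, a few worked values for the helpers touched above; the (~a) and 1U changes only remove signed-overflow corner cases and do not alter these results.

    #include <assert.h>
    #include <libavutil/common.h>

    int main(void)
    {
        assert(av_clip_uintp2_c(300, 8) == 255);  /* above the 8-bit range -> 2^8 - 1 */
        assert(av_clip_uintp2_c(-5, 8)  == 0);    /* negative -> 0 */
        assert(av_mod_uintp2_c(300, 8)  == 44);   /* 300 & 255 */
        assert(ROUNDED_DIV(7, 2)        == 4);    /* (7 + 1) / 2 */
        return 0;
    }
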
/**

View File

@ -54,6 +54,7 @@ typedef enum {
AV_CRC_32_IEEE_LE, /*< reversed bitorder version of AV_CRC_32_IEEE */
AV_CRC_16_ANSI_LE, /*< reversed bitorder version of AV_CRC_16_ANSI */
AV_CRC_24_IEEE,
AV_CRC_8_EBU,
AV_CRC_MAX, /*< Not part of public API! Do not use outside libavutil. */
}AVCRCId;

View File

@ -0,0 +1,205 @@
/**
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_ENCRYPTION_INFO_H
#define AVUTIL_ENCRYPTION_INFO_H
#include <stddef.h>
#include <stdint.h>
typedef struct AVSubsampleEncryptionInfo {
/** The number of bytes that are clear. */
unsigned int bytes_of_clear_data;
/**
* The number of bytes that are protected. If using pattern encryption,
* the pattern applies to only the protected bytes; if not using pattern
* encryption, all these bytes are encrypted.
*/
unsigned int bytes_of_protected_data;
} AVSubsampleEncryptionInfo;
/**
* This describes encryption info for a packet. This contains frame-specific
* info for how to decrypt the packet before passing it to the decoder.
*
* The size of this struct is not part of the public ABI.
*/
typedef struct AVEncryptionInfo {
/** The fourcc encryption scheme, in big-endian byte order. */
uint32_t scheme;
/**
* Only used for pattern encryption. This is the number of 16-byte blocks
* that are encrypted.
*/
uint32_t crypt_byte_block;
/**
* Only used for pattern encryption. This is the number of 16-byte blocks
* that are clear.
*/
uint32_t skip_byte_block;
/**
* The ID of the key used to encrypt the packet. This should always be
* 16 bytes long, but may be changed in the future.
*/
uint8_t *key_id;
uint32_t key_id_size;
/**
* The initialization vector. This may have been zero-filled to be the
* correct block size. This should always be 16 bytes long, but may be
* changed in the future.
*/
uint8_t *iv;
uint32_t iv_size;
/**
* An array of subsample encryption info specifying how parts of the sample
* are encrypted. If there are no subsamples, then the whole sample is
* encrypted.
*/
AVSubsampleEncryptionInfo *subsamples;
uint32_t subsample_count;
} AVEncryptionInfo;
/**
* This describes info used to initialize an encryption key system.
*
* The size of this struct is not part of the public ABI.
*/
typedef struct AVEncryptionInitInfo {
/**
* A unique identifier for the key system this is for, can be NULL if it
* is not known. This should always be 16 bytes, but may change in the
* future.
*/
uint8_t* system_id;
uint32_t system_id_size;
/**
* An array of key IDs this initialization data is for. All IDs are the
* same length. Can be NULL if there are no known key IDs.
*/
uint8_t** key_ids;
/** The number of key IDs. */
uint32_t num_key_ids;
/**
* The number of bytes in each key ID. This should always be 16, but may
* change in the future.
*/
uint32_t key_id_size;
/**
* Key-system specific initialization data. This data is copied directly
* from the file and the format depends on the specific key system. This
* can be NULL if there is no initialization data; in that case, there
* will be at least one key ID.
*/
uint8_t* data;
uint32_t data_size;
/**
* An optional pointer to the next initialization info in the list.
*/
struct AVEncryptionInitInfo *next;
} AVEncryptionInitInfo;
/**
* Allocates an AVEncryptionInfo structure and sub-pointers to hold the given
* number of subsamples. This will allocate pointers for the key ID, IV,
* and subsample entries, set the size members, and zero-initialize the rest.
*
* @param subsample_count The number of subsamples.
* @param key_id_size The number of bytes in the key ID, should be 16.
* @param iv_size The number of bytes in the IV, should be 16.
*
* @return The new AVEncryptionInfo structure, or NULL on error.
*/
AVEncryptionInfo *av_encryption_info_alloc(uint32_t subsample_count, uint32_t key_id_size, uint32_t iv_size);
/**
* Allocates an AVEncryptionInfo structure with a copy of the given data.
* @return The new AVEncryptionInfo structure, or NULL on error.
*/
AVEncryptionInfo *av_encryption_info_clone(const AVEncryptionInfo *info);
/**
* Frees the given encryption info object. This MUST NOT be used to free the
* side-data data pointer, that should use normal side-data methods.
*/
void av_encryption_info_free(AVEncryptionInfo *info);
/**
* Creates a copy of the AVEncryptionInfo that is contained in the given side
* data. The resulting object should be passed to av_encryption_info_free()
* when done.
*
* @return The new AVEncryptionInfo structure, or NULL on error.
*/
AVEncryptionInfo *av_encryption_info_get_side_data(const uint8_t *side_data, size_t side_data_size);
/**
* Allocates and initializes side data that holds a copy of the given encryption
* info. The resulting pointer should be either freed using av_free or given
* to av_packet_add_side_data().
*
* @return The new side-data pointer, or NULL.
*/
uint8_t *av_encryption_info_add_side_data(
const AVEncryptionInfo *info, size_t *side_data_size);
/**
* Allocates an AVEncryptionInitInfo structure and sub-pointers to hold the
* given sizes. This will allocate pointers and set all the fields.
*
* @return The new AVEncryptionInitInfo structure, or NULL on error.
*/
AVEncryptionInitInfo *av_encryption_init_info_alloc(
uint32_t system_id_size, uint32_t num_key_ids, uint32_t key_id_size, uint32_t data_size);
/**
* Frees the given encryption init info object. This MUST NOT be used to free
* the side-data data pointer, that should use normal side-data methods.
*/
void av_encryption_init_info_free(AVEncryptionInitInfo* info);
/**
* Creates a copy of the AVEncryptionInitInfo that is contained in the given
* side data. The resulting object should be passed to
* av_encryption_init_info_free() when done.
*
* @return The new AVEncryptionInitInfo structure, or NULL on error.
*/
AVEncryptionInitInfo *av_encryption_init_info_get_side_data(
const uint8_t* side_data, size_t side_data_size);
/**
* Allocates and initializes side data that holds a copy of the given encryption
* init info. The resulting pointer should be either freed using av_free or
* given to av_packet_add_side_data().
*
* @return The new side-data pointer, or NULL.
*/
uint8_t *av_encryption_init_info_add_side_data(
const AVEncryptionInitInfo *info, size_t *side_data_size);
#endif /* AVUTIL_ENCRYPTION_INFO_H */
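A minimal usage sketch (not part of this commit): the init info is allocated, serialized with av_encryption_init_info_add_side_data(), and the resulting blob handed to av_packet_add_side_data(). The attach_init_info() helper and its 16-byte system_id/key_id arguments are placeholders for illustration only.

#include <string.h>
#include <libavcodec/avcodec.h>            /* AVPacket, av_packet_add_side_data() */
#include <libavutil/encryption_info.h>
#include <libavutil/mem.h>

static int attach_init_info(AVPacket *pkt, const uint8_t system_id[16],
                            const uint8_t key_id[16])
{
    size_t side_size = 0;
    uint8_t *side;
    int ret;
    /* 16-byte system ID, one 16-byte key ID, no key-system specific data */
    AVEncryptionInitInfo *init = av_encryption_init_info_alloc(16, 1, 16, 0);
    if (!init)
        return AVERROR(ENOMEM);
    memcpy(init->system_id, system_id, 16);
    memcpy(init->key_ids[0], key_id, 16);

    /* Serialize into a side-data blob; the init info itself can then be freed. */
    side = av_encryption_init_info_add_side_data(init, &side_size);
    av_encryption_init_info_free(init);
    if (!side)
        return AVERROR(ENOMEM);

    ret = av_packet_add_side_data(pkt, AV_PKT_DATA_ENCRYPTION_INIT_INFO,
                                  side, side_size);
    if (ret < 0)
        av_free(side);                     /* ownership not taken on failure */
    return ret;
}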

View File

@ -1,5 +1,5 @@
/* Automatically generated by version.sh, do not manually edit! */
#ifndef AVUTIL_FFVERSION_H
#define AVUTIL_FFVERSION_H
#define FFMPEG_VERSION "N-90075-g647fa49495"
#define FFMPEG_VERSION "N-95393-g29dac2927f"
#endif /* AVUTIL_FFVERSION_H */

View File

@ -33,6 +33,8 @@
* allocated buffer or map it with mmap() when available.
* In case of success set *bufptr to the read or mmapped buffer, and
* *size to the size in bytes of the buffer in *bufptr.
* Unlike mmap, this function succeeds with zero-sized files; in this
* case *bufptr will be set to NULL and *size will be set to 0.
* The returned buffer must be released with av_file_unmap().
*
* @param log_offset loglevel offset used for logging

View File

@ -141,6 +141,44 @@ enum AVFrameSideDataType {
* metadata key entry "name".
*/
AV_FRAME_DATA_ICC_PROFILE,
#if FF_API_FRAME_QP
/**
* Implementation-specific description of the format of AV_FRAME_QP_TABLE_DATA.
* The contents of this side data are undocumented and internal; use
* av_frame_set_qp_table() and av_frame_get_qp_table() to access this in a
* meaningful way instead.
*/
AV_FRAME_DATA_QP_TABLE_PROPERTIES,
/**
* Raw QP table data. Its format is described by
* AV_FRAME_DATA_QP_TABLE_PROPERTIES. Use av_frame_set_qp_table() and
* av_frame_get_qp_table() to access this instead.
*/
AV_FRAME_DATA_QP_TABLE_DATA,
#endif
/**
* Timecode which conforms to SMPTE ST 12-1. The data is an array of 4 uint32_t
* where the first uint32_t describes how many (1-3) of the other timecodes are used.
* The timecode format is described in the av_timecode_get_smpte_from_framenum()
* function in libavutil/timecode.c.
*/
AV_FRAME_DATA_S12M_TIMECODE,
/**
* HDR dynamic metadata associated with a video frame. The payload is
* an AVDynamicHDRPlus type and contains information for color
* volume transform - application 4 of SMPTE 2094-40:2016 standard.
*/
AV_FRAME_DATA_DYNAMIC_HDR_PLUS,
/**
* Regions Of Interest: the data is an array of AVRegionOfInterest type; the number of
* array elements is implied by AVFrameSideData.size / AVRegionOfInterest.self_size.
*/
AV_FRAME_DATA_REGIONS_OF_INTEREST,
};
enum AVActiveFormatDescription {
@ -168,6 +206,62 @@ typedef struct AVFrameSideData {
AVBufferRef *buf;
} AVFrameSideData;
/**
* Structure describing a single Region Of Interest.
*
* When multiple regions are defined in a single side-data block, they
* should be ordered from most to least important - some encoders are only
* capable of supporting a limited number of distinct regions, so will have
* to truncate the list.
*
* When overlapping regions are defined, the first region containing a given
* area of the frame applies.
*/
typedef struct AVRegionOfInterest {
/**
* Must be set to the size of this data structure (that is,
* sizeof(AVRegionOfInterest)).
*/
uint32_t self_size;
/**
* Distance in pixels from the top edge of the frame to the top and
* bottom edges and from the left edge of the frame to the left and
* right edges of the rectangle defining this region of interest.
*
* The constraints on a region are encoder dependent, so the region
* actually affected may be slightly larger for alignment or other
* reasons.
*/
int top;
int bottom;
int left;
int right;
/**
* Quantisation offset.
*
* Must be in the range -1 to +1. A value of zero indicates no quality
* change. A negative value asks for better quality (less quantisation),
* while a positive value asks for worse quality (greater quantisation).
*
* The range is calibrated so that the extreme values indicate the
* largest possible offset - if the rest of the frame is encoded with the
* worst possible quality, an offset of -1 indicates that this region
* should be encoded with the best possible quality anyway. Intermediate
* values are then interpolated in some codec-dependent way.
*
* For example, in 10-bit H.264 the quantisation parameter varies between
* -12 and 51. A typical qoffset value of -1/10 therefore indicates that
* this region should be encoded with a QP around one-tenth of the full
* range better than the rest of the frame. So, if most of the frame
* were to be encoded with a QP of around 30, this region would get a QP
* of around 24 (an offset of approximately -1/10 * (51 - -12) = -6.3).
* An extreme value of -1 would indicate that this region should be
* encoded with the best possible quality regardless of the treatment of
* the rest of the frame - that is, should be encoded at a QP of -12.
*/
AVRational qoffset;
} AVRegionOfInterest;
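A hedged sketch of how a caller might attach a single region using the new side-data type; the add_roi() helper and the rectangle coordinates are arbitrary illustration values, not taken from this commit.

#include <libavutil/frame.h>

static int add_roi(AVFrame *frame)
{
    AVFrameSideData *sd = av_frame_new_side_data(frame,
                                                 AV_FRAME_DATA_REGIONS_OF_INTEREST,
                                                 sizeof(AVRegionOfInterest));
    AVRegionOfInterest *roi;
    if (!sd)
        return AVERROR(ENOMEM);

    roi = (AVRegionOfInterest *)sd->data;
    roi->self_size = sizeof(*roi);
    roi->top     = 0;
    roi->bottom  = 128;
    roi->left    = 0;
    roi->right   = 128;
    /* Roughly one tenth of the codec's QP range better than the rest. */
    roi->qoffset = (AVRational){ -1, 10 };
    return 0;
}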
/**
* This structure describes decoded (raw) audio or video data.
*
@ -364,7 +458,6 @@ typedef struct AVFrame {
* that time,
* the decoder reorders values as needed and sets AVFrame.reordered_opaque
* to exactly one of the values provided by the user through AVCodecContext.reordered_opaque
* @deprecated in favor of pkt_pts
*/
int64_t reordered_opaque;
@ -497,6 +590,8 @@ typedef struct AVFrame {
int decode_error_flags;
#define FF_DECODE_ERROR_INVALID_BITSTREAM 1
#define FF_DECODE_ERROR_MISSING_REFERENCE 2
#define FF_DECODE_ERROR_CONCEALMENT_ACTIVE 4
#define FF_DECODE_ERROR_DECODE_SLICES 8
/**
* number of audio channels, only used for audio.
@ -529,6 +624,7 @@ typedef struct AVFrame {
attribute_deprecated
int qscale_type;
attribute_deprecated
AVBufferRef *qp_table_buf;
#endif
/**
@ -800,6 +896,22 @@ AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
enum AVFrameSideDataType type,
int size);
/**
* Add a new side data to a frame from an existing AVBufferRef
*
* @param frame a frame to which the side data should be added
* @param type the type of the added side data
* @param buf an AVBufferRef to add as side data. The ownership of
* the reference is transferred to the frame.
*
* @return newly added side data on success, NULL on error. On failure
* the frame is unchanged and the AVBufferRef remains owned by
* the caller.
*/
AVFrameSideData *av_frame_new_side_data_from_buf(AVFrame *frame,
enum AVFrameSideDataType type,
AVBufferRef *buf);
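The ownership rule above matters in practice: the buffer reference is consumed only on success. A small sketch, assuming the caller has an ICC profile blob to attach (attach_icc() and its arguments are illustrative, not part of this header):

#include <string.h>
#include <libavutil/buffer.h>
#include <libavutil/frame.h>

static int attach_icc(AVFrame *frame, const uint8_t *profile, size_t len)
{
    AVFrameSideData *sd;
    AVBufferRef *buf = av_buffer_alloc(len);
    if (!buf)
        return AVERROR(ENOMEM);
    memcpy(buf->data, profile, len);

    sd = av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_ICC_PROFILE, buf);
    if (!sd) {
        av_buffer_unref(&buf);   /* frame unchanged, the ref is still ours */
        return AVERROR(ENOMEM);
    }
    return 0;                    /* the frame now owns the reference */
}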
/**
* @return a pointer to the side data of a given type on success, NULL if there
* is no side data with such type in this frame.

View File

@ -0,0 +1,343 @@
/*
* Copyright (c) 2018 Mohammad Izadi <moh.izadi at gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_HDR_DYNAMIC_METADATA_H
#define AVUTIL_HDR_DYNAMIC_METADATA_H
#include "frame.h"
#include "rational.h"
/**
* Option for overlapping elliptical pixel selectors in an image.
*/
enum AVHDRPlusOverlapProcessOption {
AV_HDR_PLUS_OVERLAP_PROCESS_WEIGHTED_AVERAGING = 0,
AV_HDR_PLUS_OVERLAP_PROCESS_LAYERING = 1,
};
/**
* Represents the percentile at a specific percentage in
* a distribution.
*/
typedef struct AVHDRPlusPercentile {
/**
* The percentage value corresponding to a specific percentile linearized
* RGB value in the processing window in the scene. The value shall be in
* the range of 0 to 100, inclusive.
*/
uint8_t percentage;
/**
* The linearized maxRGB value at a specific percentile in the processing
* window in the scene. The value shall be in the range of 0 to 1, inclusive
* and in multiples of 0.00001.
*/
AVRational percentile;
} AVHDRPlusPercentile;
/**
* Color transform parameters at a processing window in a dynamic metadata for
* SMPTE 2094-40.
*/
typedef struct AVHDRPlusColorTransformParams {
/**
* The relative x coordinate of the top left pixel of the processing
* window. The value shall be in the range of 0 to 1, inclusive and
* in multiples of 1/(width of Picture - 1). The value 1 corresponds
* to the absolute coordinate of width of Picture - 1. The value for
* first processing window shall be 0.
*/
AVRational window_upper_left_corner_x;
/**
* The relative y coordinate of the top left pixel of the processing
* window. The value shall be in the range of 0 to 1, inclusive and
* in multiples of 1/(height of Picture - 1). The value 1 corresponds
* to the absolute coordinate of height of Picture - 1. The value for
* first processing window shall be 0.
*/
AVRational window_upper_left_corner_y;
/**
* The relative x coordinate of the bottom right pixel of the processing
* window. The value shall be in the range of 0 to 1, inclusive and
* in multiples of 1/(width of Picture - 1). The value 1 corresponds
* to the absolute coordinate of width of Picture - 1. The value for
* first processing window shall be 1.
*/
AVRational window_lower_right_corner_x;
/**
* The relative y coordinate of the bottom right pixel of the processing
* window. The value shall be in the range of 0 to 1, inclusive and
* in multiples of 1/(height of Picture - 1). The value 1 corresponds
* to the absolute coordinate of height of Picture - 1. The value for
* first processing window shall be 1.
*/
AVRational window_lower_right_corner_y;
/**
* The x coordinate of the center position of the concentric internal and
* external ellipses of the elliptical pixel selector in the processing
* window. The value shall be in the range of 0 to (width of Picture - 1),
* inclusive and in multiples of 1 pixel.
*/
uint16_t center_of_ellipse_x;
/**
* The y coordinate of the center position of the concentric internal and
* external ellipses of the elliptical pixel selector in the processing
* window. The value shall be in the range of 0 to (height of Picture - 1),
* inclusive and in multiples of 1 pixel.
*/
uint16_t center_of_ellipse_y;
/**
* The clockwise rotation angle in degree of arc with respect to the
* positive direction of the x-axis of the concentric internal and external
* ellipses of the elliptical pixel selector in the processing window. The
* value shall be in the range of 0 to 180, inclusive and in multiples of 1.
*/
uint8_t rotation_angle;
/**
* The semi-major axis value of the internal ellipse of the elliptical pixel
* selector, in pixels, in the processing window. The value shall be
* in the range of 1 to 65535, inclusive and in multiples of 1 pixel.
*/
uint16_t semimajor_axis_internal_ellipse;
/**
* The semi-major axis value of the external ellipse of the elliptical pixel
* selector, in pixels, in the processing window. The value
* shall not be less than semimajor_axis_internal_ellipse of the current
* processing window. The value shall be in the range of 1 to 65535,
* inclusive and in multiples of 1 pixel.
*/
uint16_t semimajor_axis_external_ellipse;
/**
* The semi-minor axis value of the external ellipse of the elliptical pixel
* selector, in pixels, in the processing window. The value shall be
* in the range of 1 to 65535, inclusive and in multiples of 1 pixel.
*/
uint16_t semiminor_axis_external_ellipse;
/**
* Overlap process option indicates one of the two methods of combining
* rendered pixels in the processing window in an image with at least one
* elliptical pixel selector. For overlapping elliptical pixel selectors
* in an image, overlap_process_option shall have the same value.
*/
enum AVHDRPlusOverlapProcessOption overlap_process_option;
/**
* The maximum of the color components of linearized RGB values in the
* processing window in the scene. The values should be in the range of 0 to
* 1, inclusive and in multiples of 0.00001. maxscl[ 0 ], maxscl[ 1 ], and
* maxscl[ 2 ] correspond to the R, G and B color components respectively.
*/
AVRational maxscl[3];
/**
* The average of linearized maxRGB values in the processing window in the
* scene. The value should be in the range of 0 to 1, inclusive and in
* multiples of 0.00001.
*/
AVRational average_maxrgb;
/**
* The number of linearized maxRGB values at given percentiles in the
* processing window in the scene. The maximum value shall be 15.
*/
uint8_t num_distribution_maxrgb_percentiles;
/**
* The linearized maxRGB values at given percentiles in the
* processing window in the scene.
*/
AVHDRPlusPercentile distribution_maxrgb[15];
/**
* The fraction of selected pixels in the image that contains the brightest
* pixel in the scene. The value shall be in the range of 0 to 1, inclusive
* and in multiples of 0.001.
*/
AVRational fraction_bright_pixels;
/**
* This flag indicates that the metadata for the tone mapping function in
* the processing window is present (for value of 1).
*/
uint8_t tone_mapping_flag;
/**
* The x coordinate of the separation point between the linear part and the
* curved part of the tone mapping function. The value shall be in the range
* of 0 to 1, excluding 0 and in multiples of 1/4095.
*/
AVRational knee_point_x;
/**
* The y coordinate of the separation point between the linear part and the
* curved part of the tone mapping function. The value shall be in the range
* of 0 to 1, excluding 0 and in multiples of 1/4095.
*/
AVRational knee_point_y;
/**
* The number of the intermediate anchor parameters of the tone mapping
* function in the processing window. The maximum value shall be 15.
*/
uint8_t num_bezier_curve_anchors;
/**
* The intermediate anchor parameters of the tone mapping function in the
* processing window in the scene. The values should be in the range of 0
* to 1, inclusive and in multiples of 1/1023.
*/
AVRational bezier_curve_anchors[15];
/**
* This flag shall be equal to 0 in bitstreams conforming to this version of
* this Specification. Other values are reserved for future use.
*/
uint8_t color_saturation_mapping_flag;
/**
* The color saturation gain in the processing window in the scene. The
* value shall be in the range of 0 to 63/8, inclusive and in multiples of
* 1/8. The default value shall be 1.
*/
AVRational color_saturation_weight;
} AVHDRPlusColorTransformParams;
/**
* This struct represents dynamic metadata for color volume transform -
* application 4 of SMPTE 2094-40:2016 standard.
*
* To be used as payload of an AVFrameSideData or AVPacketSideData with the
* appropriate type.
*
* @note The struct should be allocated with
* av_dynamic_hdr_plus_alloc() and its size is not a part of
* the public ABI.
*/
typedef struct AVDynamicHDRPlus {
/**
* Country code by Rec. ITU-T T.35 Annex A. The value shall be 0xB5.
*/
uint8_t itu_t_t35_country_code;
/**
* Application version in the application defining document in ST-2094
* suite. The value shall be set to 0.
*/
uint8_t application_version;
/**
* The number of processing windows. The value shall be in the range
* of 1 to 3, inclusive.
*/
uint8_t num_windows;
/**
* The color transform parameters for every processing window.
*/
AVHDRPlusColorTransformParams params[3];
/**
* The nominal maximum display luminance of the targeted system display,
* in units of 0.0001 candelas per square metre. The value shall be in
* the range of 0 to 10000, inclusive.
*/
AVRational targeted_system_display_maximum_luminance;
/**
* This flag shall be equal to 0 in bit streams conforming to this version
* of this Specification. The value 1 is reserved for future use.
*/
uint8_t targeted_system_display_actual_peak_luminance_flag;
/**
* The number of rows in the targeted_system_display_actual_peak_luminance
* array. The value shall be in the range of 2 to 25, inclusive.
*/
uint8_t num_rows_targeted_system_display_actual_peak_luminance;
/**
* The number of columns in the
* targeted_system_display_actual_peak_luminance array. The value shall be
* in the range of 2 to 25, inclusive.
*/
uint8_t num_cols_targeted_system_display_actual_peak_luminance;
/**
* The normalized actual peak luminance of the targeted system display. The
* values should be in the range of 0 to 1, inclusive and in multiples of
* 1/15.
*/
AVRational targeted_system_display_actual_peak_luminance[25][25];
/**
* This flag shall be equal to 0 in bitstreams conforming to this version of
* this Specification. The value 1 is reserved for future use.
*/
uint8_t mastering_display_actual_peak_luminance_flag;
/**
* The number of rows in the mastering_display_actual_peak_luminance array.
* The value shall be in the range of 2 to 25, inclusive.
*/
uint8_t num_rows_mastering_display_actual_peak_luminance;
/**
* The number of columns in the mastering_display_actual_peak_luminance
* array. The value shall be in the range of 2 to 25, inclusive.
*/
uint8_t num_cols_mastering_display_actual_peak_luminance;
/**
* The normalized actual peak luminance of the mastering display used for
* mastering the image essence. The values should be in the range of 0 to 1,
* inclusive and in multiples of 1/15.
*/
AVRational mastering_display_actual_peak_luminance[25][25];
} AVDynamicHDRPlus;
/**
* Allocate an AVDynamicHDRPlus structure and set its fields to
* default values. The resulting struct can be freed using av_freep().
*
* @return An AVDynamicHDRPlus filled with default values or NULL
* on failure.
*/
AVDynamicHDRPlus *av_dynamic_hdr_plus_alloc(size_t *size);
/**
* Allocate a complete AVDynamicHDRPlus and add it to the frame.
* @param frame The frame which side data is added to.
*
* @return The AVDynamicHDRPlus structure to be filled by caller or NULL
* on failure.
*/
AVDynamicHDRPlus *av_dynamic_hdr_plus_create_side_data(AVFrame *frame);
#endif /* AVUTIL_HDR_DYNAMIC_METADATA_H */
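A minimal sketch of the intended call pattern, assuming the caller already owns a decoded AVFrame; only the mandatory header fields are filled, and the per-window parameters would normally come from the bitstream. The add_hdr_plus() helper is illustrative only.

#include <libavutil/hdr_dynamic_metadata.h>

static int add_hdr_plus(AVFrame *frame)
{
    AVDynamicHDRPlus *hdr = av_dynamic_hdr_plus_create_side_data(frame);
    if (!hdr)
        return AVERROR(ENOMEM);

    hdr->itu_t_t35_country_code = 0xB5;  /* value required by the spec */
    hdr->application_version    = 0;
    hdr->num_windows            = 1;     /* a single processing window */
    /* hdr->params[0] would be filled with the window's transform data. */
    return 0;
}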

View File

@ -41,6 +41,7 @@ typedef struct AVCUDADeviceContextInternal AVCUDADeviceContextInternal;
*/
typedef struct AVCUDADeviceContext {
CUcontext cuda_ctx;
CUstream stream;
AVCUDADeviceContextInternal *internal;
} AVCUDADeviceContext;

View File

@ -58,6 +58,9 @@ typedef struct AVDRMObjectDescriptor {
size_t size;
/**
* Format modifier applied to the object (DRM_FORMAT_MOD_*).
*
* If the format modifier is unknown then this should be set to
* DRM_FORMAT_MOD_INVALID.
*/
uint64_t format_modifier;
} AVDRMObjectDescriptor;

View File

@ -51,4 +51,10 @@ enum AVPixelFormat av_map_videotoolbox_format_to_pixfmt(uint32_t cv_fmt);
*/
uint32_t av_map_videotoolbox_format_from_pixfmt(enum AVPixelFormat pix_fmt);
/**
* Same as av_map_videotoolbox_format_from_pixfmt, but can also map and
* return full-range pixel formats via a flag.
*/
uint32_t av_map_videotoolbox_format_from_pixfmt2(enum AVPixelFormat pix_fmt, bool full_range);
#endif /* AVUTIL_HWCONTEXT_VIDEOTOOLBOX_H */

View File

@ -542,6 +542,21 @@ union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;
# define AV_WN64A(p, v) AV_WNA(64, p, v)
#endif
#if AV_HAVE_BIGENDIAN
# define AV_RLA(s, p) av_bswap##s(AV_RN##s##A(p))
# define AV_WLA(s, p, v) AV_WN##s##A(p, av_bswap##s(v))
#else
# define AV_RLA(s, p) AV_RN##s##A(p)
# define AV_WLA(s, p, v) AV_WN##s##A(p, v)
#endif
#ifndef AV_RL64A
# define AV_RL64A(p) AV_RLA(64, p)
#endif
#ifndef AV_WL64A
# define AV_WL64A(p, v) AV_WLA(64, p, v)
#endif
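For illustration only (not from this hunk): the new 64-bit aligned little-endian macros read and write naturally aligned memory, byte-swapping on big-endian hosts. The bump_counter() helper is a made-up example.

#include <stdint.h>
#include <libavutil/intreadwrite.h>

static uint64_t bump_counter(uint8_t *aligned_buf)
{
    uint64_t v = AV_RL64A(aligned_buf);   /* aligned little-endian read  */
    AV_WL64A(aligned_buf, v + 1);         /* aligned little-endian write */
    return v;
}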
/*
* The AV_COPYxxU macros are suitable for copying data to/from unaligned
* memory locations.

View File

@ -1,66 +0,0 @@
/*
* LZO 1x decompression
* copyright (c) 2006 Reimar Doeffinger
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_LZO_H
#define AVUTIL_LZO_H
/**
* @defgroup lavu_lzo LZO
* @ingroup lavu_crypto
*
* @{
*/
#include <stdint.h>
/** @name Error flags returned by av_lzo1x_decode
* @{ */
/// end of the input buffer reached before decoding finished
#define AV_LZO_INPUT_DEPLETED 1
/// decoded data did not fit into output buffer
#define AV_LZO_OUTPUT_FULL 2
/// a reference to previously decoded data was wrong
#define AV_LZO_INVALID_BACKPTR 4
/// a non-specific error in the compressed bitstream
#define AV_LZO_ERROR 8
/** @} */
#define AV_LZO_INPUT_PADDING 8
#define AV_LZO_OUTPUT_PADDING 12
/**
* @brief Decodes LZO 1x compressed data.
* @param out output buffer
* @param outlen size of output buffer, number of bytes left are returned here
* @param in input buffer
* @param inlen size of input buffer, number of bytes left are returned here
* @return 0 on success, otherwise a combination of the error flags above
*
* Make sure all buffers are appropriately padded, in must provide
* AV_LZO_INPUT_PADDING, out must provide AV_LZO_OUTPUT_PADDING additional bytes.
*/
int av_lzo1x_decode(void *out, int *outlen, const void *in, int *inlen);
/**
* @}
*/
#endif /* AVUTIL_LZO_H */

View File

@ -339,7 +339,7 @@ av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size)
* @warning Unlike av_malloc(), the allocated memory is not guaranteed to be
* correctly aligned.
*/
av_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size);
int av_reallocp_array(void *ptr, size_t nmemb, size_t size);
/**
* Reallocate the given buffer if it is not large enough, otherwise do nothing.
@ -363,10 +363,10 @@ av_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size);
* @endcode
*
* @param[in,out] ptr Already allocated buffer, or `NULL`
* @param[in,out] size Pointer to current size of buffer `ptr`. `*size` is
* changed to `min_size` in case of success or 0 in
* case of failure
* @param[in] min_size New size of buffer `ptr`
* @param[in,out] size Pointer to the size of buffer `ptr`. `*size` is
* updated to the new allocated size, in particular 0
* in case of failure.
* @param[in] min_size Desired minimal size of buffer `ptr`
* @return `ptr` if the buffer is large enough, a pointer to newly reallocated
* buffer if the buffer was not large enough, or `NULL` in case of
* error
@ -397,10 +397,10 @@ void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size);
* @param[in,out] ptr Pointer to pointer to an already allocated buffer.
* `*ptr` will be overwritten with pointer to new
* buffer on success or `NULL` on failure
* @param[in,out] size Pointer to current size of buffer `*ptr`. `*size` is
* changed to `min_size` in case of success or 0 in
* case of failure
* @param[in] min_size New size of buffer `*ptr`
* @param[in,out] size Pointer to the size of buffer `*ptr`. `*size` is
* updated to the new allocated size, in particular 0
* in case of failure.
* @param[in] min_size Desired minimal size of buffer `*ptr`
* @see av_realloc()
* @see av_fast_mallocz()
*/
@ -418,10 +418,10 @@ void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size);
* @param[in,out] ptr Pointer to pointer to an already allocated buffer.
* `*ptr` will be overwritten with pointer to new
* buffer on success or `NULL` on failure
* @param[in,out] size Pointer to current size of buffer `*ptr`. `*size` is
* changed to `min_size` in case of success or 0 in
* case of failure
* @param[in] min_size New size of buffer `*ptr`
* @param[in,out] size Pointer to the size of buffer `*ptr`. `*size` is
* updated to the new allocated size, in particular 0
* in case of failure.
* @param[in] min_size Desired minimal size of buffer `*ptr`
* @see av_fast_malloc()
*/
void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size);
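A short sketch of the reuse pattern these docs describe: the buffer only grows when the requested size exceeds what is already allocated, and *size drops to 0 on failure. The Scratch struct and ensure_scratch() helper are illustrative, not part of the API.

#include <stdint.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>

typedef struct Scratch {
    uint8_t     *buf;
    unsigned int size;
} Scratch;

static int ensure_scratch(Scratch *s, size_t needed)
{
    av_fast_malloc(&s->buf, &s->size, needed);
    if (!s->buf)
        return AVERROR(ENOMEM);   /* on failure s->size has been reset to 0 */
    return 0;
}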

View File

@ -1,177 +0,0 @@
/*
* copyright (c) 2006-2012 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_OLD_PIX_FMTS_H
#define AVUTIL_OLD_PIX_FMTS_H
/*
* This header exists to prevent new pixel formats from being accidentally added
* to the deprecated list.
* Do not include it directly. It will be removed on next major bump
*
* Do not add new items to this list. Use the AVPixelFormat enum instead.
*/
PIX_FMT_NONE = AV_PIX_FMT_NONE,
PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB...
PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR...
PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
PIX_FMT_GRAY8, ///< Y , 8bpp
PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette
PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range
PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range
PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range
#if FF_API_XVMC
PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing
PIX_FMT_XVMC_MPEG2_IDCT,
#endif /* FF_API_XVMC */
PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
PIX_FMT_NV21, ///< as above, but U and V bytes are swapped
PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian
PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian
PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range
PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
#if FF_API_VDPAU
PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
#endif
PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0
PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0
PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1
PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1
PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
#if FF_API_VDPAU
PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
#endif
PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0
PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0
PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1
PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1
PIX_FMT_GRAY8A, ///< 8bit gray, 8bit alpha
PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian
//the following 10 formats have the disadvantage of needing 1 format for each bit depth, thus
//If you want to support multiple bit depths, then using PIX_FMT_YUV420P16* with the bpp stored separately
//is better
PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
PIX_FMT_VDA_VLD, ///< hardware decoding through VDA
#ifdef AV_PIX_FMT_ABI_GIT_MASTER
PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
#endif
PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp
PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big endian
PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little endian
PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big endian
PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little endian
PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big endian
PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little endian
#ifndef AV_PIX_FMT_ABI_GIT_MASTER
PIX_FMT_RGBA64BE=0x123, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
#endif
PIX_FMT_0RGB=0x123+4, ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB...
PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGB0RGB0...
PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR...
PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGR0BGR0...
PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big endian
PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little endian
PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big endian
PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little endian
PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
#endif /* AVUTIL_OLD_PIX_FMTS_H */

View File

@ -287,7 +287,10 @@ typedef struct AVOption {
* This flag only makes sense when AV_OPT_FLAG_EXPORT is also set.
*/
#define AV_OPT_FLAG_READONLY 128
#define AV_OPT_FLAG_BSF_PARAM (1<<8) ///< a generic parameter which can be set by the user for bit stream filtering
#define AV_OPT_FLAG_RUNTIME_PARAM (1<<15) ///< a generic parameter which can be set by the user at runtime
#define AV_OPT_FLAG_FILTERING_PARAM (1<<16) ///< a generic parameter which can be set by the user for filtering
#define AV_OPT_FLAG_DEPRECATED (1<<17) ///< set if option is deprecated, users should refer to AVOption.help text for more information
//FIXME think about enc-audio, ... style flags
/**

View File

@ -154,17 +154,21 @@ typedef struct AVPixFmtDescriptor {
* in some cases be simpler. Or the data can be interpreted purely based on
* the pixel format without using the palette.
* An example of a pseudo-paletted format is AV_PIX_FMT_GRAY8
*
* @deprecated This flag is deprecated, and will be removed. When it is removed,
* the extra palette allocation in AVFrame.data[1] is removed as well. Only
* actual paletted formats (as indicated by AV_PIX_FMT_FLAG_PAL) will have a
* palette. Starting with FFmpeg versions which have this flag deprecated, the
* extra "pseudo" palette is already ignored, and API users are not required to
* allocate a palette for AV_PIX_FMT_FLAG_PSEUDOPAL formats (it was required
* before the deprecation, though).
*/
#define AV_PIX_FMT_FLAG_PSEUDOPAL (1 << 6)
/**
* The pixel format has an alpha channel. This is set on all formats that
* support alpha in some way. The exception is AV_PIX_FMT_PAL8, which can
* carry alpha as part of the palette. Details are explained in the
* AVPixelFormat enum, and are also encoded in the corresponding
* AVPixFmtDescriptor.
*
* The alpha is always straight, never pre-multiplied.
* support alpha in some way, including AV_PIX_FMT_PAL8. The alpha is always
* straight, never pre-multiplied.
*
* If a codec or a filter does not support alpha, it should set all alpha to
* opaque, or use the equivalent pixel formats without alpha component, e.g.
@ -339,7 +343,13 @@ char *av_get_pix_fmt_string(char *buf, int buf_size,
* format writes the values corresponding to the palette
* component c in data[1] to dst, rather than the palette indexes in
* data[0]. The behavior is undefined if the format is not paletted.
* @param dst_element_size size of elements in dst array (2 or 4 byte)
*/
void av_read_image_line2(void *dst, const uint8_t *data[4],
const int linesize[4], const AVPixFmtDescriptor *desc,
int x, int y, int c, int w, int read_pal_component,
int dst_element_size);
void av_read_image_line(uint16_t *dst, const uint8_t *data[4],
const int linesize[4], const AVPixFmtDescriptor *desc,
int x, int y, int c, int w, int read_pal_component);
@ -357,7 +367,12 @@ void av_read_image_line(uint16_t *dst, const uint8_t *data[4],
* @param y the vertical coordinate of the first pixel to write
* @param w the width of the line to write, that is the number of
* values to write to the image line
* @param src_element_size size of elements in src array (2 or 4 byte)
*/
void av_write_image_line2(const void *src, uint8_t *data[4],
const int linesize[4], const AVPixFmtDescriptor *desc,
int x, int y, int c, int w, int src_element_size);
void av_write_image_line(const uint16_t *src, uint8_t *data[4],
const int linesize[4], const AVPixFmtDescriptor *desc,
int x, int y, int c, int w);

View File

@ -42,6 +42,10 @@
* This is stored as BGRA on little-endian CPU architectures and ARGB on
* big-endian CPUs.
*
* @note
* If the resolution is not a multiple of the chroma subsampling factor
* then the chroma plane resolution must be rounded up.
*
* @par
* When the pixel format is palettized RGB32 (AV_PIX_FMT_PAL8), the palettized
* image data is stored in AVFrame.data[0]. The palette is transported in
@ -330,6 +334,20 @@ enum AVPixelFormat {
*/
AV_PIX_FMT_OPENCL,
AV_PIX_FMT_GRAY14BE, ///< Y , 14bpp, big-endian
AV_PIX_FMT_GRAY14LE, ///< Y , 14bpp, little-endian
AV_PIX_FMT_GRAYF32BE, ///< IEEE-754 single precision Y, 32bpp, big-endian
AV_PIX_FMT_GRAYF32LE, ///< IEEE-754 single precision Y, 32bpp, little-endian
AV_PIX_FMT_YUVA422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, big-endian
AV_PIX_FMT_YUVA422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, little-endian
AV_PIX_FMT_YUVA444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, big-endian
AV_PIX_FMT_YUVA444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, little-endian
AV_PIX_FMT_NV24, ///< planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
AV_PIX_FMT_NV42, ///< as above, but U and V bytes are swapped
AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
};
@ -349,6 +367,7 @@ enum AVPixelFormat {
#define AV_PIX_FMT_GRAY9 AV_PIX_FMT_NE(GRAY9BE, GRAY9LE)
#define AV_PIX_FMT_GRAY10 AV_PIX_FMT_NE(GRAY10BE, GRAY10LE)
#define AV_PIX_FMT_GRAY12 AV_PIX_FMT_NE(GRAY12BE, GRAY12LE)
#define AV_PIX_FMT_GRAY14 AV_PIX_FMT_NE(GRAY14BE, GRAY14LE)
#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE)
#define AV_PIX_FMT_YA16 AV_PIX_FMT_NE(YA16BE, YA16LE)
#define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE)
@ -397,12 +416,16 @@ enum AVPixelFormat {
#define AV_PIX_FMT_GBRPF32 AV_PIX_FMT_NE(GBRPF32BE, GBRPF32LE)
#define AV_PIX_FMT_GBRAPF32 AV_PIX_FMT_NE(GBRAPF32BE, GBRAPF32LE)
#define AV_PIX_FMT_GRAYF32 AV_PIX_FMT_NE(GRAYF32BE, GRAYF32LE)
#define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE)
#define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE)
#define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE)
#define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE)
#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE)
#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE)
#define AV_PIX_FMT_YUVA422P12 AV_PIX_FMT_NE(YUVA422P12BE, YUVA422P12LE)
#define AV_PIX_FMT_YUVA444P12 AV_PIX_FMT_NE(YUVA444P12BE, YUVA444P12LE)
#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE)
#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE)
#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE)
@ -433,7 +456,8 @@ enum AVColorPrimaries {
AVCOL_PRI_SMPTEST428_1 = AVCOL_PRI_SMPTE428,
AVCOL_PRI_SMPTE431 = 11, ///< SMPTE ST 431-2 (2011) / DCI P3
AVCOL_PRI_SMPTE432 = 12, ///< SMPTE ST 432-1 (2010) / P3 D65 / Display P3
AVCOL_PRI_JEDEC_P22 = 22, ///< JEDEC P22 phosphors
AVCOL_PRI_EBU3213 = 22, ///< EBU Tech. 3213-E / JEDEC P22 phosphors
AVCOL_PRI_JEDEC_P22 = AVCOL_PRI_EBU3213,
AVCOL_PRI_NB ///< Not part of ABI
};

View File

@ -95,6 +95,14 @@ void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq,
void av_thread_message_queue_set_free_func(AVThreadMessageQueue *mq,
void (*free_func)(void *msg));
/**
* Return the current number of messages in the queue.
*
* @return the current number of messages or AVERROR(ENOSYS) if lavu was built
* without thread support
*/
int av_thread_message_queue_nb_elems(AVThreadMessageQueue *mq);
/**
* Flush the message queue
*

View File

@ -0,0 +1,93 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_TX_H
#define AVUTIL_TX_H
#include <stdint.h>
#include <stddef.h>
typedef struct AVTXContext AVTXContext;
typedef struct AVComplexFloat {
float re, im;
} AVComplexFloat;
typedef struct AVComplexDouble {
double re, im;
} AVComplexDouble;
enum AVTXType {
/**
* Standard complex to complex FFT with sample data type AVComplexFloat.
* Scaling currently unsupported
*/
AV_TX_FLOAT_FFT = 0,
/**
* Standard MDCT with sample data type of float and a scale type of
* float. Length is the frame size, not the window size (which is 2x frame)
*/
AV_TX_FLOAT_MDCT = 1,
/**
* Same as AV_TX_FLOAT_FFT with a data type of AVComplexDouble.
*/
AV_TX_DOUBLE_FFT = 2,
/**
* Same as AV_TX_FLOAT_MDCT with data and scale type of double.
*/
AV_TX_DOUBLE_MDCT = 3,
};
/**
* Function pointer to a function to perform the transform.
*
* @note Using a different context than the one allocated during av_tx_init()
* is not allowed.
*
* @param s the transform context
* @param out the output array
* @param in the input array
* @param stride the input or output stride (depending on transform direction)
* in bytes, currently implemented for all MDCT transforms
*/
typedef void (*av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride);
/**
* Initialize a transform context with the given configuration.
* Currently power-of-two lengths from 4 to 131072 are supported, along with
* any length decomposable into a power of two multiplied by 3, 5 or 15.
*
* @param ctx the context to allocate, will be NULL on error
* @param tx pointer to the transform function pointer to set
* @param type the type of transform
* @param inv whether to do an inverse or a forward transform
* @param len the size of the transform in samples
* @param scale pointer to the value to scale the output if supported by type
* @param flags currently unused
*
* @return 0 on success, negative error code on failure
*/
int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type,
int inv, int len, const void *scale, uint64_t flags);
/**
* Frees a context and sets ctx to NULL; does nothing when ctx == NULL.
*/
void av_tx_uninit(AVTXContext **ctx);
#endif /* AVUTIL_TX_H */
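A hedged sketch of the new API: a forward complex FFT over caller-provided buffers. The run_fft() helper is illustrative; len must satisfy the length constraints documented above.

#include <libavutil/tx.h>

static int run_fft(AVComplexFloat *out, AVComplexFloat *in, int len)
{
    AVTXContext *ctx = NULL;
    av_tx_fn tx;
    float scale = 1.0f;               /* ignored for FFT, used by MDCT */
    int ret = av_tx_init(&ctx, &tx, AV_TX_FLOAT_FFT, 0 /* forward */,
                         len, &scale, 0);
    if (ret < 0)
        return ret;

    /* Stride is in bytes and is currently only honoured by MDCT transforms. */
    tx(ctx, out, in, sizeof(AVComplexFloat));
    av_tx_uninit(&ctx);
    return 0;
}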

View File

@ -79,7 +79,7 @@
*/
#define LIBAVUTIL_VERSION_MAJOR 56
#define LIBAVUTIL_VERSION_MINOR 7
#define LIBAVUTIL_VERSION_MINOR 35
#define LIBAVUTIL_VERSION_MICRO 101
#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
@ -126,6 +126,9 @@
#ifndef FF_API_FRAME_GET_SET
#define FF_API_FRAME_GET_SET (LIBAVUTIL_VERSION_MAJOR < 57)
#endif
#ifndef FF_API_PSEUDOPAL
#define FF_API_PSEUDOPAL (LIBAVUTIL_VERSION_MAJOR < 57)
#endif
/**

View File

@ -29,8 +29,8 @@
#include "libavutil/avutil.h"
#define LIBSWRESAMPLE_VERSION_MAJOR 3
#define LIBSWRESAMPLE_VERSION_MINOR 0
#define LIBSWRESAMPLE_VERSION_MICRO 101
#define LIBSWRESAMPLE_VERSION_MINOR 6
#define LIBSWRESAMPLE_VERSION_MICRO 100
#define LIBSWRESAMPLE_VERSION_INT AV_VERSION_INT(LIBSWRESAMPLE_VERSION_MAJOR, \
LIBSWRESAMPLE_VERSION_MINOR, \

Binary file not shown.

Binary file not shown.

Binary file not shown.

BIN
ThirdParty/ffmpeg/lib/libopus.a vendored Normal file

Binary file not shown.

Binary file not shown.

View File

@ -5,10 +5,10 @@ includedir=/Users/chris/Source/Repos/cog/ThirdParty/ffmpeg/include
Name: libavcodec
Description: FFmpeg codec library
Version: 58.11.101
Requires: libswresample >= 3.0.101, libavutil >= 56.7.101
Version: 58.59.102
Requires: libavutil >= 56.35.101
Requires.private:
Conflicts:
Libs: -L${libdir} -lavcodec -pthread -lm
Libs: -L${libdir} -lavcodec -pthread -liconv -lm -L/usr/local/Cellar/opus/1.3.1/lib -lopus
Libs.private:
Cflags: -I${includedir}

View File

@ -5,8 +5,8 @@ includedir=/Users/chris/Source/Repos/cog/ThirdParty/ffmpeg/include
Name: libavformat
Description: FFmpeg container format library
Version: 58.9.100
Requires: libavcodec >= 58.11.101, libswresample >= 3.0.101, libavutil >= 56.7.101
Version: 58.33.100
Requires: libavcodec >= 58.59.102, libavutil >= 56.35.101
Requires.private:
Conflicts:
Libs: -L${libdir} -lavformat -lm -lz

View File

@ -5,7 +5,7 @@ includedir=/Users/chris/Source/Repos/cog/ThirdParty/ffmpeg/include
Name: libavutil
Description: FFmpeg utility library
Version: 56.7.101
Version: 56.35.101
Requires:
Requires.private:
Conflicts:

View File

@ -5,8 +5,8 @@ includedir=/Users/chris/Source/Repos/cog/ThirdParty/ffmpeg/include
Name: libswresample
Description: FFmpeg audio resampling library
Version: 3.0.101
Requires: libavutil >= 56.7.101
Version: 3.6.100
Requires: libavutil >= 56.35.101
Requires.private:
Conflicts:
Libs: -L${libdir} -lswresample -lm

View File

@ -117,11 +117,12 @@ int main(int argc, char *argv[])
end:
avformat_close_input(&fmt_ctx);
/* note: the internal buffer could have changed, and be != avio_ctx_buffer */
if (avio_ctx) {
if (avio_ctx)
av_freep(&avio_ctx->buffer);
av_freep(&avio_ctx);
}
avio_context_free(&avio_ctx);
av_file_unmap(buffer, buffer_size);
if (ret < 0) {

View File

@ -39,6 +39,35 @@
#define AUDIO_INBUF_SIZE 20480
#define AUDIO_REFILL_THRESH 4096
static int get_format_from_sample_fmt(const char **fmt,
enum AVSampleFormat sample_fmt)
{
int i;
struct sample_fmt_entry {
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
} sample_fmt_entries[] = {
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
};
*fmt = NULL;
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
if (sample_fmt == entry->sample_fmt) {
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
return 0;
}
}
fprintf(stderr,
"sample format %s is not supported as output format\n",
av_get_sample_fmt_name(sample_fmt));
return -1;
}
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame,
FILE *outfile)
{
@ -86,6 +115,9 @@ int main(int argc, char **argv)
size_t data_size;
AVPacket *pkt;
AVFrame *decoded_frame = NULL;
enum AVSampleFormat sfmt;
int n_channels = 0;
const char *fmt;
if (argc <= 2) {
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
@ -172,6 +204,26 @@ int main(int argc, char **argv)
pkt->size = 0;
decode(c, pkt, decoded_frame, outfile);
/* print output PCM information, since raw PCM carries no metadata */
sfmt = c->sample_fmt;
if (av_sample_fmt_is_planar(sfmt)) {
const char *packed = av_get_sample_fmt_name(sfmt);
printf("Warning: the sample format the decoder produced is planar "
"(%s). This example will output the first channel only.\n",
packed ? packed : "?");
sfmt = av_get_packed_sample_fmt(sfmt);
}
n_channels = c->channels;
if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
goto end;
printf("Play the output audio file with the command:\n"
"ffplay -f %s -ac %d -ar %d %s\n",
fmt, n_channels, c->sample_rate,
outfilename);
end:
fclose(outfile);
fclose(f);

View File

@ -95,7 +95,8 @@ int main(int argc, char **argv)
AVPacket *pkt;
if (argc <= 2) {
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
fprintf(stderr, "Usage: %s <input file> <output file>\n"
"Note: the input file must be encoded with mpeg1video.\n", argv[0]);
exit(0);
}
filename = argv[1];

View File

@ -186,7 +186,8 @@ int main(int argc, char **argv)
encode(c, NULL, pkt, f);
/* add sequence end code to have a real MPEG file */
fwrite(endcode, 1, sizeof(endcode), f);
if (codec->id == AV_CODEC_ID_MPEG1VIDEO || codec->id == AV_CODEC_ID_MPEG2VIDEO)
fwrite(endcode, 1, sizeof(endcode), f);
fclose(f);
avcodec_free_context(&c);

View File

@ -289,8 +289,6 @@ int main(int argc, char *argv[])
return 1;
}
avfilter_register_all();
/* Allocate the frame we will be using to store the data. */
frame = av_frame_alloc();
if (!frame) {

View File

@ -74,7 +74,6 @@ static int open_input_file(const char *filename)
if (!dec_ctx)
return AVERROR(ENOMEM);
avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[audio_stream_index]->codecpar);
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
/* init the audio decoder */
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
@ -228,8 +227,6 @@ int main(int argc, char **argv)
exit(1);
}
avfilter_register_all();
if ((ret = open_input_file(argv[1])) < 0)
goto end;
if ((ret = init_filters(filter_descr)) < 0)

View File

@ -29,6 +29,8 @@
#define _XOPEN_SOURCE 600 /* for usleep */
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
@ -77,7 +79,6 @@ static int open_input_file(const char *filename)
if (!dec_ctx)
return AVERROR(ENOMEM);
avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_stream_index]->codecpar);
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
/* init the video decoder */
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
@ -210,19 +211,20 @@ int main(int argc, char **argv)
{
int ret;
AVPacket packet;
AVFrame *frame = av_frame_alloc();
AVFrame *filt_frame = av_frame_alloc();
AVFrame *frame;
AVFrame *filt_frame;
if (!frame || !filt_frame) {
perror("Could not allocate frame");
exit(1);
}
if (argc != 2) {
fprintf(stderr, "Usage: %s file\n", argv[0]);
exit(1);
}
avfilter_register_all();
frame = av_frame_alloc();
filt_frame = av_frame_alloc();
if (!frame || !filt_frame) {
perror("Could not allocate frame");
exit(1);
}
if ((ret = open_input_file(argv[1])) < 0)
goto end;
@ -250,27 +252,25 @@ int main(int argc, char **argv)
goto end;
}
if (ret >= 0) {
frame->pts = frame->best_effort_timestamp;
frame->pts = frame->best_effort_timestamp;
/* push the decoded frame into the filtergraph */
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
break;
}
/* pull filtered frames from the filtergraph */
while (1) {
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break;
if (ret < 0)
goto end;
display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
av_frame_unref(filt_frame);
}
av_frame_unref(frame);
/* push the decoded frame into the filtergraph */
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
break;
}
/* pull filtered frames from the filtergraph */
while (1) {
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break;
if (ret < 0)
goto end;
display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
av_frame_unref(filt_frame);
}
av_frame_unref(frame);
}
}
av_packet_unref(&packet);

View File

@ -4,21 +4,23 @@
*
* HW Acceleration API (video decoding) decode sample
*
* This file is part of FFmpeg.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
@@ -86,7 +88,7 @@ static int decode_write(AVCodecContext *avctx, AVPacket *packet)
return ret;
}
while (ret >= 0) {
while (1) {
if (!(frame = av_frame_alloc()) || !(sw_frame = av_frame_alloc())) {
fprintf(stderr, "Can not alloc frame\n");
ret = AVERROR(ENOMEM);
@@ -138,13 +140,10 @@ static int decode_write(AVCodecContext *avctx, AVPacket *packet)
fail:
av_frame_free(&frame);
av_frame_free(&sw_frame);
if (buffer)
av_freep(&buffer);
av_freep(&buffer);
if (ret < 0)
return ret;
}
return 0;
}
int main(int argc, char *argv[])
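One detail worth noting in the cleanup path above: the `if (buffer)` guard around av_freep() is redundant, since av_freep() (like av_free() and free()) accepts a NULL pointer and additionally resets the pointer it is given. A tiny sketch:

#include <stdint.h>
#include <libavutil/mem.h>

static void freep_demo(void)
{
    uint8_t *buffer = NULL;

    av_freep(&buffer);        /* no-op: freeing NULL is allowed           */
    buffer = av_malloc(4096); /* (allocation may fail; ignored in sketch) */
    av_freep(&buffer);        /* frees the block and sets buffer to NULL  */
}
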
@@ -214,7 +213,6 @@ int main(int argc, char *argv[])
return -1;
decoder_ctx->get_format = get_hw_format;
av_opt_set_int(decoder_ctx, "refcounted_frames", 1, 0);
if (hw_decoder_init(decoder_ctx, type) < 0)
return -1;

View File

@@ -47,6 +47,11 @@ int main (int argc, char **argv)
if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL)))
return ret;
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
return ret;
}
while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
printf("%s=%s\n", tag->key, tag->value);

View File

@@ -285,7 +285,7 @@ static AVFrame *get_audio_frame(OutputStream *ost)
/* check if we want to generate more frames */
if (av_compare_ts(ost->next_pts, ost->enc->time_base,
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
return NULL;
for (j = 0; j <frame->nb_samples; j++) {
@@ -464,7 +464,7 @@ static AVFrame *get_video_frame(OutputStream *ost)
/* check if we want to generate more frames */
if (av_compare_ts(ost->next_pts, c->time_base,
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
return NULL;
/* when we pass a frame to the encoder, it may keep a reference to it
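Both generators in the muxing example (audio above, video here) switch the av_compare_ts() test from `>= 0` to `> 0`. av_compare_ts() returns -1, 0 or 1 depending on whether the first timestamp is before, equal to or after the second, so the stricter comparison still emits the frame whose pts lands exactly on STREAM_DURATION and stops only once it is strictly past it. A small sketch of the predicate (a 10-second target is assumed here; the time base is likewise an assumption):

#include <stdint.h>
#include <libavutil/mathematics.h>

/* Sketch: stop generating only when next_pts is strictly past the target;
 * a pts that converts to exactly 10 s still yields one more frame. */
static int past_duration(int64_t next_pts, AVRational time_base)
{
    return av_compare_ts(next_pts, time_base,
                         10 /* assumed duration, seconds */,
                         (AVRational){ 1, 1 }) > 0;
}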

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2017 Andreas Unterweger
* Copyright (c) 2013-2018 Andreas Unterweger
*
* This file is part of FFmpeg.
*
@@ -387,24 +387,39 @@ static int decode_audio_frame(AVFrame *frame,
}
}
/* Decode the audio frame stored in the temporary packet.
* The input audio stream decoder is used to do this.
* If we are at the end of the file, pass an empty packet to the decoder
* to flush it. */
if ((error = avcodec_decode_audio4(input_codec_context, frame,
data_present, &input_packet)) < 0) {
fprintf(stderr, "Could not decode frame (error '%s')\n",
/* Send the audio frame stored in the temporary packet to the decoder.
* The input audio stream decoder is used to do this. */
if ((error = avcodec_send_packet(input_codec_context, &input_packet)) < 0) {
fprintf(stderr, "Could not send packet for decoding (error '%s')\n",
av_err2str(error));
av_packet_unref(&input_packet);
return error;
}
/* If the decoder has not been flushed completely, we are not finished,
* so that this function has to be called again. */
if (*finished && *data_present)
*finished = 0;
/* Receive one frame from the decoder. */
error = avcodec_receive_frame(input_codec_context, frame);
/* If the decoder asks for more data to be able to decode a frame,
* return indicating that no data is present. */
if (error == AVERROR(EAGAIN)) {
error = 0;
goto cleanup;
/* If the end of the input file is reached, stop decoding. */
} else if (error == AVERROR_EOF) {
*finished = 1;
error = 0;
goto cleanup;
} else if (error < 0) {
fprintf(stderr, "Could not decode frame (error '%s')\n",
av_err2str(error));
goto cleanup;
/* Default case: Return decoded data. */
} else {
*data_present = 1;
goto cleanup;
}
cleanup:
av_packet_unref(&input_packet);
return 0;
return error;
}
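This is the core of the change to transcode_aac: decoding moves from the removed avcodec_decode_audio4() to the send/receive pair, where avcodec_send_packet() feeds one packet (a NULL or empty packet starts draining) and avcodec_receive_frame() is polled until it returns AVERROR(EAGAIN) ("need more input") or AVERROR_EOF ("fully drained"). The same pattern as a free-standing sketch, with a caller-supplied callback standing in for the example's FIFO handling:

#include <libavcodec/avcodec.h>

/* Sketch: decode one packet and hand every produced frame to a callback.
 * Returns 0 on success (including "needs more input"), <0 on error. */
static int decode_packet(AVCodecContext *dec, const AVPacket *pkt,
                         AVFrame *frame, int (*on_frame)(AVFrame *))
{
    int ret = avcodec_send_packet(dec, pkt); /* pkt == NULL flushes */
    if (ret < 0)
        return ret;

    for (;;) {
        ret = avcodec_receive_frame(dec, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                        /* no (more) frames ready */
        if (ret < 0)
            return ret;                      /* genuine decoding error */
        ret = on_frame(frame);
        av_frame_unref(frame);
        if (ret < 0)
            return ret;
    }
}
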
/**
@@ -538,7 +553,7 @@ static int read_decode_convert_and_store(AVAudioFifo *fifo,
AVFrame *input_frame = NULL;
/* Temporary storage for the converted input samples. */
uint8_t **converted_input_samples = NULL;
int data_present;
int data_present = 0;
int ret = AVERROR_EXIT;
/* Initialize temporary storage for one input frame. */
@@ -551,7 +566,7 @@ static int read_decode_convert_and_store(AVAudioFifo *fifo,
/* If we are at the end of the file and there are no more samples
* in the decoder which are delayed, we are actually finished.
* This must not be treated as an error. */
if (*finished && !data_present) {
if (*finished) {
ret = 0;
goto cleanup;
}
@@ -637,7 +652,7 @@ static int64_t pts = 0;
* @param output_format_context Format context of the output file
* @param output_codec_context Codec context of the output file
* @param[out] data_present Indicates whether data has been
* decoded
* encoded
* @return Error code (0 if successful)
*/
static int encode_audio_frame(AVFrame *frame,
@@ -656,29 +671,50 @@ static int encode_audio_frame(AVFrame *frame,
pts += frame->nb_samples;
}
/* Encode the audio frame and store it in the temporary packet.
/* Send the audio frame stored in the temporary packet to the encoder.
* The output audio stream encoder is used to do this. */
if ((error = avcodec_encode_audio2(output_codec_context, &output_packet,
frame, data_present)) < 0) {
fprintf(stderr, "Could not encode frame (error '%s')\n",
error = avcodec_send_frame(output_codec_context, frame);
/* The encoder signals that it has nothing more to encode. */
if (error == AVERROR_EOF) {
error = 0;
goto cleanup;
} else if (error < 0) {
fprintf(stderr, "Could not send packet for encoding (error '%s')\n",
av_err2str(error));
av_packet_unref(&output_packet);
return error;
}
/* Write one audio frame from the temporary packet to the output file. */
if (*data_present) {
if ((error = av_write_frame(output_format_context, &output_packet)) < 0) {
fprintf(stderr, "Could not write frame (error '%s')\n",
av_err2str(error));
av_packet_unref(&output_packet);
return error;
}
av_packet_unref(&output_packet);
/* Receive one encoded frame from the encoder. */
error = avcodec_receive_packet(output_codec_context, &output_packet);
/* If the encoder asks for more data to be able to provide an
* encoded frame, return indicating that no data is present. */
if (error == AVERROR(EAGAIN)) {
error = 0;
goto cleanup;
/* If the last frame has been encoded, stop encoding. */
} else if (error == AVERROR_EOF) {
error = 0;
goto cleanup;
} else if (error < 0) {
fprintf(stderr, "Could not encode frame (error '%s')\n",
av_err2str(error));
goto cleanup;
/* Default case: Return encoded data. */
} else {
*data_present = 1;
}
return 0;
/* Write one audio frame from the temporary packet to the output file. */
if (*data_present &&
(error = av_write_frame(output_format_context, &output_packet)) < 0) {
fprintf(stderr, "Could not write frame (error '%s')\n",
av_err2str(error));
goto cleanup;
}
cleanup:
av_packet_unref(&output_packet);
return error;
}
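Encoding gets the mirror-image rewrite: avcodec_encode_audio2() becomes avcodec_send_frame() plus avcodec_receive_packet(), where AVERROR(EAGAIN) means the encoder needs more input before it can emit a packet and AVERROR_EOF means it has been fully drained. A generic sketch of that shape (the writer callback is an assumption; the example instead sets *data_present and calls av_write_frame()):

#include <libavcodec/avcodec.h>

/* Sketch: push one frame (or NULL to start draining) and write out every
 * packet the encoder has ready. */
static int encode_frame(AVCodecContext *enc, const AVFrame *frame,
                        AVPacket *pkt, int (*write_pkt)(AVPacket *))
{
    int ret = avcodec_send_frame(enc, frame); /* frame == NULL flushes */
    if (ret < 0 && ret != AVERROR_EOF)        /* EOF: already draining */
        return ret;

    for (;;) {
        ret = avcodec_receive_packet(enc, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                         /* nothing (more) ready  */
        if (ret < 0)
            return ret;
        ret = write_pkt(pkt);
        av_packet_unref(pkt);
        if (ret < 0)
            return ret;
    }
}
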
/**
@@ -816,6 +852,7 @@ int main(int argc, char **argv)
int data_written;
/* Flush the encoder as it may have delayed frames. */
do {
data_written = 0;
if (encode_audio_frame(NULL, output_format_context,
output_codec_context, &data_written))
goto cleanup;
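The added `data_written = 0;` in the flush loop is not cosmetic: the rewritten encode_audio_frame() only sets *data_present to 1 when a packet actually comes out and never clears it, so without the per-iteration reset the loop could keep spinning on a stale value after the encoder has run dry. The shape of the drain loop, with the names taken from the hunk above:

/* Sketch: reset the flag every pass; the helper only sets it, never clears it. */
int data_written;
do {
    data_written = 0;                                    /* reset first  */
    if (encode_audio_frame(NULL, output_format_context,  /* NULL = flush */
                           output_codec_context, &data_written))
        goto cleanup;
} while (data_written);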

View File

@@ -172,6 +172,9 @@ static int open_output_file(const char *filename)
enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
}
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
/* Third parameter can be used to pass settings to encoder */
ret = avcodec_open2(enc_ctx, encoder, NULL);
if (ret < 0) {
@@ -183,8 +186,6 @@ static int open_output_file(const char *filename)
av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
return ret;
}
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
out_stream->time_base = enc_ctx->time_base;
stream_ctx[i].enc_ctx = enc_ctx;
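The transcoding example also reorders its encoder setup: AV_CODEC_FLAG_GLOBAL_HEADER must already be set when avcodec_open2() runs, because that is when most encoders decide whether to emit their headers as extradata; setting it afterwards, as the removed lines did, came too late for avcodec_parameters_from_context() to pick it up. A minimal sketch of the corrected order (the helper name is illustrative; the field names match the hunks above):

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

/* Sketch: set the global-header flag, then open, then copy parameters. */
static int open_encoder_for_stream(AVFormatContext *ofmt_ctx,
                                   AVStream *out_stream,
                                   AVCodecContext *enc_ctx, AVCodec *encoder)
{
    int ret;

    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    if ((ret = avcodec_open2(enc_ctx, encoder, NULL)) < 0)
        return ret;

    return avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
}
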
@@ -517,8 +518,6 @@ int main(int argc, char **argv)
return 1;
}
avfilter_register_all();
if ((ret = open_input_file(argv[1])) < 0)
goto end;
if ((ret = open_output_file(argv[2])) < 0)

View File

@@ -1,21 +1,23 @@
/*
* Video Acceleration API (video encoding) encode sample
*
* This file is part of FFmpeg.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**

View File

@@ -1,21 +1,23 @@
/*
* Video Acceleration API (video transcoding) transcode sample
*
* This file is part of FFmpeg.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
@@ -177,7 +179,7 @@ static int dec_enc(AVPacket *pkt, AVCodec *enc_codec)
}
/* set AVCodecContext Parameters for encoder, here we keep them stay
* the same as decoder.
* xxx: now the the sample can't handle resolution change case.
* xxx: now the sample can't handle resolution change case.
*/
encoder_ctx->time_base = av_inv_q(decoder_ctx->framerate);
encoder_ctx->pix_fmt = AV_PIX_FMT_VAAPI;