FFmpeg color space conversion with sws_scale: a use case
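The example below decodes a video file with libavcodec and uses libswscale (sws_getContext / sws_scale) to convert every decoded frame from the decoder's pixel format to AV_PIX_FMT_GRAY8 (AV_PIX_FMT_YUV444P works the same way). Two raw output files are written: a "mid" file with plane 0 (luma) of the decoded frames, and the destination file with the converted frames. The code is adapted from FFmpeg's filtering_video.c example, with the filtergraph replaced by a plain sws_scale conversion.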

/*
 * Copyright (c) 2010 Nicolas George
 * Copyright (c) 2011 Stefano Sabatini
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * API example for decoding and pixel format conversion with libswscale
 * (adapted from FFmpeg's filtering_video.c example)
 */

#define _XOPEN_SOURCE 600 /* for usleep */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#ifdef __cplusplus
extern "C" {
#endif
#include <libavcodec/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
#ifdef __cplusplus
}
#endif
#include <pthread.h>
#include <string.h> /* memset */
#include <time.h>
#include <chrono>
#include <string>

// Target pixel format for the conversion (AV_PIX_FMT_YUV444P also works)
#define TAS AV_PIX_FMT_GRAY8
/* Leftover from the original filtering example; no filtergraph is built in this program. */
const char *filter_descr = "scale=78:24,transpose=cclock";
/* other way:
   scale=78:24 [scl]; [scl] transpose=cclock // assumes "[in]" and "[out]" to be input output
   pads respectively
 */

static AVFormatContext *fmt_ctx;
static AVCodecContext * dec_ctx;

static int     video_stream_index = -1;
static int64_t last_pts           = AV_NOPTS_VALUE;
SwsContext *   sws_ctx            = NULL;

static FILE *video_dst_file = NULL;
static FILE *video_mid_file = NULL;
static int   video_dst_bufsize;
static int   count = 0;

/* Dump plane 0 of the frame (the luma plane for planar YUV, the whole image for GRAY8). */
static void writeFile(AVCodecContext *ctx, AVFrame *frame, FILE *f) {
    if (!frame) {
        printf("frame is null\n");
        return;
    }
    video_dst_bufsize = 0;

    int Y = frame->linesize[0] * ctx->height;

    video_dst_bufsize = Y;
    printf("Y=%d,allsize=%d\n", Y, video_dst_bufsize);
    fwrite(frame->data[0], 1, Y, f);
    count++;
    printf("decode:%d\n", count);
}

static int open_input_file(const char *filename) {
    int      ret;
    AVCodec *dec;

    if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return ret;
    }

    if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }

    /* select the video stream */
    ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
        return ret;
    }
    video_stream_index = ret;

    /* create decoding context */
    dec_ctx = avcodec_alloc_context3(dec);
    if (!dec_ctx)
        return AVERROR(ENOMEM);
    avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_stream_index]->codecpar);

    /* init the video decoder */
    if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
        return ret;
    }

    return 0;
}

/* Create the libswscale context that converts decoded frames to TAS. */
static int init_filters(AVCodecContext *ctx) {
    int ret = -1;

    /* sws_getContext() allocates and configures the context in one call;
       no separate sws_alloc_context() is needed. */
    sws_ctx = sws_getContext(ctx->width, ctx->height, ctx->pix_fmt, ctx->width, ctx->height,
        TAS, SWS_BICUBIC, NULL, NULL, NULL);
    if (!sws_ctx) {
        printf("sws_getContext error.\n");
        return ret;
    }
    ret = 0;
    return ret;
}

typedef void (*ffmpeg_log_callback)(void *ptr, int level, const char *fmt, va_list vl);

#define LOG_BUF_PREFIX_SIZE 512
#define LOG_BUF_SIZE 1024
static char            logBufPrefix[LOG_BUF_PREFIX_SIZE];
static char            logBuffer[LOG_BUF_SIZE];
static pthread_mutex_t cb_av_log_lock = PTHREAD_MUTEX_INITIALIZER;

static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl) {
    int cnt;
    pthread_mutex_lock(&cb_av_log_lock);
    cnt = snprintf(logBufPrefix, LOG_BUF_PREFIX_SIZE, "%s", fmt);
    cnt = vsnprintf(logBuffer, LOG_BUF_SIZE, logBufPrefix, vl);
    printf("%s", logBuffer);
    pthread_mutex_unlock(&cb_av_log_lock);
}

int main(int argc, char **argv) {
    int      ret;
    AVPacket packet;
    AVFrame *frame;
    AVFrame *filt_frame;
    uint8_t *buffer;
    int      bytes;

    if (argc != 3) {
        fprintf(stderr, "Usage: %s inputfile outfile\n", argv[0]);
        exit(1);
    }

    ffmpeg_log_callback fptrLog = log_callback_null;
    av_log_set_level(AV_LOG_TRACE);
    // av_log_set_flags(AV_LOG_SKIP_REPEATED);
    av_log_set_callback(fptrLog);

    char *video_src_filename = argv[1];
    char *video_dst_filename = argv[2];
    char  video_mid_filename[256];
    memset(video_mid_filename, 0, 256);
    snprintf(video_mid_filename, sizeof(video_mid_filename), "mid-yuv420-%s", video_dst_filename);

    frame      = av_frame_alloc();
    filt_frame = av_frame_alloc();
    if (!frame || !filt_frame) {
        perror("Could not allocate frame");
        exit(1);
    }

    if ((ret = open_input_file(video_src_filename)) < 0)
        goto end;

    video_dst_file = fopen(video_dst_filename, "wb");
    if (!video_dst_file) {
        printf("Could not open destination file %s\n", video_dst_filename);
        goto end;
    }

    video_mid_file = fopen(video_mid_filename, "wb");
    if (!video_mid_file) {
        printf("Could not open destination file %s\n", video_mid_filename);
        goto end;
    }

    if ((ret = init_filters(dec_ctx)) < 0)
        goto end;

    /* allocate the destination image and wire it into filt_frame
       (av_frame_alloc() already initialized the frame itself) */
    bytes  = av_image_get_buffer_size(TAS, dec_ctx->width, dec_ctx->height, 1);
    buffer = (uint8_t *)av_malloc(bytes);
    ret = av_image_fill_arrays(filt_frame->data, filt_frame->linesize, buffer, TAS,
        dec_ctx->width, dec_ctx->height, 1);

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
            break;

        if (packet.stream_index == video_stream_index) {
            ret = avcodec_send_packet(dec_ctx, &packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
                break;
            }

            while (ret >= 0) {
                ret = avcodec_receive_frame(dec_ctx, frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                    break;
                } else if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR,
                        "Error while receiving a frame from the decoder\n");
                    goto end;
                }

                // frame->pts = frame->best_effort_timestamp;
                writeFile(dec_ctx, frame, video_mid_file);

                /* convert the decoded frame to TAS and write it out */
                int sws_ret = sws_scale(sws_ctx, (const uint8_t *const *)frame->data,
                    frame->linesize, 0, frame->height, filt_frame->data, filt_frame->linesize);
                if (sws_ret < 0) {
                    printf("sws_scale err!\n");
                }
                writeFile(dec_ctx, filt_frame, video_dst_file);
                av_frame_unref(frame);
            }
        }
        av_packet_unref(&packet);
    }

    printf("all frames:%d\n", count);

end:
    if (video_dst_file)
        fclose(video_dst_file);
    if (video_mid_file)
        fclose(video_mid_file);
    avcodec_free_context(&dec_ctx);
    avformat_close_input(&fmt_ctx);
    sws_freeContext(sws_ctx);
    av_frame_free(&frame);
    av_frame_free(&filt_frame);
}
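Both output files are raw, headerless frames. With the GRAY8 target, the converted file can be previewed with ffplay, for example (the 1920x1080 size here is hypothetical; adjust -video_size to the real stream dimensions):

ffplay -f rawvideo -pixel_format gray -video_size 1920x1080 out.gray

Note that writeFile() dumps linesize[0] * height bytes per frame, so if the decoder pads its lines the effective raw width is the linesize rather than the display width.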

CMakeLists.txt

# Minimum required CMake version
cmake_minimum_required (VERSION 3.5)

# Project information
project (ffmdecoder-Demo1)
set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin)

set(CMAKE_BUILD_TYPE "Debug")
set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g -fpermissive")
# set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall -fpermissive")

# set(root_path /workspace/depends)

#  if(NOT CMAKE_TOOLCHAIN_FILE)
#    set(CMAKE_TOOLCHAIN_FILE "aarch64.cmake")
#    include(${CMAKE_TOOLCHAIN_FILE})
#    set(root_path /root/workspace/depends)
#  endif()
# message("-- Using toolchain: ${CMAKE_TOOLCHAIN_FILE}")

# dependencies
#set(ENV{PKG_CONFIG_PATH} "${root_path}/simple-x86-omx-ffmpeg5.0/lib/pkgconfig")
set(ENV{PKG_CONFIG_PATH} "/workspace/FFmpeg-n4.5-dev/libffmpeg/lib/pkgconfig") #libffmpeg5.0


find_package(PkgConfig)

pkg_check_modules(FFMPEG REQUIRED  libavformat libavutil  libavcodec libswscale)
message(STATUS "=== FFMPEG_LIBRARIES: ${FFMPEG_LIBRARIES}")
message(STATUS "=== FFMPEG_INCLUDE_DIRS: ${FFMPEG_INCLUDE_DIRS}")
message(STATUS "=== FFMPEG_LIBRARY_DIRS: ${FFMPEG_LIBRARY_DIRS}") 
message(STATUS "=== FFMPEG_CFLAGS: ${FFMPEG_CFLAGS}") 


FOREACH(flag ${FFMPEG_CFLAGS})
	SET(EXTRA_CFLAGS "${EXTRA_CFLAGS} ${flag}")
ENDFOREACH(flag)

SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${EXTRA_CFLAGS}")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${EXTRA_CFLAGS}")


include_directories(${FFMPEG_INCLUDE_DIRS})
link_directories(${FFMPEG_LIBRARY_DIRS})

set(SOURCES main.cpp)

# Build target
add_executable(FFm_Dec ${SOURCES})
target_link_libraries(FFm_Dec ${FFMPEG_LIBRARIES})
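With the PKG_CONFIG_PATH above pointed at a local FFmpeg install, a typical out-of-source build is: mkdir build && cd build && cmake .. && make. The FFm_Dec binary then lands in the build tree's bin/ directory because of EXECUTABLE_OUTPUT_PATH.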

Origin blog.csdn.net/weixin_43360707/article/details/128921021