#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
#include <libavutil/time.h>
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>
#include <iostream>
#include <cstdint>

extern "C" {
#include <libavutil/time.h>
}

// Pairs a raw frame with the presentation timestamp it should be
// encoded and muxed with.
struct AVPacketData {
    AVFrame *frame; // borrowed: allocated and freed by the caller (see main loop)
    int64_t pts;    // presentation timestamp (units chosen by the caller)
};

/**
 * Encode one audio frame and mux every resulting packet into the output.
 *
 * @param audio_c    opened AAC encoder context
 * @param oc         output (FLV/RTMP) format context; the audio stream is
 *                   assumed to be the last stream (oc->nb_streams - 1)
 * @param audio_data frame to encode plus its presentation timestamp
 */
void encode_and_send_audio(AVCodecContext *audio_c, AVFormatContext *oc, AVPacketData *audio_data) {
    // av_init_packet() on a stack packet is deprecated; use the
    // allocate/free API instead.
    AVPacket *pkt = av_packet_alloc();
    if (!pkt) {
        std::cerr << "Could not allocate audio packet." << std::endl;
        return;
    }

    // Feed the raw frame to the encoder.
    int ret = avcodec_send_frame(audio_c, audio_data->frame);
    if (ret < 0) {
        std::cerr << "Error sending audio frame to encoder." << std::endl;
        av_packet_free(&pkt);
        return;
    }

    // Drain *all* packets the encoder has ready: one frame may yield zero,
    // one, or several packets. The original code only received the first
    // one and dropped the rest.
    while ((ret = avcodec_receive_packet(audio_c, pkt)) >= 0) {
        pkt->pts = audio_data->pts;
        pkt->dts = audio_data->pts;
        // Frame duration expressed in samples, i.e. ticks of the codec time
        // base {1, sample_rate}. The original `1000 / sample_rate` integer
        // division always evaluated to 0 for any sample_rate > 1000.
        // NOTE(review): assumes the stream time base matches the codec time
        // base — confirm against the muxer setup.
        pkt->duration = audio_data->frame ? audio_data->frame->nb_samples : 0;
        pkt->stream_index = oc->nb_streams - 1; // audio assumed to be the last stream

        // Mux the packet; av_interleaved_write_frame() takes ownership of
        // the packet reference, the extra unref is a harmless no-op.
        int werr = av_interleaved_write_frame(oc, pkt);
        av_packet_unref(pkt);
        if (werr < 0) {
            std::cerr << "Error muxing packet." << std::endl;
            av_packet_free(&pkt);
            return;
        }
    }
    // EAGAIN (needs more input) and EOF (flushed) are normal exits.
    if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
        std::cerr << "Error during encoding." << std::endl;

    av_packet_free(&pkt);
}

/**
 * Encode one video frame and mux every resulting packet into the output.
 *
 * @param video_c    opened H.264 encoder context
 * @param oc         output (FLV/RTMP) format context; the video stream is
 *                   assumed to be the second-to-last stream
 * @param video_data frame to encode plus its presentation timestamp
 */
void encode_and_send_video(AVCodecContext *video_c, AVFormatContext *oc, AVPacketData *video_data) {
    // av_init_packet() on a stack packet is deprecated; use the
    // allocate/free API instead.
    AVPacket *pkt = av_packet_alloc();
    if (!pkt) {
        std::cerr << "Could not allocate video packet." << std::endl;
        return;
    }

    // Feed the raw frame to the encoder.
    int ret = avcodec_send_frame(video_c, video_data->frame);
    if (ret < 0) {
        std::cerr << "Error sending video frame to encoder." << std::endl;
        av_packet_free(&pkt);
        return;
    }

    // Drain *all* packets the encoder has ready: H.264 encoders buffer
    // frames, so one send may yield zero, one, or several packets. The
    // original code only received the first and dropped the rest.
    while ((ret = avcodec_receive_packet(video_c, pkt)) >= 0) {
        pkt->pts = video_data->pts;
        pkt->dts = video_data->pts;
        // One frame at 25 fps. NOTE(review): 40 only makes sense in a
        // millisecond-based time base; the muxer sets the stream time base
        // to {1, 25} — confirm which one the caller intends.
        pkt->duration = 1000 / 25;
        pkt->stream_index = oc->nb_streams - 2; // video assumed second-to-last stream

        // Mux the packet; av_interleaved_write_frame() takes ownership of
        // the packet reference, the extra unref is a harmless no-op.
        int werr = av_interleaved_write_frame(oc, pkt);
        av_packet_unref(pkt);
        if (werr < 0) {
            std::cerr << "Error muxing packet." << std::endl;
            av_packet_free(&pkt);
            return;
        }
    }
    // EAGAIN (needs more input) and EOF (flushed) are normal exits.
    if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
        std::cerr << "Error during encoding." << std::endl;

    av_packet_free(&pkt);
}

int main() {
    const char *rtmp_url = "rtmp://your_rtmp_server/live/stream_name";
    AVFormatContext *oc = nullptr;
    AVCodec *video_codec = nullptr, *audio_codec = nullptr;
    AVCodecContext *video_c = nullptr, *audio_c = nullptr;
    AVStream *video_st = nullptr, *audio_st = nullptr;
    int ret, i;

    av_register_all();
    avformat_network_init();

    // 创建输出上下文
    ret = avformat_alloc_output_context2(&oc, nullptr, "flv", rtmp_url);
    if (ret < 0) {
        std::cerr << "Could not create output context." << std::endl;
        return ret;
    }

    // 查找视频编码器
    video_codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!video_codec) {
        std::cerr << "Video codec not found." << std::endl;
        return -1;
    }

    // 分配视频编码上下文
    video_c = avcodec_alloc_context3(video_codec);
    if (!video_c) {
        std::cerr << "Could not allocate video encoding context." << std::endl;
        return AVERROR(ENOMEM);
    }

    // 设置视频参数
    video_c->bit_rate = 400000; // 400k bit/s
    video_c->width = 352;
    video_c->height = 288;
    video_c->time_base = (AVRational){1, 25}; // 25 fps
    video_c->gop_size = 12; // key frame every 12 frames
    video_c->pix_fmt = AV_PIX_FMT_YUV420P;

    // 添加视频流
    video_st = avformat_new_stream(oc, video_codec);
    if (!video_st) {
        std::cerr << "Failed allocating video stream." << std::endl;
        return AVERROR_UNKNOWN;
    }
    video_st->time_base = (AVRational){1, 25};

    // 将编码器设置到流中
    ret = avcodec_parameters_from_context(video_st->codecpar, video_c);
    if (ret < 0) {
        std::cerr << "Failed to copy codec parameters to output stream codec parameters for video." << std::endl;
        return ret;
    }

    // 查找音频编码器
    audio_codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!audio_codec) {
        std::cerr << "Audio codec not found." << std::endl;
        return -1;
    }

    // 分配音频编码上下文
    audio_c = avcodec_alloc_context3(audio_codec);
    if (!audio_c) {
        std::cerr << "Could not allocate audio encoding context." << std::endl;
        return AVERROR(ENOMEM);
    }

    // 设置音频参数
    audio_c->bit_rate = 64000;
    audio_c->sample_fmt = AV_SAMPLE_FMT_FLTP;
    audio_c->sample_rate = 44100;
    audio_c->channel_layout = AV_CH_LAYOUT_STEREO;
    audio_c->channels = av_get_channel_layout_nb_channels(audio_c->channel_layout);

    // 添加音频流
    audio_st = avformat_new_stream(oc, audio_codec);
    if (!audio_st) {
        std::cerr << "Failed allocating audio stream." << std::endl;
        return AVERROR_UNKNOWN;
    }
    audio_st->time_base = (AVRational){1, audio_c->sample_rate};

    // 将编码器设置到流中
    ret = avcodec_parameters_from_context(audio_st->codecpar, audio_c);
    if (ret < 0) {
        std::cerr << "Failed to copy codec parameters to output stream codec parameters for audio." << std::endl;
        return ret;
    }

    // 打开编码器
    ret = avcodec_open2(video_c, video_codec, nullptr);
    if (ret < 0) {
        std::cerr << "Could not open video codec." << std::endl;
        return ret;
    }

    ret = avcodec_open2(audio_c, audio_codec, nullptr);
    if (ret < 0) {
        std::cerr << "Could not open audio codec." << std::endl;
        return ret;
    }

    // 写入文件头
    if (!(oc->oformat->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, rtmp_url, AVIO_FLAG_WRITE) < 0) {
            std::cerr << "Could not open '" << rtmp_url << "'" << std::endl;
            return AVERROR_IO;
        }
    }
    ret = avformat_write_header(oc, nullptr);
    if (ret < 0) {
        std::cerr << "Error occurred when opening output file" << std::endl;
        return ret;
    }

    // 主循环
    int64_t last_pts = 0; // 用于同步
    while (true) {
        AVPacketData video_data = {av_frame_alloc(), last_pts};
        AVPacketData audio_data = {av_frame_alloc(), last_pts + 1000 / 25}; // 假设视频帧率为25fps

        // 填充视频帧
        // ...
        // 例如: video_data.frame->data[0] = /* raw video data */;
        //      video_data.frame->linesize[0] = /* line size */;
        //      video_data.frame->width = /* width */;
        //      video_data.frame->height = /* height */;

        // 填充音频帧
        // ...
        // 例如: audio_data.frame->data[0] = /* raw audio data */;
        //      audio_data.frame->nb_samples = /* number of samples */;
        //      audio_data.frame->sample_rate = /* sample rate */;
        //      audio_data.frame->channel_layout = /* channel layout */;
        //      audio_data.frame->format = /* sample format */;

        // 编码并发送视频
        encode_and_send_video(video_c, oc, &video_data);

        // 编码并发送音频
        encode_and_send_audio(audio_c, oc, &audio_data);

        // 更新时间戳
        last_pts += 1000 / 25; // 假设视频帧率为25fps

        // 释放帧
        av_frame_free(&video_data.frame);
        av_frame_free(&audio_data.frame);

        // 模拟延时,以便模拟真实的数据流
        av_usleep(1000 * 1000 / 25); // 每秒25帧
    }

    // 写入尾部
    ret = av_write_trailer(oc);
    if (ret < 0) {
        std::cerr << "Error while writing trailer." << std::endl;
        return ret;
    }

    // 清理
    avformat_free_context(oc);
    avcodec_free_context(&video_c);
    avcodec_free_context(&audio_c);

    return 0;
}

// Article outline (from the original post):
//   1. Initialize FFmpeg and the capture devices
//   2. Configure the video and audio encoders
//   3. Main loop: capture, encode, and push the stream
//   4. Clean up resources

// 1. Initialize FFmpeg and the capture devices

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libavdevice/avdevice.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
}

// NOTE(review): second self-contained article sample — it defines another
// main(), so this file cannot be built as a single translation unit.
int main() {
    // Register devices/codecs/formats and initialize networking.
    // (the *_register_all() calls are deprecated no-ops on FFmpeg >= 4.0)
    avdevice_register_all();
    avcodec_register_all();
    av_register_all();
    avformat_network_init();

    // Output: FLV over RTMP. The URL placeholders read (in Chinese):
    // rtmp://[server address]/[application name]/[stream name].
    const char* out_url = "rtmp://[RTMP服务器地址]/[应用名]/[流名]";
    AVFormatContext* pFormatCtx = avformat_alloc_context();
    AVOutputFormat* fmt = av_guess_format("flv", out_url, NULL);
    pFormatCtx->oformat = fmt;

    // Open the video capture device (Video4Linux2, /dev/video0).
    AVFormatContext* video_in_ctx = nullptr;
    AVInputFormat* video_in_fmt = av_find_input_format("v4l2");
    if (avformat_open_input(&video_in_ctx, "/dev/video0", video_in_fmt, NULL) != 0) {
        printf("Couldn't open input video stream.\n");
        return -1;
    }

    // Open the audio capture device (ALSA "default").
    AVFormatContext* audio_in_ctx = nullptr;
    AVInputFormat* audio_in_fmt = av_find_input_format("alsa");
    if (avformat_open_input(&audio_in_ctx, "default", audio_in_fmt, NULL) != 0) {
        printf("Couldn't open input audio stream.\n");
        return -1;
    }
    // ... [the remainder of the sample continues below, outside this main()]
}

// 2. Configure the video and audio encoders.
// Encoder configuration varies with the use case; this is a basic example.
// NOTE(review): from here down to the cleanup section the article's
// statements sit at file scope (the main() above already closed with '}');
// they are meant to be pasted inside it and will not compile as-is.
    // Locate the first video and audio stream in the capture contexts.
    int video_stream_index = -1, audio_stream_index = -1;
    for (int i = 0; i < video_in_ctx->nb_streams; i++) {
        if (video_in_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_stream_index = i;
            break;
        }
    }

    for (int i = 0; i < audio_in_ctx->nb_streams; i++) {
        if (audio_in_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            audio_stream_index = i;
            break;
        }
    }

    // Create and open the H.264 video encoder (640x480 @ 25 fps).
    AVCodec* video_codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    AVCodecContext* video_codec_ctx = avcodec_alloc_context3(video_codec);
    video_codec_ctx->bit_rate = 400000;
    video_codec_ctx->width = 640;
    video_codec_ctx->height = 480;
    video_codec_ctx->time_base = (AVRational){1, 25};
    video_codec_ctx->framerate = (AVRational){25, 1};
    video_codec_ctx->gop_size = 10;
    video_codec_ctx->max_b_frames = 1;
    video_codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
    if (avcodec_open2(video_codec_ctx, video_codec, NULL) < 0) {
        printf("Could not open video codec.\n");
        return -1;
    }

    // Create and open the AAC audio encoder (stereo, 44.1 kHz).
    AVCodec* audio_codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
    AVCodecContext* audio_codec_ctx = avcodec_alloc_context3(audio_codec);
    audio_codec_ctx->bit_rate = 64000;
    audio_codec_ctx->sample_rate = 44100;
    audio_codec_ctx->channel_layout = AV_CH_LAYOUT_STEREO;
    audio_codec_ctx->channels = 2;
    // Take the first sample format the encoder advertises
    // (typically AV_SAMPLE_FMT_FLTP for the native AAC encoder).
    audio_codec_ctx->sample_fmt = audio_codec->sample_fmts[0];
    audio_codec_ctx->time_base = (AVRational){1, audio_codec_ctx->sample_rate};
    if (avcodec_open2(audio_codec_ctx, audio_codec, NULL) < 0) {
        printf("Could not open audio codec.\n");
        return -1;
    }
// 3. Main loop: capture, encode, and push the stream.
// Capture data from the camera and microphone, encode it, and push it to
// the RTMP server.
    // Open the RTMP output stream.
    if (avio_open(&pFormatCtx->pb, out_url, AVIO_FLAG_READ_WRITE) < 0){
        printf("Failed to open output file!\n");
        return -1;
    }

    // Write the container header.
    if (avformat_write_header(pFormatCtx, NULL) < 0) {
        printf("Error occurred when opening output file.\n");
        return -1;
    }

    // Pixel-format converter (here a same-size YUV420P -> YUV420P scale,
    // effectively a pass-through) and an audio resampler
    // (interleaved S16 -> planar FLTP, 44.1 kHz stereo on both sides).
    SwsContext* sws_ctx = sws_getContext(video_codec_ctx->width, video_codec_ctx->height, AV_PIX_FMT_YUV420P,
                                         video_codec_ctx->width, video_codec_ctx->height, AV_PIX_FMT_YUV420P, 
                                         SWS_BILINEAR, NULL, NULL, NULL);
    SwrContext* swr_ctx = swr_alloc_set_opts(NULL, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_FLTP, 44100,
                                             AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100, 0, NULL);
    swr_init(swr_ctx);

    // Main loop (stub — expanded in the next fragment).
    while (1) {
        // Read data from the video/audio devices.
        // ... [per-frame video/audio handling goes here]
    }

    // Cleanup and trailer write.
    // ... [resource cleanup code]
// Main loop: processing the audio/video frames.
// Read frames from the devices, encode them, and send them to the server.
// 3. Main loop (expanded version of the loop stubbed above)
// NOTE(review): `frame` is used below but never declared or allocated in
// this snippet — an AVFrame* from av_frame_alloc() is required.
// NOTE(review): avcodec_send_packet()/avcodec_receive_frame() drive a
// *decoder*, but video_codec_ctx/audio_codec_ctx were opened as encoders
// above; decoding the captured packets needs separate decoder contexts.
AVPacket pkt;
while (1) {
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    // Read one captured video packet.
    if (av_read_frame(video_in_ctx, &pkt) >= 0 && pkt.stream_index == video_stream_index) {
        // Hand the captured packet over for decoding.
        avcodec_send_packet(video_codec_ctx, &pkt);
        while (avcodec_receive_frame(video_codec_ctx, frame) >= 0) {
            // The frame could be converted here (e.g. pixel-format change).
            // Re-encode the video frame.
            avcodec_send_frame(video_codec_ctx, frame);
            while (avcodec_receive_packet(video_codec_ctx, &pkt) >= 0) {
                av_interleaved_write_frame(pFormatCtx, &pkt);
                av_packet_unref(&pkt);
            }
        }
    }

    // Read one captured audio packet.
    if (av_read_frame(audio_in_ctx, &pkt) >= 0 && pkt.stream_index == audio_stream_index) {
        // Hand the captured packet over for decoding.
        avcodec_send_packet(audio_codec_ctx, &pkt);
        while (avcodec_receive_frame(audio_codec_ctx, frame) >= 0) {
            // The frame could be converted here (e.g. resampling).
            // Re-encode the audio frame.
            avcodec_send_frame(audio_codec_ctx, frame);
            while (avcodec_receive_packet(audio_codec_ctx, &pkt) >= 0) {
                av_interleaved_write_frame(pFormatCtx, &pkt);
                av_packet_unref(&pkt);
            }
        }
    }

    av_packet_unref(&pkt);

    // An exit condition for the loop could be added here.
}



// 4. Clean up resources.
// NOTE(review): avcodec_close() is deprecated; avcodec_free_context() is
// the modern counterpart (it also frees the context itself).
avcodec_close(video_codec_ctx);
avcodec_close(audio_codec_ctx);
avformat_close_input(&video_in_ctx);
avformat_close_input(&audio_in_ctx);
avformat_free_context(pFormatCtx);
sws_freeContext(sws_ctx);
swr_free(&swr_ctx);
avformat_network_deinit();
// aliyun (start of the third article sample)
#include <iostream>
#include <string>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libswscale/swscale.h>
}

const char* camera_dev = "/dev/video0"; // video capture device
const char* audio_dev = "default";      // audio capture device
const char* rtmp_url = "rtmp://your_server_address/live/your_stream_key"; // RTMP publish URL

void log_packet(const AVFormatContext* fmt_ctx, const AVPacket* pkt) {
    AVRational* time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
    printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}

int main() {
    av_register_all();
    avformat_network_init();

    AVFormatContext* ifmt_ctx = nullptr;
    if (avformat_open_input(&ifmt_ctx, camera_dev, nullptr, nullptr) < 0) {
        std::cerr << "Cannot open video device\n";
        return -1;
    }
    if (avformat_find_stream_info(ifmt_ctx, nullptr) < 0) {
        std::cerr << "Cannot find stream information\n";
        return -1;
    }

    AVFormatContext* ofmt_ctx = nullptr;
    avformat_alloc_output_context2(&ofmt_ctx, nullptr, "flv", rtmp_url);
    if (!ofmt_ctx) {
        std::cerr << "Cannot allocate output context\n";
        return -1;
    }

    // Copy video & audio stream(s).
    for (unsigned int i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream* in_stream = ifmt_ctx->streams[i];
        AVStream* out_stream = avformat_new_stream(ofmt_ctx, in_stream->codecpar);
        if (!out_stream) {
            std::cerr << "Failed allocating output stream\n";
            return -1;
        }
        avcodec_copy_context(out_stream->codec, in_stream->codec);
        out_stream->codecpar->codec_tag = 0;
    }

    // Open output URL.
    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        if (avio_open(&ofmt_ctx->pb, rtmp_url, AVIO_FLAG_WRITE) < 0) {
            std::cerr << "Failed to open output URL\n";
            return -1;
        }
    }

    // Write file header.
    if (avformat_write_header(ofmt_ctx, nullptr) < 0) {
        std::cerr << "Error occurred when opening output file\n";
        return -1;
    }

    // Start streaming loop.
    AVPacket pkt;
    while (1) {
        AVStream* in_stream, *out_stream;
        if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
            in_stream  = ifmt_ctx->streams[pkt.stream_index];
            out_stream = ofmt_ctx->streams[pkt.stream_index];

            log_packet(ifmt_ctx, &pkt);

            // PTS/DTS conversion if needed here.

            pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
            pkt.pos = -1;

            if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
                std::cerr << "Error muxing packet\n";
            }

            av_packet_unref(&pkt);
        } else {
            break;
        }
    }

    // Write trailer.
    av_write_trailer(ofmt_ctx);

    // Close everything.
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    avformat_close_input(&ifmt_ctx);

    return 0;
}

// Logo
// 技术共进,成长同行——讯飞AI开发者社区 (iFLYTEK AI developer community — article footer)
// 更多推荐 ("More recommendations" — article footer)