2012-09-14 212 views
2

我需要實現一個允許兩種類型客戶端連接的服務器。第一種類型必須將實時視頻和音頻傳輸到服務器。第二種類型必須從服務器接收此視頻流。我選擇了H.264作為視頻編碼,Vorbis ogg作為音頻編碼。我想使用RTSP協議將視頻從第一類客戶端傳輸到服務器。我使用ffmpeg中的「libavformat」實現了客戶端部分,用於通過RTSP傳輸視頻和音頻。我的代碼如下所示。

#include "v_out_video_stream.h" 

#include <stdint.h> 
#ifdef __cplusplus 
extern "C" { 
#endif 
#include <libavformat/avformat.h> 
#include <libavutil/opt.h> 
#include <libavutil/avstring.h> 
#include <libavformat/avio.h> 
#ifdef __cplusplus 
} 
#endif 
#include <stdexcept> 

// One-time, process-wide initialization of ffmpeg (libavformat).
// A single static instance guarantees av_register_all() and
// avformat_network_init() run exactly once, before main().
struct VStatticRegistrar 
{ 
    VStatticRegistrar() 
    { 
     av_register_all();        // register all muxers/demuxers/codecs
     avformat_network_init();  // required for network protocols such as RTSP
    } 
}; 

// BUGFIX: identifiers containing a double underscore are reserved for the
// implementation ([lex.name]); renamed from `__registrar`.
VStatticRegistrar s_avRegistrar;

// Pimpl data for VOutVideoStream.
struct VOutVideoStream::Private 
{ 
    // Owning pointer to the libavformat output context; 0 while disconnected.
    AVFormatContext * m_context; 
    // Indices of the audio/video streams inside m_context, used by send()
    // to route packets; -1 until connectToServer() succeeds.
    int m_audioStreamIndex; 
    int m_videoStreamIndex; 

    // Stream parameters captured at construction time.
    int m_videoBitrate; 
    int m_width; 
    int m_height; 
    int m_fps; 
    int m_audioSamplerate; 
}; 

// Records the stream parameters only; no ffmpeg objects are created
// until connectToServer() is called.
VOutVideoStream::VOutVideoStream(int videoBitrate, int width, int height, int fps, int audioSamplerate) 
{ 
    d = new Private(); 
    // Start in the "disconnected" state.
    d->m_context = 0; 
    d->m_videoStreamIndex = -1; 
    d->m_audioStreamIndex = -1; 
    // Remember the encoding parameters for connectToServer().
    d->m_videoBitrate = videoBitrate; 
    d->m_fps = fps; 
    d->m_width = width; 
    d->m_height = height; 
    d->m_audioSamplerate = audioSamplerate; 
} 

bool VOutVideoStream::connectToServer(const std::string& rtp_address, int rtp_port) 
{ 
    assert(! d->m_context); 

    // initalize the AV context 
    d->m_context = avformat_alloc_context(); 
    if(!d->m_context) 
     return false; 
    // get the output format 
    d->m_context->oformat = av_guess_format("rtsp", NULL, NULL); 
    if(! d->m_context->oformat) 
     return false; 

    // try to open the RTSP stream 
    snprintf(d->m_context->filename, sizeof(d->m_context->filename), "rtsp://%s:%d", rtp_address.c_str(), rtp_port); 
    if(avio_open(&d->m_context->pb, d->m_context->filename, AVIO_FLAG_WRITE) < 0) 
     return false; 

    // add an H.264 stream 
    AVStream *stream = avformat_new_stream(d->m_context, NULL); 
    if (! stream) 
     return false; 
    // initalize codec 
    AVCodecContext* codec = stream->codec; 
    if(d->m_context->oformat->flags & AVFMT_GLOBALHEADER) 
     codec->flags |= CODEC_FLAG_GLOBAL_HEADER; 
    codec->codec_id = CODEC_ID_H264; 
    codec->codec_type = AVMEDIA_TYPE_VIDEO; 
    //codec->bit_rate = d->m_videoBitrate; 
    codec->width = d->m_width; 
    codec->height = d->m_height; 
    codec->time_base.den = d->m_fps; 
    codec->time_base.num = 1; 
    d->m_audioStreamIndex = stream->index; 

    stream = avformat_new_stream(d->m_context, NULL); 
    if (! stream) 
     return false; 
    // initalize codec 
    codec = stream->codec; 
    if(d->m_context->oformat->flags & AVFMT_GLOBALHEADER) 
     codec->flags |= CODEC_FLAG_GLOBAL_HEADER; 
    codec->codec_id = CODEC_ID_VORBIS; 
    codec->codec_type = AVMEDIA_TYPE_AUDIO; 
    codec->sample_fmt = AV_SAMPLE_FMT_S16; 
    codec->channels = 2; 
    codec->bit_rate = d->m_audioSamplerate * codec->channels * 16; 
    codec->sample_rate = d->m_audioSamplerate; 
    d->m_videoStreamIndex = stream->index; 
    // write the header 
    return avformat_write_header(d->m_context, NULL) == 0; 
} 

// Tears down the streaming session: closes the AVIO byte context and frees
// the format context allocated in connectToServer(), returning the object
// to the "disconnected" state.
// Precondition: a context exists (connectToServer() was at least attempted).
void VOutVideoStream::disconnect() 
{ 
    assert(d->m_context); 

    // Order matters: close the byte context before freeing the owner context.
    avio_close(d->m_context->pb); 
    avformat_free_context(d->m_context); 
    d->m_context = 0; 
} 

// Closes any open session before releasing the pimpl data.
VOutVideoStream::~VOutVideoStream() 
{ 
    if (d->m_context != 0) 
    { 
        disconnect(); 
    } 
    delete d; 
} 

// Wraps one encoded payload (H.264 NAL unit or audio packet) in an AVPacket
// and writes it to the matching output stream.
// Returns true when av_write_frame() reports success.
bool VOutVideoStream::send(VNetworkAbstractionLayer& nal) 
{ 
    AVPacket p; 
    av_init_packet(&p); 
    // The packet borrows the payload buffer; `nal` must stay alive for the
    // duration of this call.
    p.data = nal.getPayload(); 
    p.size = nal.getSize(); 
    // Route audio packets to the audio stream, everything else to video.
    p.stream_index = nal.getType() == VNetworkAbstractionLayer::AUDIO_PACKET ? d->m_audioStreamIndex : 
                       d->m_videoStreamIndex; 
    // NOTE(review): p.pts/p.dts are left as initialized by av_init_packet —
    // confirm the RTSP muxer accepts packets without explicit timestamps.
    return av_write_frame(d->m_context, &p) >= 0; 
} 

VNetworkAbstractionLayer的定義如下:

// BUGFIX: the previous guard `_V_NETWORK_ABSTRACTION_LAYER_H_` used an
// identifier reserved for the implementation ([lex.name]); renamed.
#ifndef V_NETWORK_ABSTRACTION_LAYER_H 
#define V_NETWORK_ABSTRACTION_LAYER_H 

#include <cs/v_cs_global.h> 

#include <stdint.h> 
#include <cstring> 
#include <boost/noncopyable.hpp> 
#include <boost/enable_shared_from_this.hpp> 

/// Immutable holder for one encoded media packet (an H.264 NAL unit or an
/// audio packet). Owns a private copy of the payload bytes; non-copyable.
/// Instances are created through factory() and shared via
/// VNetworkAbstractionLayerPtr (presumably declared in v_cs_global.h —
/// not visible here).
class VNetworkAbstractionLayer : public boost::enable_shared_from_this<VNetworkAbstractionLayer>, 
           private boost::noncopyable 
{ 
public: 
    // Tag used by the consumer to route the packet to the right stream.
    enum PacketType 
    { 
     AUDIO_PACKET, 
     VIDEO_PACKET 
    }; 

    ~VNetworkAbstractionLayer() { 
     delete[] m_payload; 
    } 

    /// Creates a packet holding a copy of `size` bytes from `payload`.
    static VNetworkAbstractionLayerPtr factory(int size, const uint8_t* payload, PacketType type) { 
     return VNetworkAbstractionLayerPtr(new VNetworkAbstractionLayer(size, payload, type)); 
    } 

    /// Pointer to the owned payload copy; valid for this object's lifetime.
    uint8_t* getPayload() { 
     return m_payload; 
    } 
    /// Payload length in bytes.
    int getSize() const { 
     return m_size; 
    } 
    PacketType getType() const { 
     return m_type; 
    } 

private: 
    VNetworkAbstractionLayer(int size, const uint8_t* payload, PacketType type) : 
     m_size(size), 
     m_payload(new uint8_t[ size ]), 
     m_type(type) 
    { 
     // BUGFIX: memcpy with a null source pointer is undefined behavior
     // even when size == 0; guard the degenerate case.
     if (size > 0) 
         memcpy(m_payload, payload, size); 
    } 

    int m_size;          // payload length in bytes
    uint8_t *m_payload;  // owned copy, released in the destructor
    PacketType m_type;   // audio vs. video routing tag
}; 


#endif // V_NETWORK_ABSTRACTION_LAYER_H 

現在我需要實現服務器。但是我還沒有在libavformat中找到任何'listen'方法。任何人都可以解釋我如何實現RTSP服務器。可能我可以使用任何其他庫嗎?

回答

0

從頭開始編寫RTSP/RTCP/RTP協議棧非常複雜。你可以看看live555庫,它用C++實現了這類協議棧,並且可以與ffmpeg/libav配合使用。