[Live-devel] OnDemandLiveStream-Server for h264 media

Christian Brümmer itf-freak at gmx.de
Wed May 23 09:03:21 PDT 2012


Hi,

I wrote my own subclasses of FramedSource and
OnDemandServerMediaSubsession to stream an H.264 video encoded by
libav (ffmpeg). I use these classes the same way
testOnDemandRTSPServer.cpp does (as you can see in my main()). When I try
to connect to the RTSP server via VLC, my framed source gets created and
then destroyed again right away (deliverFrame0() and doGetNextFrame() are
never called).

I don't know what I'm doing wrong, so here is my code:

imLiveStreamSource.cpp // derived from FramedSource
###################################################

#include "imLiveStreamSource.h"
#include <GroupsockHelper.hh> // for gettimeofday()

EventTriggerId imLiveStreamSource::eventTriggerId = 0;
unsigned imLiveStreamSource::mReferenceCount = 0;

imLiveStreamSource* imLiveStreamSource::createNew(UsageEnvironment& env, imLiveStreamParameters params)
{
     return new imLiveStreamSource(env, params);
}

imLiveStreamSource::imLiveStreamSource(UsageEnvironment& env, imLiveStreamParameters param)
     : FramedSource(env),
     mReady(true),
     mParameters(param),
     mEncodedVideoFrame(NULL),
     mEncodedVideoFrameSize(0),
//    mIOService(new boost::asio::io_service()),
//    mWork(new boost::asio::io_service::work(*mIOService)),
//    mTimer(*mIOService),
     mEncodingEnabled(true),
     mNextEncodedVideoFrameWanted(false)
{
     if(mReferenceCount == 0)
     {

         av_register_all();
         mOutputFormat = av_guess_format(NULL, "test.h264", NULL);

         if(!mOutputFormat)
         {
             std::cout << "Cannot guess output format! Using mpeg!" << 
std::endl;
             mOutputFormat = av_guess_format("mpeg", NULL, NULL);
         }
         if(!mOutputFormat)
         {
             std::cout << "Could not find suitable output format." << 
std::endl;
             mReady = false;
         }

         mContext = avformat_alloc_context();
         if(!mContext)
         {
             std::cout << "Cannot allocate avformat memory." << std::endl;
             mReady = false;
         }
         mContext->oformat = mOutputFormat;

         mVideoStream = NULL;
         mOutputFormat->audio_codec = CODEC_ID_NONE;
         mVideoStream = addVideoStream(mContext, mOutputFormat->video_codec);

         if(mVideoStream)
             openVideo(mContext, mVideoStream);

         for (int x = 0; x < NUMBER_OF_THREADS; x++)
         {
             //mWorkerThreads.create_thread(boost::bind(&imLiveStreamSource::workerThread, this));
         }

         //mTimer.expires_from_now(boost::posix_time::seconds((int)(1/mParameters.mFrameRate)));
         //mTimer.async_wait(boost::bind(&imLiveStreamSource::encodingThread, this, _1));
     }
     ++mReferenceCount;

     // TODO: local init stuff


     if(eventTriggerId == 0)
     {
         eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0);
     }
}

imLiveStreamSource::~imLiveStreamSource()
{
     // Any instance-specific 'destruction' (i.e., resetting) of the device would be done here:
     //%%% TO BE WRITTEN %%%

     --mReferenceCount;
     if(mReferenceCount == 0)
     {
         //! Free video encoding stuff
         if(mVideoStream)
             closeVideo(mContext, mVideoStream);
         for(unsigned i = 0; i < mContext->nb_streams; i++) // nb_streams is unsigned
         {
             av_freep(&mContext->streams[i]->codec);
             av_freep(&mContext->streams[i]);
         }
         av_free(mContext);

         //! Video streaming stuff
         envir().taskScheduler().deleteEventTrigger(eventTriggerId);
         eventTriggerId = 0;
     }
}

void imLiveStreamSource::doGetNextFrame()
{
     // This function is called (by our 'downstream' object) when it asks for new data.

     // Note: If, for some reason, the source device stops being readable
     // (e.g., it gets closed), then you do the following:
     if(!mReady)
     {
         handleClosure(this);
         return;
     }

     // If a new frame of data is immediately available to be delivered, then do this now:
     if (mNextEncodedVideoFrame) {
         write_video_frame(mContext, mVideoStream);
         deliverFrame();
     }
     else
         mNextEncodedVideoFrameWanted = true;

     // No new data is immediately available to be delivered.  We don't do anything more here.
     // Instead, our event trigger must be called (e.g., from a separate thread) when new data
     // becomes available.
}
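
// For completeness, here is roughly what I intend encodingThread() (the
// handler bound to mTimer in the commented-out code above) to do once the
// thread code is enabled -- a sketch only; encodeNextFrame() is a placeholder
// for my libav encoding call and is not shown here:
void imLiveStreamSource::encodingThread(const boost::system::error_code& /*ec*/)
{
     if (mNextEncodedVideoFrameWanted)
     {
         encodeNextFrame(); // placeholder: fills mEncodedVideoFrame/mEncodedVideoFrameSize
         mNextEncodedVideoFrame = true;        // a frame is now pending
         mNextEncodedVideoFrameWanted = false;

         // Signal the live555 event loop from this non-live555 thread; the
         // scheduler then calls deliverFrame0(), which calls deliverFrame():
         envir().taskScheduler().triggerEvent(eventTriggerId, this);
     }
     // ... re-arm mTimer here for the next frame period ...
}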

void imLiveStreamSource::deliverFrame0(void* clientData)
{
     ((imLiveStreamSource*)clientData)->deliverFrame();
}

void imLiveStreamSource::deliverFrame()
{
     if (!isCurrentlyAwaitingData()) return; // we're not ready for the data yet

     u_int8_t* newFrameDataStart = mEncodedVideoFrame;
     unsigned newFrameSize = mEncodedVideoFrameSize;

     // Deliver the data here:
     if (newFrameSize > fMaxSize) {
         fFrameSize = fMaxSize;
         fNumTruncatedBytes = newFrameSize - fMaxSize;
     } else {
         fFrameSize = newFrameSize;
     }
     gettimeofday(&fPresentationTime, NULL); // If you have a more accurate time - e.g., from
                                             // an encoder - then use that instead.
     // If the device is *not* a 'live source' (e.g., it comes instead from a file or buffer),
     // then set "fDurationInMicroseconds" here.
     memmove(fTo, newFrameDataStart, fFrameSize);
     mNextEncodedVideoFrame = false;

     // After delivering the data, inform the reader that it is now available:
     FramedSource::afterGetting(this);
}

######################################################

imLiveStreamMediaSubsession.cpp // derived from OnDemandServerMediaSubsession

######################################################

imLiveStreamMediaSubsession::imLiveStreamMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource)
   : OnDemandServerMediaSubsession(env, reuseFirstSource)
{
}

imLiveStreamMediaSubsession::~imLiveStreamMediaSubsession()
{
}

imLiveStreamMediaSubsession* imLiveStreamMediaSubsession::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource)
{
   return new imLiveStreamMediaSubsession(env, fileName, reuseFirstSource);
}

FramedSource* imLiveStreamMediaSubsession::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate)
{
   estBitrate = 400; // kbps, estimate ??

   imLiveStreamParameters param;
   param.mBitRate = 400000;
   param.mCodec = "x264";
   param.mFrameRate = 24;
   param.mHeight = 480;
   param.mWidth = 800;
   // Create a framer for the Video Elementary Stream (see my framer sketch after this listing):
   return imLiveStreamSource::createNew(envir(), param);
}

RTPSink* imLiveStreamMediaSubsession::createNewRTPSink(Groupsock* rtpGroupsock,
                                                       unsigned char rtpPayloadTypeIfDynamic,
                                                       FramedSource* /*inputSource*/)
{
     // (see my SPS/PPS question after this listing)
     return H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
}


######################################################
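
One thing I noticed while comparing my subsession with the ones used by
testOnDemandRTSPServer.cpp: H264VideoFileServerMediaSubsession wraps its file
source in a framer before returning it, while my createNewStreamSource()
returns the raw source. If the same is needed here, I assume it would look
like this (a sketch only; H264VideoStreamDiscreteFramer expects one NAL unit
per delivered frame, without the 0x00000001 start code):

FramedSource* imLiveStreamMediaSubsession::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate)
{
   estBitrate = 400; // kbps, estimate

   imLiveStreamParameters param;
   // ... same parameter setup as above ...

   // Wrap the encoder source in a discrete framer, analogous to the framer
   // that H264VideoFileServerMediaSubsession creates for its file source:
   imLiveStreamSource* source = imLiveStreamSource::createNew(envir(), param);
   return H264VideoStreamDiscreteFramer::createNew(envir(), source);
}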
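Related to that, I am not sure whether the H264VideoRTPSink also needs the
SPS/PPS NAL units from my encoder so that auxSDPLine() can build the
sprop-parameter-sets for the SDP (H264VideoFileServerMediaSubsession gets them
by overriding getAuxSDPLine() and playing into a dummy sink). Assuming my
live555 version has the createNew() overload that takes the parameter sets
directly, I imagine something like this (a sketch; mSPS/mPPS/mSPSSize/mPPSSize
are hypothetical members filled from the encoder's extradata):

RTPSink* imLiveStreamMediaSubsession::createNewRTPSink(Groupsock* rtpGroupsock,
                                                       unsigned char rtpPayloadTypeIfDynamic,
                                                       FramedSource* /*inputSource*/)
{
     return H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
                                        mSPS, mSPSSize, mPPS, mPPSSize);
}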

main.cpp

######################################################

#include <liveMedia.hh>
#include <BasicUsageEnvironment.hh>
#include "imLiveStreamMediaSubsession.h" // header name assumed from the .cpp above

UsageEnvironment* env;
Boolean reuseFirstSource = False;

static void announceStream(RTSPServer* rtspServer, ServerMediaSession* sms,
                           char const* streamName, char const* inputFileName); // defined below

int main(int argc, char** argv) {
   // Begin by setting up our usage environment:
   TaskScheduler* scheduler = BasicTaskScheduler::createNew();
   env = BasicUsageEnvironment::createNew(*scheduler);

   UserAuthenticationDatabase* authDB = NULL;

   // Create the RTSP server:
   RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554, authDB);
   if (rtspServer == NULL) {
     *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
     exit(1);
   }

   char const* descriptionString
     = "Session streamed by \"INGAme\"";

   // A H.264 video elementary stream:
   {
     char const* streamName = "h264ESVideoTest";
     char const* inputFileName = "test.264";

     ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);
     sms->addSubsession(imLiveStreamMediaSubsession::createNew(*env, inputFileName, reuseFirstSource));
     rtspServer->addServerMediaSession(sms);

     announceStream(rtspServer, sms, streamName, inputFileName);
   }

   // Also, attempt to create a HTTP server for RTSP-over-HTTP tunneling.
   // Try first with the default HTTP port (80), and then with the alternative HTTP
   // port numbers (8000 and 8080).

   if (rtspServer->setUpTunnelingOverHTTP(80) || rtspServer->setUpTunnelingOverHTTP(8000)
       || rtspServer->setUpTunnelingOverHTTP(8080)) {
     *env << "\n(We use port " << rtspServer->httpServerPortNum() << " for optional RTSP-over-HTTP tunneling.)\n";
   } else {
     *env << "\n(RTSP-over-HTTP tunneling is not available.)\n";
   }

   env->taskScheduler().doEventLoop(); // does not return

   return 0; // only to prevent compiler warning
}

static void announceStream(RTSPServer* rtspServer, ServerMediaSession* sms,
                           char const* streamName, char const* inputFileName) {
   char* url = rtspServer->rtspURL(sms);
   UsageEnvironment& env = rtspServer->envir();
   env << "\n\"" << streamName << "\" stream, from the file \"" << inputFileName << "\"\n";
   env << "Play this stream using the URL \"" << url << "\"\n";
   delete[] url;
}

#########################################

Thank you for taking the time to read this!

Christian

