get format from streamreplicator

Branch: pull/243/head
Author: mpromonet (3 years ago)
Parent: b38bc70b9b
Commit: e7d971b8fb
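Summary: this commit moves RTP-format deduction out of the call sites and into the media subsessions. BaseServerMediaSubsession now derives the MIME format (m_format) from the StreamReplicator's V4L2DeviceSource and its DeviceInterface, so rtpVideoFormat/rtpAudioFormat strings no longer have to be threaded through every factory and constructor. To support that, ALSACapture now implements DeviceInterface directly, the DeviceCaptureAccess<T> template is replaced by a V4l2Capture-specific VideoCaptureAccess wrapper, and getVideoRtpFormat()/getAudioRtpFormat() move from V4l2RTSPServer into BaseServerMediaSubsession. The net effect at a call site (taken from the diff below):

    // before: the caller computes and passes the RTP format string
    subSession.push_back(UnicastServerMediaSubsession::createNew(*this->env(), videoReplicator, rtpVideoFormat));
    // after: the subsession derives the format from the replicator's device
    subSession.push_back(UnicastServerMediaSubsession::createNew(*this->env(), videoReplicator));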

ALSACapture.h
@@ -19,6 +19,8 @@
 #include <alsa/asoundlib.h>
 #include "logger.h"
+#include "DeviceInterface.h"
 struct ALSACaptureParameters
 {
     ALSACaptureParameters(const char* devname, const std::list<snd_pcm_format_t> & formatList, unsigned int sampleRate, unsigned int channels, int verbose) :
@@ -33,7 +35,7 @@ struct ALSACaptureParameters
     int m_verbose;
 };
-class ALSACapture
+class ALSACapture : public DeviceInterface
 {
     public:
         static ALSACapture* createNew(const ALSACaptureParameters & params) ;
@@ -47,15 +49,11 @@ class ALSACapture
     public:
         virtual size_t read(char* buffer, size_t bufferSize);
         virtual int getFd();
-        virtual unsigned long getBufferSize() { return m_bufferSize; }
+        virtual unsigned long getBufferSize() { return m_bufferSize; };
-        virtual int getWidth() {return -1;}
-        virtual int getHeight() {return -1;}
-        virtual int getCaptureFormat() {return -1;}
-        unsigned long getSampleRate() { return m_params.m_sampleRate; }
-        unsigned long getChannels () { return m_params.m_channels; }
-        snd_pcm_format_t getFormat () { return m_fmt; }
+        virtual unsigned long getSampleRate() { return m_params.m_sampleRate; }
+        virtual unsigned long getChannels () { return m_params.m_channels; }
+        virtual int getAudioFormat () { return m_fmt; }
     private:
         snd_pcm_t* m_pcm;

DeviceInterface.h
@@ -19,34 +19,16 @@ class DeviceInterface
 {
     public:
         virtual size_t read(char* buffer, size_t bufferSize) = 0;
-        virtual int getFd() = 0;
-        virtual unsigned long getBufferSize() = 0;
-        virtual int getWidth() = 0;
-        virtual int getHeight() = 0;
-        virtual int getCaptureFormat() = 0;
+        virtual int getFd() = 0;
+        virtual unsigned long getBufferSize() = 0;
+        virtual int getWidth() { return -1; }
+        virtual int getHeight() { return -1; }
+        virtual int getVideoFormat() { return -1; }
+        virtual unsigned long getSampleRate() { return -1; }
+        virtual unsigned long getChannels() { return -1; }
+        virtual int getAudioFormat() { return -1; }
         virtual ~DeviceInterface() {};
 };
-// -----------------------------------------
-//    Device Capture Interface template
-// -----------------------------------------
-template<typename T>
-class DeviceCaptureAccess : public DeviceInterface
-{
-    public:
-        DeviceCaptureAccess(T* device) : m_device(device) {};
-        virtual ~DeviceCaptureAccess() { delete m_device; };
-        virtual size_t read(char* buffer, size_t bufferSize) { return m_device->read(buffer, bufferSize); }
-        virtual int getFd() { return m_device->getFd(); }
-        virtual unsigned long getBufferSize() { return m_device->getBufferSize(); }
-        virtual int getWidth() { return m_device->getWidth(); }
-        virtual int getHeight() { return m_device->getHeight(); }
-        virtual int getCaptureFormat() { return m_device->getFormat(); }
-    protected:
-        T* m_device;
-};
 #endif
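With the pure-virtual getters replaced by defaults, an implementation now only overrides the accessors it supports; everything else falls back to -1 (note that the unsigned long getters returning -1 actually yield ULONG_MAX; the int-returning getVideoFormat() is what BaseServerMediaSubsession tests with >= 0). A minimal sketch of an audio-only device under the new interface (hypothetical class, not part of the commit):

class DummyAudioDevice : public DeviceInterface
{
    public:
        // Only the audio-related getters are overridden; getWidth()/getHeight()/
        // getVideoFormat() keep their -1 defaults, marking the device as audio-only.
        virtual size_t read(char* buffer, size_t bufferSize) { return 0; }    // no data in this sketch
        virtual int getFd() { return -1; }                                    // no pollable descriptor
        virtual unsigned long getBufferSize() { return 4096; }
        virtual unsigned long getSampleRate() { return 48000; }
        virtual unsigned long getChannels() { return 2; }
        virtual int getAudioFormat() { return SND_PCM_FORMAT_S16_LE; }        // assumes <alsa/asoundlib.h>
};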

DeviceSource.h
@@ -3,9 +3,9 @@
 ** support, and with no warranty, express or implied, as to its usefulness for
 ** any purpose.
 **
-** V4l2DeviceSource.h
+** DeviceSource.h
 **
-** V4L2 live555 source
+** live555 source
 **
 ** -------------------------------------------------------------------------*/
@@ -24,6 +24,27 @@
 #include <liveMedia.hh>
 #include "DeviceInterface.h"
+#include "V4l2Capture.h"
+
+// -----------------------------------------
+//    Video Device Capture Interface
+// -----------------------------------------
+class VideoCaptureAccess : public DeviceInterface
+{
+    public:
+        VideoCaptureAccess(V4l2Capture* device) : m_device(device) {}
+        virtual ~VideoCaptureAccess() { delete m_device; }
+        virtual size_t read(char* buffer, size_t bufferSize) { return m_device->read(buffer, bufferSize); }
+        virtual int getFd() { return m_device->getFd(); }
+        virtual unsigned long getBufferSize() { return m_device->getBufferSize(); }
+        virtual int getWidth() { return m_device->getWidth(); }
+        virtual int getHeight() { return m_device->getHeight(); }
+        virtual int getVideoFormat() { return m_device->getFormat(); }
+    protected:
+        V4l2Capture* m_device;
+};
 class V4L2DeviceSource: public FramedSource
 {
@@ -64,11 +85,9 @@ class V4L2DeviceSource: public FramedSource
     public:
         static V4L2DeviceSource* createNew(UsageEnvironment& env, DeviceInterface * device, int outputFd, unsigned int queueSize, bool useThread) ;
-        std::string getAuxLine() { return m_auxLine; };
-        void setAuxLine(const std::string auxLine) { m_auxLine = auxLine; };
-        int getWidth() { return m_device->getWidth(); };
-        int getHeight() { return m_device->getHeight(); };
-        int getCaptureFormat() { return m_device->getCaptureFormat(); };
+        std::string getAuxLine() { return m_auxLine; }
+        void setAuxLine(const std::string auxLine) { m_auxLine = auxLine; }
+        DeviceInterface* getDevice() { return m_device; }
     protected:
         V4L2DeviceSource(UsageEnvironment& env, DeviceInterface * device, int outputFd, unsigned int queueSize, bool useThread);
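VideoCaptureAccess is a non-template replacement for the removed DeviceCaptureAccess<T>: since ALSACapture now implements DeviceInterface itself, only V4l2Capture still needs an adapter, and the wrapper maps the device's getFormat() onto the new getVideoFormat() explicitly. V4L2DeviceSource in turn stops forwarding width/height/format and simply exposes the wrapped device via getDevice(). Usage, as it appears in V4l2RTSPServer::CreateVideoReplicator further down:

    // the wrapper takes ownership of the V4l2Capture (deleted in ~VideoCaptureAccess)
    videoReplicator = DeviceSourceFactory::createStreamReplicator(this->env(),
        videoCapture->getFormat(), new VideoCaptureAccess(videoCapture),
        queueSize, useThread, outfd, repeatConfig);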

MulticastServerMediaSubsession.h
@@ -21,19 +21,30 @@ class MulticastServerMediaSubsession : public PassiveServerMediaSubsession , public BaseServerMediaSubsession
             , struct in_addr destinationAddress
             , Port rtpPortNum, Port rtcpPortNum
             , int ttl
-            , StreamReplicator* replicator
-            , const std::string& format);
+            , StreamReplicator* replicator);
     protected:
-        MulticastServerMediaSubsession(StreamReplicator* replicator, RTPSink* rtpSink, RTCPInstance* rtcpInstance)
-            : PassiveServerMediaSubsession(*rtpSink, rtcpInstance), BaseServerMediaSubsession(replicator), m_rtpSink(rtpSink) {};
+        MulticastServerMediaSubsession(UsageEnvironment& env
+            , struct in_addr destinationAddress
+            , Port rtpPortNum, Port rtcpPortNum
+            , int ttl
+            , StreamReplicator* replicator)
+            : PassiveServerMediaSubsession(*this->createRtpSink(env, destinationAddress, rtpPortNum, rtcpPortNum, ttl, replicator)
+                , m_rtcpInstance)
+            , BaseServerMediaSubsession(replicator) {};
 
         virtual char const* sdpLines() ;
         virtual char const* getAuxSDPLine(RTPSink* rtpSink,FramedSource* inputSource);
+
+        RTPSink* createRtpSink(UsageEnvironment& env
+            , struct in_addr destinationAddress
+            , Port rtpPortNum, Port rtcpPortNum
+            , int ttl
+            , StreamReplicator* replicator);
 
     protected:
-        RTPSink* m_rtpSink;
-        std::string m_SDPLines;
+        RTPSink* m_rtpSink;
+        RTCPInstance* m_rtcpInstance;
+        std::string m_SDPLines;
 };
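The multicast subsession now builds its own RTP sink: the new constructor calls createRtpSink() inside the PassiveServerMediaSubsession initializer, and the sink and RTCP instance land in the m_rtpSink/m_rtcpInstance members that createRtpSink() fills in as side effects. Note this relies on *this->createRtpSink(...) being evaluated before the m_rtcpInstance argument; C++ leaves the evaluation order of those two initializer arguments unspecified, so this is worth keeping an eye on. The call site shrinks accordingly (taken from V4l2RTSPServer::AddMulticastSession below):

    subSession.push_back(MulticastServerMediaSubsession::createNew(*this->env(),
        destinationAddress, Port(rtpPortNum), Port(rtcpPortNum), ttl, videoReplicator));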

ServerMediaSubsession.h
@@ -15,12 +15,15 @@
 #include <iomanip>
 #include <iostream>
 #include <fstream>
+#include <sstream>
 // live555
 #include <liveMedia.hh>
-// forward declaration
-class V4L2DeviceSource;
+#include <linux/videodev2.h>
+#include "DeviceSource.h"
+#include "ALSACapture.h"
 // ---------------------------------
 //   BaseServerMediaSubsession
@@ -28,7 +31,74 @@ class V4L2DeviceSource;
 class BaseServerMediaSubsession
 {
     public:
-        BaseServerMediaSubsession(StreamReplicator* replicator): m_replicator(replicator) {};
+        BaseServerMediaSubsession(StreamReplicator* replicator): m_replicator(replicator) {
+            V4L2DeviceSource* deviceSource = dynamic_cast<V4L2DeviceSource*>(replicator->inputSource());
+            if (deviceSource) {
+                DeviceInterface* device = deviceSource->getDevice();
+                if (device->getVideoFormat() >= 0) {
+                    m_format = BaseServerMediaSubsession::getVideoRtpFormat(device->getVideoFormat());
+                } else {
+                    m_format = BaseServerMediaSubsession::getAudioRtpFormat(device->getAudioFormat(), device->getSampleRate(), device->getChannels());
+                }
+            }
+        }
+
+        // -----------------------------------------
+        //    convert V4L2 pix format to RTP mime
+        // -----------------------------------------
+        static std::string getVideoRtpFormat(int format)
+        {
+            std::string rtpFormat;
+            switch(format)
+            {
+                case V4L2_PIX_FMT_HEVC : rtpFormat = "video/H265"; break;
+                case V4L2_PIX_FMT_H264 : rtpFormat = "video/H264"; break;
+                case V4L2_PIX_FMT_MJPEG: rtpFormat = "video/JPEG"; break;
+                case V4L2_PIX_FMT_JPEG : rtpFormat = "video/JPEG"; break;
+                case V4L2_PIX_FMT_VP8  : rtpFormat = "video/VP8" ; break;
+                case V4L2_PIX_FMT_VP9  : rtpFormat = "video/VP9" ; break;
+                case V4L2_PIX_FMT_YUYV : rtpFormat = "video/RAW" ; break;
+                case V4L2_PIX_FMT_UYVY : rtpFormat = "video/RAW" ; break;
+            }
+            return rtpFormat;
+        }
+
+        static std::string getAudioRtpFormat(int format, int sampleRate, int channels)
+        {
+            std::ostringstream os;
+#ifdef HAVE_SOUND
+            os << "audio/";
+            switch (format) {
+                case SND_PCM_FORMAT_A_LAW:
+                    os << "PCMA";
+                    break;
+                case SND_PCM_FORMAT_MU_LAW:
+                    os << "PCMU";
+                    break;
+                case SND_PCM_FORMAT_S8:
+                    os << "L8";
+                    break;
+                case SND_PCM_FORMAT_S24_BE:
+                case SND_PCM_FORMAT_S24_LE:
+                    os << "L24";
+                    break;
+                case SND_PCM_FORMAT_S32_BE:
+                case SND_PCM_FORMAT_S32_LE:
+                    os << "L32";
+                    break;
+                case SND_PCM_FORMAT_MPEG:
+                    os << "MPEG";
+                    break;
+                default:
+                    os << "L16";
+                    break;
+            }
+            os << "/" << sampleRate << "/" << channels;
+#endif
+            return os.str();
+        }
 
     public:
         static FramedSource* createSource(UsageEnvironment& env, FramedSource * videoES, const std::string& format);
@@ -37,5 +107,6 @@ class BaseServerMediaSubsession
     protected:
         StreamReplicator* m_replicator;
+        std::string m_format;
 };
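Because getVideoRtpFormat() and getAudioRtpFormat() are static, the mapping can be exercised in isolation (expected results follow from the switch statements above; getAudioRtpFormat() returns an empty string unless HAVE_SOUND is defined):

    BaseServerMediaSubsession::getVideoRtpFormat(V4L2_PIX_FMT_H264);               // "video/H264"
    BaseServerMediaSubsession::getVideoRtpFormat(V4L2_PIX_FMT_MJPEG);              // "video/JPEG"
    BaseServerMediaSubsession::getAudioRtpFormat(SND_PCM_FORMAT_MU_LAW, 8000, 1);  // "audio/PCMU/8000/1"
    BaseServerMediaSubsession::getAudioRtpFormat(SND_PCM_FORMAT_S16_LE, 44100, 2); // "audio/L16/44100/2" (default branch)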

TSServerMediaSubsession.h
@@ -19,13 +19,13 @@
 class TSServerMediaSubsession : public UnicastServerMediaSubsession
 {
     public:
-        static TSServerMediaSubsession* createNew(UsageEnvironment& env, StreamReplicator* videoreplicator, const std::string& videoformat, StreamReplicator* audioreplicator, const std::string& audioformat, unsigned int sliceDuration)
+        static TSServerMediaSubsession* createNew(UsageEnvironment& env, StreamReplicator* videoreplicator, StreamReplicator* audioreplicator, unsigned int sliceDuration)
         {
-            return new TSServerMediaSubsession(env, videoreplicator, videoformat, audioreplicator, audioformat, sliceDuration);
+            return new TSServerMediaSubsession(env, videoreplicator, audioreplicator, sliceDuration);
         }
     protected:
-        TSServerMediaSubsession(UsageEnvironment& env, StreamReplicator* videoreplicator, const std::string& videoformat, StreamReplicator* audioreplicator, const std::string& audioformat, unsigned int sliceDuration);
+        TSServerMediaSubsession(UsageEnvironment& env, StreamReplicator* videoreplicator, StreamReplicator* audioreplicator, unsigned int sliceDuration);
         virtual ~TSServerMediaSubsession();
         virtual float getCurrentNPT(void* streamToken);

UnicastServerMediaSubsession.h
@@ -17,18 +17,16 @@
 class UnicastServerMediaSubsession : public OnDemandServerMediaSubsession , public BaseServerMediaSubsession
 {
     public:
-        static UnicastServerMediaSubsession* createNew(UsageEnvironment& env, StreamReplicator* replicator, const std::string& format);
+        static UnicastServerMediaSubsession* createNew(UsageEnvironment& env, StreamReplicator* replicator);
     protected:
-        UnicastServerMediaSubsession(UsageEnvironment& env, StreamReplicator* replicator, const std::string& format)
-            : OnDemandServerMediaSubsession(env, False), BaseServerMediaSubsession(replicator), m_format(format) {};
+        UnicastServerMediaSubsession(UsageEnvironment& env, StreamReplicator* replicator)
+            : OnDemandServerMediaSubsession(env, False), BaseServerMediaSubsession(replicator) {}
         virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate);
         virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource);
         virtual char const* getAuxSDPLine(RTPSink* rtpSink,FramedSource* inputSource);
-    protected:
-        const std::string m_format;
 };

V4l2RTSPServer.h
@@ -117,92 +117,33 @@ class V4l2RTSPServer {
         return m_env;
     }
 
-    // -----------------------------------------
-    //    convert V4L2 pix format to RTP mime
-    // -----------------------------------------
-    static std::string getVideoRtpFormat(int format)
-    {
-        std::string rtpFormat;
-        switch(format)
-        {
-            case V4L2_PIX_FMT_HEVC : rtpFormat = "video/H265"; break;
-            case V4L2_PIX_FMT_H264 : rtpFormat = "video/H264"; break;
-            case V4L2_PIX_FMT_MJPEG: rtpFormat = "video/JPEG"; break;
-            case V4L2_PIX_FMT_JPEG : rtpFormat = "video/JPEG"; break;
-            case V4L2_PIX_FMT_VP8  : rtpFormat = "video/VP8" ; break;
-            case V4L2_PIX_FMT_VP9  : rtpFormat = "video/VP9" ; break;
-            case V4L2_PIX_FMT_YUYV : rtpFormat = "video/RAW" ; break;
-            case V4L2_PIX_FMT_UYVY : rtpFormat = "video/RAW" ; break;
-        }
-        return rtpFormat;
-    }
 
     // -----------------------------------------
     //    create video capture & replicator
     // -----------------------------------------
     StreamReplicator* CreateVideoReplicator(
         const V4L2DeviceParameters& inParam,
         int queueSize, int useThread, int repeatConfig,
-        const std::string& outputFile, V4l2IoType ioTypeOut, V4l2Output*& out,
-        std::string& rtpVideoFormat);
+        const std::string& outputFile, V4l2IoType ioTypeOut, V4l2Output*& out);
 
 #ifdef HAVE_ALSA
-    // -----------------------------------------
-    //    convert string audio format to pcm
-    // -----------------------------------------
-    static std::string getAudioRtpFormat(snd_pcm_format_t format, int sampleRate, int channels)
-    {
-        std::ostringstream os;
-        os << "audio/";
-        switch (format) {
-            case SND_PCM_FORMAT_A_LAW:
-                os << "PCMA";
-                break;
-            case SND_PCM_FORMAT_MU_LAW:
-                os << "PCMU";
-                break;
-            case SND_PCM_FORMAT_S8:
-                os << "L8";
-                break;
-            case SND_PCM_FORMAT_S24_BE:
-            case SND_PCM_FORMAT_S24_LE:
-                os << "L24";
-                break;
-            case SND_PCM_FORMAT_S32_BE:
-            case SND_PCM_FORMAT_S32_LE:
-                os << "L32";
-                break;
-            case SND_PCM_FORMAT_MPEG:
-                os << "MPEG";
-                break;
-            default:
-                os << "L16";
-                break;
-        }
-        os << "/" << sampleRate << "/" << channels;
-        return os.str();
-    }
     StreamReplicator* CreateAudioReplicator(
         const std::string& audioDev, const std::list<snd_pcm_format_t>& audioFmtList, int audioFreq, int audioNbChannels, int verbose,
-        int queueSize, int useThread,
-        std::string& rtpAudioFormat);
+        int queueSize, int useThread);
 #endif
 
     // -----------------------------------------
     //    Add unicast Session
     // -----------------------------------------
-    int AddUnicastSession(const std::string& url, StreamReplicator* videoReplicator, const std::string& rtpVideoFormat, StreamReplicator* audioReplicator, const std::string & rtpAudioFormat) {
+    int AddUnicastSession(const std::string& url, StreamReplicator* videoReplicator, StreamReplicator* audioReplicator) {
         // Create Unicast Session
         std::list<ServerMediaSubsession*> subSession;
         if (videoReplicator)
         {
-            subSession.push_back(UnicastServerMediaSubsession::createNew(*this->env(), videoReplicator, rtpVideoFormat));
+            subSession.push_back(UnicastServerMediaSubsession::createNew(*this->env(), videoReplicator));
         }
         if (audioReplicator)
         {
-            subSession.push_back(UnicastServerMediaSubsession::createNew(*this->env(), audioReplicator, rtpAudioFormat));
+            subSession.push_back(UnicastServerMediaSubsession::createNew(*this->env(), audioReplicator));
         }
         return this->addSession(url, subSession);
     }
@@ -210,11 +151,11 @@ class V4l2RTSPServer {
     // -----------------------------------------
     //    Add HLS & MPEG# Session
     // -----------------------------------------
-    int AddHlsSession(const std::string& url, int hlsSegment, StreamReplicator* videoReplicator, const std::string& rtpVideoFormat, StreamReplicator* audioReplicator, const std::string & rtpAudioFormat) {
+    int AddHlsSession(const std::string& url, int hlsSegment, StreamReplicator* videoReplicator, StreamReplicator* audioReplicator) {
        std::list<ServerMediaSubsession*> subSession;
        if (videoReplicator)
        {
-            subSession.push_back(TSServerMediaSubsession::createNew(*this->env(), videoReplicator, rtpVideoFormat, audioReplicator, rtpAudioFormat, hlsSegment));
+            subSession.push_back(TSServerMediaSubsession::createNew(*this->env(), videoReplicator, audioReplicator, hlsSegment));
        }
        int nbSource = this->addSession(url, subSession);
@@ -234,7 +175,7 @@ class V4l2RTSPServer {
     // -----------------------------------------
     //    Add multicast Session
     // -----------------------------------------
-    int AddMulticastSession(const std::string& url, in_addr destinationAddress, unsigned short & rtpPortNum, unsigned short & rtcpPortNum, StreamReplicator* videoReplicator, const std::string& rtpVideoFormat, StreamReplicator* audioReplicator, const std::string & rtpAudioFormat) {
+    int AddMulticastSession(const std::string& url, in_addr destinationAddress, unsigned short & rtpPortNum, unsigned short & rtcpPortNum, StreamReplicator* videoReplicator, StreamReplicator* audioReplicator) {
         LOG(NOTICE) << "RTP  address " << inet_ntoa(destinationAddress) << ":" << rtpPortNum;
         LOG(NOTICE) << "RTCP address " << inet_ntoa(destinationAddress) << ":" << rtcpPortNum;
@@ -242,7 +183,7 @@ class V4l2RTSPServer {
         std::list<ServerMediaSubsession*> subSession;
         if (videoReplicator)
         {
-            subSession.push_back(MulticastServerMediaSubsession::createNew(*this->env(), destinationAddress, Port(rtpPortNum), Port(rtcpPortNum), ttl, videoReplicator, rtpVideoFormat));
+            subSession.push_back(MulticastServerMediaSubsession::createNew(*this->env(), destinationAddress, Port(rtpPortNum), Port(rtcpPortNum), ttl, videoReplicator));
             // increment ports for next sessions
             rtpPortNum+=2;
             rtcpPortNum+=2;
@@ -250,7 +191,7 @@ class V4l2RTSPServer {
         if (audioReplicator)
         {
-            subSession.push_back(MulticastServerMediaSubsession::createNew(*this->env(), destinationAddress, Port(rtpPortNum), Port(rtcpPortNum), ttl, audioReplicator, rtpAudioFormat));
+            subSession.push_back(MulticastServerMediaSubsession::createNew(*this->env(), destinationAddress, Port(rtpPortNum), Port(rtcpPortNum), ttl, audioReplicator));
             // increment ports for next sessions
             rtpPortNum+=2;

main.cpp
@@ -331,41 +331,37 @@ int main(int argc, char** argv)
         V4l2Output* out = NULL;
         V4L2DeviceParameters inParam(videoDev.c_str(), videoformatList, width, height, fps, ioTypeIn, verbose, openflags);
-        std::string rtpVideoFormat;
         StreamReplicator* videoReplicator = rtspServer.CreateVideoReplicator(
             inParam,
             queueSize, useThread, repeatConfig,
-            output, ioTypeOut, out,
-            rtpVideoFormat);
+            output, ioTypeOut, out);
         if (out != NULL) {
             outList.push_back(out);
         }
 
         // Init Audio Capture
         StreamReplicator* audioReplicator = NULL;
-        std::string rtpAudioFormat;
 #ifdef HAVE_ALSA
         audioReplicator = rtspServer.CreateAudioReplicator(
             audioDev, audioFmtList, audioFreq, audioNbChannels, verbose,
-            queueSize, useThread,
-            rtpAudioFormat);
+            queueSize, useThread);
 #endif
 
         // Create Multicast Session
         if (multicast)
         {
-            nbSource += rtspServer.AddMulticastSession(baseUrl+murl, destinationAddress, rtpPortNum, rtcpPortNum, videoReplicator, rtpVideoFormat, audioReplicator, rtpAudioFormat);
+            nbSource += rtspServer.AddMulticastSession(baseUrl+murl, destinationAddress, rtpPortNum, rtcpPortNum, videoReplicator, audioReplicator);
         }
 
         // Create HLS Session
         if (hlsSegment > 0)
         {
-            nbSource += rtspServer.AddHlsSession(baseUrl+tsurl, hlsSegment, videoReplicator, rtpVideoFormat, audioReplicator, rtpAudioFormat);
+            nbSource += rtspServer.AddHlsSession(baseUrl+tsurl, hlsSegment, videoReplicator, audioReplicator);
         }
 
         // Create Unicast Session
-        nbSource += rtspServer.AddUnicastSession(baseUrl+url, videoReplicator, rtpVideoFormat, audioReplicator, rtpAudioFormat);
+        nbSource += rtspServer.AddUnicastSession(baseUrl+url, videoReplicator, audioReplicator);
     }
     if (nbSource>0)

MulticastServerMediaSubsession.cpp
@@ -17,12 +17,20 @@ MulticastServerMediaSubsession* MulticastServerMediaSubsession::createNew(UsageEnvironment& env
         , struct in_addr destinationAddress
         , Port rtpPortNum, Port rtcpPortNum
         , int ttl
-        , StreamReplicator* replicator
-        , const std::string& format)
+        , StreamReplicator* replicator)
 {
+    return new MulticastServerMediaSubsession(env, destinationAddress, rtpPortNum, rtcpPortNum, ttl , replicator);
+}
+
+RTPSink* MulticastServerMediaSubsession::createRtpSink(UsageEnvironment& env
+        , struct in_addr destinationAddress
+        , Port rtpPortNum, Port rtcpPortNum
+        , int ttl
+        , StreamReplicator* replicator) {
     // Create a source
     FramedSource* source = replicator->createStreamReplica();
-    FramedSource* videoSource = createSource(env, source, format);
+    FramedSource* videoSource = createSource(env, source, m_format);
 
     // Create RTP/RTCP groupsock
 #if LIVEMEDIA_LIBRARY_VERSION_INT < 1607644800
@@ -33,24 +41,24 @@ MulticastServerMediaSubsession* MulticastServerMediaSubsession::createNew(UsageEnvironment& env
     ((struct sockaddr_in&)groupAddress).sin_addr = destinationAddress;
 #endif
     Groupsock* rtpGroupsock = new Groupsock(env, groupAddress, rtpPortNum, ttl);
-    Groupsock* rtcpGroupsock = new Groupsock(env, groupAddress, rtcpPortNum, ttl);
 
     // Create a RTP sink
-    RTPSink* videoSink = createSink(env, rtpGroupsock, 96, format, dynamic_cast<V4L2DeviceSource*>(replicator->inputSource()));
+    m_rtpSink = createSink(env, rtpGroupsock, 96, m_format, dynamic_cast<V4L2DeviceSource*>(replicator->inputSource()));
 
     // Create 'RTCP instance'
     const unsigned maxCNAMElen = 100;
     unsigned char CNAME[maxCNAMElen+1];
     gethostname((char*)CNAME, maxCNAMElen);
     CNAME[maxCNAMElen] = '\0';
-    RTCPInstance* rtcpInstance = RTCPInstance::createNew(env, rtcpGroupsock, 500, CNAME, videoSink, NULL);
+    Groupsock* rtcpGroupsock = new Groupsock(env, groupAddress, rtcpPortNum, ttl);
+    m_rtcpInstance = RTCPInstance::createNew(env, rtcpGroupsock, 500, CNAME, m_rtpSink, NULL);
 
     // Start Playing the Sink
-    videoSink->startPlaying(*videoSource, NULL, NULL);
-    return new MulticastServerMediaSubsession(replicator, videoSink, rtcpInstance);
+    m_rtpSink->startPlaying(*videoSource, NULL, NULL);
+    return m_rtpSink;
 }
 
 char const* MulticastServerMediaSubsession::sdpLines()
 {
     if (m_SDPLines.empty())
ServerMediaSubsession.cpp
@@ -79,12 +79,13 @@ RTPSink* BaseServerMediaSubsession::createSink(UsageEnvironment& env, Groupsock
     else if (format =="video/RAW")
     {
         std::string sampling;
-        switch (source->getCaptureFormat()) {
+        DeviceInterface* device = source->getDevice();
+        switch (device->getVideoFormat()) {
             case V4L2_PIX_FMT_YUV444: sampling = "YCbCr-4:4:4"; break;
             case V4L2_PIX_FMT_YUYV:   sampling = "YCbCr-4:2:2"; break;
             case V4L2_PIX_FMT_UYVY:   sampling = "YCbCr-4:2:2"; break;
         }
-        videoSink = RawVideoRTPSink::createNew(env, rtpGroupsock, rtpPayloadTypeIfDynamic, source->getWidth(), source->getHeight(), 8, sampling.c_str(),"BT709-2");
+        videoSink = RawVideoRTPSink::createNew(env, rtpGroupsock, rtpPayloadTypeIfDynamic, device->getWidth(), device->getHeight(), 8, sampling.c_str(),"BT709-2");
     }
 #endif
     else if (format.find("audio/L16") == 0)
@@ -112,9 +113,10 @@ char const* BaseServerMediaSubsession::getAuxLine(V4L2DeviceSource* source, RTPSink* rtpSink)
     }
     else if (source) {
         unsigned char rtpPayloadType = rtpSink->rtpPayloadType();
+        DeviceInterface* device = source->getDevice();
         os << "a=fmtp:" << int(rtpPayloadType) << " " << source->getAuxLine() << "\r\n";
-        int width = source->getWidth();
-        int height = source->getHeight();
+        int width = device->getWidth();
+        int height = device->getHeight();
         if ( (width > 0) && (height>0) ) {
             os << "a=x-dimensions:" << width << "," << height << "\r\n";
         }
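For a device reporting 640x480, this branch emits SDP attribute lines of the following shape (illustrative values; the fmtp payload depends on what getAuxLine() returned for the codec):

    a=fmtp:96 <contents of getAuxLine()>
    a=x-dimensions:640,480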

TSServerMediaSubsession.cpp
@@ -10,31 +10,31 @@
 #include "TSServerMediaSubsession.h"
 #include "AddH26xMarkerFilter.h"
 
-TSServerMediaSubsession::TSServerMediaSubsession(UsageEnvironment& env, StreamReplicator* videoreplicator, const std::string& videoformat, StreamReplicator* audioreplicator, const std::string& audioformat, unsigned int sliceDuration)
-    : UnicastServerMediaSubsession(env, videoreplicator, "video/MP2T"), m_slice(0)
+TSServerMediaSubsession::TSServerMediaSubsession(UsageEnvironment& env, StreamReplicator* videoreplicator, StreamReplicator* audioreplicator, unsigned int sliceDuration)
+    : UnicastServerMediaSubsession(env, videoreplicator), m_slice(0)
 {
     // Create a source
     FramedSource* source = videoreplicator->createStreamReplica();
     MPEG2TransportStreamFromESSource* muxer = MPEG2TransportStreamFromESSource::createNew(env);
-    if (videoformat == "video/H264") {
+    if (m_format == "video/H264") {
         // add marker
         FramedSource* filter = new AddH26xMarkerFilter(env, source);
         // mux to TS
         muxer->addNewVideoSource(filter, 5);
-    } else if (videoformat == "video/H265") {
+    } else if (m_format == "video/H265") {
         // add marker
         FramedSource* filter = new AddH26xMarkerFilter(env, source);
         // mux to TS
         muxer->addNewVideoSource(filter, 6);
     }
-    if (audioformat == "audio/MPEG") {
+    if (m_format == "audio/MPEG") {
         // mux to TS
         muxer->addNewAudioSource(source, 1);
     }
 
-    FramedSource* tsSource = createSource(env, muxer, m_format);
+    FramedSource* tsSource = createSource(env, muxer, "video/MP2T");
 
     // Start Playing the HLS Sink
     m_hlsSink = MemoryBufferSink::createNew(env, OutPacketBuffer::maxSize, sliceDuration);

UnicastServerMediaSubsession.cpp
@@ -14,9 +14,9 @@
 // -----------------------------------------
 //    ServerMediaSubsession for Unicast
 // -----------------------------------------
-UnicastServerMediaSubsession* UnicastServerMediaSubsession::createNew(UsageEnvironment& env, StreamReplicator* replicator, const std::string& format)
+UnicastServerMediaSubsession* UnicastServerMediaSubsession::createNew(UsageEnvironment& env, StreamReplicator* replicator)
 {
-    return new UnicastServerMediaSubsession(env,replicator,format);
+    return new UnicastServerMediaSubsession(env,replicator);
 }
 
 FramedSource* UnicastServerMediaSubsession::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate)

V4l2RTSPServer.cpp
@@ -26,8 +26,7 @@
 StreamReplicator* V4l2RTSPServer::CreateVideoReplicator(
     const V4L2DeviceParameters& inParam,
     int queueSize, int useThread, int repeatConfig,
-    const std::string& outputFile, V4l2IoType ioTypeOut, V4l2Output*& out,
-    std::string& rtpVideoFormat) {
+    const std::string& outputFile, V4l2IoType ioTypeOut, V4l2Output*& out) {
 
     StreamReplicator* videoReplicator = NULL;
     std::string videoDev(inParam.m_devName);
@@ -54,13 +53,13 @@ StreamReplicator* V4l2RTSPServer::CreateVideoReplicator(
         }
     }
 
-    rtpVideoFormat.assign(V4l2RTSPServer::getVideoRtpFormat(videoCapture->getFormat()));
+    std::string rtpVideoFormat(BaseServerMediaSubsession::getVideoRtpFormat(videoCapture->getFormat()));
     if (rtpVideoFormat.empty()) {
         LOG(FATAL) << "No Streaming format supported for device " << videoDev;
         delete videoCapture;
     } else {
         LOG(NOTICE) << "Create Source ..." << videoDev;
-        videoReplicator = DeviceSourceFactory::createStreamReplicator(this->env(), videoCapture->getFormat(), new DeviceCaptureAccess<V4l2Capture>(videoCapture), queueSize, useThread, outfd, repeatConfig);
+        videoReplicator = DeviceSourceFactory::createStreamReplicator(this->env(), videoCapture->getFormat(), new VideoCaptureAccess(videoCapture), queueSize, useThread, outfd, repeatConfig);
         if (videoReplicator == NULL)
         {
             LOG(FATAL) << "Unable to create source for device " << videoDev;
@@ -173,8 +172,7 @@ std::string getV4l2Alsa(const std::string& v4l2device) {
 StreamReplicator* V4l2RTSPServer::CreateAudioReplicator(
     const std::string& audioDev, const std::list<snd_pcm_format_t>& audioFmtList, int audioFreq, int audioNbChannels, int verbose,
-    int queueSize, int useThread,
-    std::string& rtpAudioFormat) {
+    int queueSize, int useThread) {
 
     StreamReplicator* audioReplicator = NULL;
     if (!audioDev.empty())
     {
@@ -188,9 +186,7 @@ StreamReplicator* V4l2RTSPServer::CreateAudioReplicator(
         ALSACapture* audioCapture = ALSACapture::createNew(param);
         if (audioCapture)
         {
-            rtpAudioFormat.assign(V4l2RTSPServer::getAudioRtpFormat(audioCapture->getFormat(),audioCapture->getSampleRate(), audioCapture->getChannels()));
-            audioReplicator = DeviceSourceFactory::createStreamReplicator(this->env(), 0, new DeviceCaptureAccess<ALSACapture>(audioCapture), queueSize, useThread);
+            audioReplicator = DeviceSourceFactory::createStreamReplicator(this->env(), 0, audioCapture, queueSize, useThread);
             if (audioReplicator == NULL)
             {
                 LOG(FATAL) << "Unable to create source for device " << audioDevice;
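Since ALSACapture now derives from DeviceInterface (see ALSACapture.h above), the capture object is handed to the factory directly; both the DeviceCaptureAccess<ALSACapture> wrapper and the rtpAudioFormat out-parameter disappear, and the audio RTP format is rediscovered later by BaseServerMediaSubsession through getAudioFormat()/getSampleRate()/getChannels().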
