Commit b864ce0b authored by Hugo Beauzee-Luyssen

Corrected crash when no audio has to be rendered

parent 764f89dd
@@ -28,6 +28,8 @@
#include "Timeline.h"
#include "SettingsManager.h"
uint8_t* WorkflowRenderer::m_silencedAudioBuffer = NULL;
WorkflowRenderer::WorkflowRenderer() :
m_mainWorkflow( MainWorkflow::getInstance() ),
m_stopping( false ),
@@ -73,8 +75,9 @@ WorkflowRenderer::WorkflowRenderer() :
m_media->addOption( ":imem-cat=2" );
m_media->addOption( ":imem-caching=0" );
sprintf( buffer, ":input-slave=imem://data=%lld:cat=1:codec=s16l:samplerate=48000:channels=2",
(qint64)m_audioEsHandler );
m_nbChannels = 1;
sprintf( buffer, ":input-slave=imem://data=%lld:cat=1:codec=s16l:samplerate=48000:channels=%u",
(qint64)m_audioEsHandler, m_nbChannels );
m_media->addOption( buffer );
m_media->addOption( ":vvvv" );
@@ -159,11 +162,28 @@ int WorkflowRenderer::lockAudio( WorkflowRenderer* self, int64_t *pts, size
MainWorkflow::OutputBuffers* ret = self->m_mainWorkflow->getSynchroneOutput( MainWorkflow::AudioTrack );
self->m_renderAudioSample = ret->audio;
}
*buffer = self->m_renderAudioSample->buff;
*bufferSize = self->m_renderAudioSample->size;
*pts = (( self->m_audioPts * 1000000 ) / 48000 ) * self->m_renderAudioSample->nbSample;
self->m_audioPts += self->m_renderAudioSample->nbChannels;
qDebug() << "Video buffer size:" << *bufferSize;
if ( self->m_renderAudioSample != NULL )
{
*buffer = self->m_renderAudioSample->buff;
*bufferSize = self->m_renderAudioSample->size;
*pts = (( self->m_audioPts * 1000000 ) / 48000 ) * self->m_renderAudioSample->nbSample;
self->m_audioPts += self->m_renderAudioSample->nbChannels;
}
else
{
//We set the nbSample to 10ms, which is 1/100 of a sec, so we divide the samplerate
//by 100.
unsigned int nbSample = 48000 / 100;
unsigned int buffSize = self->m_nbChannels * 2 * nbSample;
if ( WorkflowRenderer::m_silencedAudioBuffer == NULL )
WorkflowRenderer::m_silencedAudioBuffer = new uint8_t[ buffSize ];
memset( WorkflowRenderer::m_silencedAudioBuffer, 0, buffSize );
*buffer = WorkflowRenderer::m_silencedAudioBuffer;
*bufferSize = buffSize;
*pts = (( self->m_audioPts * 1000000 ) / 48000 ) * nbSample;
self->m_audioPts += self->m_nbChannels;
}
// qDebug() << "Video buffer size:" << *bufferSize;
return 0;
}
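
A minimal standalone sketch of the silence fallback this hunk introduces, under the same assumptions the diff makes (48 kHz, 16-bit s16l audio, a 10 ms slice, i.e. 48000 / 100 samples). With one channel that is 1 * 2 * 480 = 960 bytes of zeroed PCM per callback:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    static uint8_t *s_silencedAudioBuffer = nullptr;   // stands in for m_silencedAudioBuffer

    // Returns a zero-filled buffer covering 10 ms of interleaved s16l audio
    // at 48 kHz, lazily allocated on first use, mirroring the lockAudio() fallback.
    void getSilentBuffer( unsigned int nbChannels, uint8_t **buffer, unsigned int *bufferSize )
    {
        const unsigned int nbSample = 48000 / 100;               // 10 ms worth of samples
        const unsigned int buffSize = nbChannels * 2 * nbSample; // 2 bytes per 16-bit sample

        if ( s_silencedAudioBuffer == nullptr )
            s_silencedAudioBuffer = new uint8_t[ buffSize ];
        std::memset( s_silencedAudioBuffer, 0, buffSize );       // PCM silence is all zeros

        *buffer     = s_silencedAudioBuffer;
        *bufferSize = buffSize;
    }

    int main()
    {
        uint8_t     *buffer = nullptr;
        unsigned int size   = 0;
        getSilentBuffer( 1, &buffer, &size );
        std::printf( "silent buffer: %u bytes\n", size );        // prints 960 for one channel
        return 0;
    }
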
......
@@ -99,6 +99,11 @@ class WorkflowRenderer : public GenericRenderer
private:
unsigned char* m_renderVideoFrame;
/**
* \brief When there's no sound to play, this is the buffer that'll
* be injected
*/
static uint8_t* m_silencedAudioBuffer;
size_t m_videoBuffSize;
AudioClipWorkflow::AudioSample* m_renderAudioSample;
QStack<StackedAction*> m_actions;
@@ -115,6 +120,8 @@ class WorkflowRenderer : public GenericRenderer
*/
qint64 m_pts;
qint64 m_audioPts;
uint32_t m_nbChannels;
public slots:
virtual void setClip( Clip* ){}
......
@@ -53,7 +53,10 @@ void* AudioClipWorkflow::getOutput()
QMutexLocker lock( m_renderLock );
if ( isEndReached() == true )
{
qDebug() << "Audio end reached";
return NULL;
}
return m_buffer;
}
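
getOutput() reports end-of-clip by returning NULL (now also logged by the hunk above), which is why lockAudio() checks the sample pointer before dereferencing it. A hypothetical, heavily simplified sketch of that contract; the types here do not mirror the real classes:

    #include <cstddef>
    #include <cstdio>

    struct AudioSample               // simplified stand-in for AudioClipWorkflow::AudioSample
    {
        unsigned char *buff;
        size_t         size;
    };

    // Simplified getOutput(): once the end of the clip is reached there is
    // nothing left to hand out, so the caller gets NULL and must cope with it.
    AudioSample *getOutput( bool endReached, AudioSample *current )
    {
        if ( endReached )
            return nullptr;
        return current;
    }

    int main()
    {
        AudioSample  sample = { nullptr, 0 };
        AudioSample *out    = getOutput( true, &sample );
        if ( out == nullptr )
            std::puts( "Audio end reached, renderer falls back to silence" );
        return 0;
    }
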
@@ -91,7 +94,8 @@ void AudioClipWorkflow::unlock( AudioClipWorkflow* cw, uint8_t* pcm_buffe
unsigned int nb_samples, unsigned int bits_per_sample,
unsigned int size, qint64 pts )
{
// qDebug() << "pts:" << pts << "nb channels" << channels << "rate:" << rate;
// qDebug() << "pts:" << pts << "nb channels" << channels << "rate:" << rate <<
// "size:" << size << "nb_samples:" << nb_samples;
Q_UNUSED( pcm_buffer );
Q_UNUSED( rate );
Q_UNUSED( bits_per_sample );
......
@@ -237,7 +237,9 @@ MainWorkflow::OutputBuffers* MainWorkflow::getSynchroneOutput( MainWorkflow::Tr
m_outputBuffers->video = &( m_effectEngine->getOutputFrame( 0 ) );
}
if ( trackType == BothTrackType || trackType == AudioTrack )
{
m_outputBuffers->audio = m_tracks[MainWorkflow::AudioTrack]->getTmpAudioBuffer();
}
m_synchroneRenderWaitConditionMutex->unlock();
return m_outputBuffers;
}
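
A hypothetical sketch of the handoff this hunk touches: the audio pointer that getSynchroneOutput() fills from the audio track's temporary buffer can legitimately be NULL when no audio has to be rendered for the current frame, which is exactly the case the renderer-side silence fallback covers. Types are simplified, not the real declarations:

    #include <cstddef>
    #include <cstdio>

    struct AudioSample { unsigned char *buff; size_t size; };

    // Simplified stand-in for MainWorkflow::OutputBuffers.
    struct OutputBuffers
    {
        const unsigned char *video;   // current video frame, if any
        AudioSample         *audio;   // may be NULL when nothing is buffered
    };

    int main()
    {
        OutputBuffers out = { nullptr, nullptr };
        if ( out.audio == nullptr )
            std::puts( "no audio sample for this frame: inject silence instead" );
        return 0;
    }
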
......