/*
 *
 * Singe 2
 * Copyright (C) 2006-2024 Scott Duensing <scott@kangaroopunch.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 3
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 */

#include "include/SDL2/SDL_mixer.h"
|
|
#include "../thirdparty/ffms2/include/ffms.h"
|
|
#include "../thirdparty/uthash/src/uthash.h"
|
|
#include "../thirdparty/ffmpeg/libavformat/avformat.h"
|
|
#include "../thirdparty/ffmpeg/libavutil/dict.h"
|
|
|
|
|
|
#include "util.h"
|
|
#include "videoPlayer.h"
|
|
|
|
// This is kinda ugly but it lets us use the language
|
|
// data from VLC without changing their files.
|
|
#define VLC_API
|
|
#define N_(x) x
|
|
typedef struct iso639_lang_t iso639_lang_t;
|
|
#include "../thirdparty/vlc/vlc_iso_lang.h"
|
|
#include "../thirdparty/vlc/iso-639_def.h"
|
|
#undef N_
|
|
#undef VLC_API
|
|
|
|
|
|
#define AUDIO_STREAM_LOW_WATERMARK (24 * 1024)
#define AUDIO_SAMPLE_PREREAD 1024
#define AUDIO_SILENCE_SECONDS 2
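
// At a typical 44.1 kHz, 16-bit stereo mixer output (176,400 bytes/second), the
// 24 KiB low watermark above corresponds to roughly 140 ms of queued, converted
// audio, and each preread pulls 1024 source samples per FFMS_GetAudio() call.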


typedef struct AudioStreamS {
    FFMS_AudioSource *audioSource;
    const FFMS_AudioProperties *audioProps;
    char *language;
} AudioStreamT;

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpadded"
typedef struct VideoPlayerS {
    int32_t id;
    bool playing;
    bool resetTime;
    byte *audioBuffer;
    byte audioSampleBytes;
    byte *audioSilenceRaw;
    char errMsg[1024];
    int32_t volumeLeft;
    int32_t volumeRight;
    int32_t videoTrack;
    int32_t audioSampleSize;
    int32_t mixSampleSize;
    int32_t audioSilenceChannel;
    int64_t frame;
    int64_t audioBufferSize;
    int64_t frameDeltaTime;
    int64_t lastFrameTime;
    int64_t timestamp;
    int64_t audioDelta;
    int64_t audioPosition;
    int64_t framesPlayed;
    Uint16 audioFormat;
    Uint32 lastTickTime;
    Uint32 audioSilenceSize;
    //double audioAdjustment;
    Mix_Chunk *silenceChunk;
    SDL_AudioStream *audioStream;
    SDL_Texture *videoTexture;
    FFMS_ErrorInfo errInfo;
    int32_t currentAudioTrack;
    int32_t audioSourceCount;
    AudioStreamT *audio;
    FFMS_VideoSource *videoSource;
    const FFMS_VideoProperties *videoProps;
    const FFMS_Frame *propFrame;
    const FFMS_TrackTimeBase *videoTimeBase;
    const FFMS_Frame *frameData;
    const FFMS_FrameInfo *frameInfo;
    UT_hash_handle hh;
} VideoPlayerT;
#pragma GCC diagnostic pop

FFMS_Index *_createIndex(char *filename, char *indexPath, bool hasVideo, bool hasAudio, VideoPlayerT *v);
void _dequeueVideoAudio(int channel, void *stream, int len, void *udata);  // Callback. Not changing ints.
int FFMS_CC _indexCallBack(int64_t Current, int64_t Total, void *ICPrivate);  // Callback. Not changing int.
int32_t _loadVideoAndAudio(char *vFilename, char *aFilename, char *indexPath, bool stretchVideo, SDL_Renderer *renderer);


static videoIndexingCallback _indexingFunction = NULL;
static VideoPlayerT *_videoPlayerHash = NULL;
static int32_t _nextId = 0;
static int32_t _mixRate = -1;
static Uint8 _mixChannels = 0;
static SDL_AudioFormat _mixFormat = 0;

void _dequeueVideoAudio(int channel, void *stream, int bytes, void *udata) {  // Callback. Not changing ints.
    VideoPlayerT *v = (VideoPlayerT *)udata;
    int32_t bytesToCopy = bytes;
    int32_t available = SDL_AudioStreamAvailable(v->audioStream);
    int32_t remainder = 0;
    int32_t bytesRead = 0;
    int32_t i = 0;
    Sint16 *data = stream;

    (void)channel;

    // Don't copy more than we have room for
    if (bytesToCopy > available) {
        bytesToCopy = available;
    }

    // Ensure we only copy complete samples (Is this needed?)
    remainder = bytesToCopy % v->audioSampleSize;
    bytesToCopy -= remainder;

    //utilSay("B: %d R: %d W: %ld", bytes, remainder, SDL_AudioStreamAvailable(v->audioStream));

    // Read audio data
    bytesRead = SDL_AudioStreamGet(v->audioStream, stream, bytesToCopy);
    if (bytesRead < 0) utilDie("%s", SDL_GetError());

    // We do our own volume per channel here in the mixer
    if (_mixChannels < 2) {
        // Mono output, average volume levels together
        Mix_Volume(channel, (int32_t)((float)MIX_MAX_VOLUME * (((float)v->volumeLeft + (float)v->volumeRight) / (float)2) * (float)0.01));
    } else {
        // Stereo output. Assumes MIX_DEFAULT_FORMAT for now.
        Mix_Volume(channel, MIX_MAX_VOLUME);
        for (i = 0; i < bytesRead / 2; i += 2) {
            data[i]     = (Sint16)((float)data[i]     * (float)v->volumeLeft  * (float)0.01);
            data[i + 1] = (Sint16)((float)data[i + 1] * (float)v->volumeRight * (float)0.01);
        }
    }
}

int FFMS_CC _indexCallBack(int64_t current, int64_t total, void *ICPrivate) {  // Callback. Not changing int.
    static int32_t lastPercent = 0;
    int32_t thisPercent = 0;
    VideoPlayerT *v = (VideoPlayerT *)ICPrivate;

    (void)v;

    if ((current == 0) && (total == 0)) {
        lastPercent = 0;  // Reset
    } else {
        thisPercent = (int32_t)((double)current / (double)total * 100.0);
        if (thisPercent != lastPercent) {
            lastPercent = thisPercent;
            // GUI
            if (_indexingFunction) {
                _indexingFunction(thisPercent);
            }
        }
    }

    return 0;
}

FFMS_Index *_createIndex(char *filename, char *indexPath, bool hasVideo, bool hasAudio, VideoPlayerT *v) {
    char *indexName = NULL;
    FFMS_Index *index = NULL;
    FFMS_Indexer *indexer = NULL;

    // Index file
    indexName = utilCreateString("%s%c%s.index", indexPath, utilGetPathSeparator(), utilGetLastPathComponent(filename));
    utilFixPathSeparators(&indexName, false);
    index = FFMS_ReadIndex(indexName, &v->errInfo);
    if (index) {
        if (FFMS_IndexBelongsToFile(index, filename, &v->errInfo)) {
            FFMS_DestroyIndex(index);
            index = NULL;
        }
    }
    if (!index) {
        indexer = FFMS_CreateIndexer(filename, &v->errInfo);
        if (indexer == NULL) utilDie("%s", v->errInfo.Buffer);
        if (hasAudio) FFMS_TrackTypeIndexSettings(indexer, FFMS_TYPE_AUDIO, 1, 0);
        if (hasVideo) FFMS_TrackTypeIndexSettings(indexer, FFMS_TYPE_VIDEO, 1, 0);
        _indexCallBack(0, 0, v);
        FFMS_SetProgressCallback(indexer, _indexCallBack, v);
        index = FFMS_DoIndexing2(indexer, FFMS_IEH_ABORT, &v->errInfo);
        if (index == NULL) utilDie("%s", v->errInfo.Buffer);
        if (FFMS_WriteIndex(indexName, index, &v->errInfo)) utilDie("%s", v->errInfo.Buffer);
    }
    free(indexName);

    return index;
}

int32_t _loadVideoAndAudio(char *vFilename, char *aFilename, char *indexPath, bool stretchVideo, SDL_Renderer *renderer) {
    int32_t pixelFormats[2];
    int32_t result = -1;
    FFMS_Index *vIndex = NULL;
    FFMS_Index *aIndex = NULL;
    VideoPlayerT *v = NULL;
    int32_t x = 0;
    int32_t count = 0;
    FFMS_Track *track = NULL;
    int32_t ttype = FFMS_TYPE_UNKNOWN;
    AVFormatContext *fmt_ctx = NULL;
    AVDictionaryEntry *tag = NULL;

    // Create new videoPlayer
    v = calloc(1, sizeof(VideoPlayerT));
    if (!v) utilDie("Unable to allocate new video player.");

    // Set some starting values
    v->audioSourceCount = 0;
    v->currentAudioTrack = -1;
    v->videoTrack = -1;
    v->audioSilenceChannel = -1;
    v->playing = false;  // Start paused
    v->errInfo.Buffer = v->errMsg;
    v->errInfo.BufferSize = sizeof(v->errMsg);
    v->errInfo.ErrorType = FFMS_ERROR_SUCCESS;
    v->errInfo.SubType = FFMS_ERROR_SUCCESS;

    if (aFilename) {
        vIndex = _createIndex(vFilename, indexPath, true, false, v);
        aIndex = _createIndex(aFilename, indexPath, false, true, v);
    } else {
        vIndex = _createIndex(vFilename, indexPath, true, true, v);
        aIndex = vIndex;
        aFilename = vFilename;
    }

    // Find video track
    v->videoTrack = FFMS_GetFirstTrackOfType(vIndex, FFMS_TYPE_VIDEO, &v->errInfo);
    if (v->videoTrack < 0) utilDie("%s", v->errInfo.Buffer);
    v->videoSource = FFMS_CreateVideoSource(vFilename, v->videoTrack, vIndex, -1, FFMS_SEEK_NORMAL, &v->errInfo);
    if (v->videoSource == NULL) utilDie("%s", v->errInfo.Buffer);

    // Get video properties
    v->videoProps = FFMS_GetVideoProperties(v->videoSource);
    v->propFrame = FFMS_GetFrame(v->videoSource, 0, &v->errInfo);
    if (v->propFrame == NULL) utilDie("%s", v->errInfo.Buffer);
    v->videoTimeBase = FFMS_GetTimeBase(FFMS_GetTrackFromVideo(v->videoSource));

    // Set up output video format
    pixelFormats[0] = FFMS_GetPixFmt("bgra");
    pixelFormats[1] = -1;
    if (FFMS_SetOutputFormatV2(v->videoSource, pixelFormats, v->propFrame->EncodedWidth, v->propFrame->EncodedHeight, FFMS_RESIZER_BICUBIC, &v->errInfo)) utilDie("%s", v->errInfo.Buffer);

    // Find audio track(s)
    for (x = 0; x < FFMS_GetNumTracks(aIndex); x++) {
        track = FFMS_GetTrackFromIndex(aIndex, x);
        ttype = FFMS_GetTrackType(track);
        // Just count the tracks for now.
        if (ttype == FFMS_TYPE_AUDIO) count++;
    }
    if (count > 0) {
        // Allocate space for the tracks.
        v->audio = (AudioStreamT *)calloc(count, sizeof(AudioStreamT));
        // Now create them.
        for (x = 0; x < FFMS_GetNumTracks(aIndex); x++) {
            if (FFMS_GetTrackType(FFMS_GetTrackFromIndex(aIndex, x)) == FFMS_TYPE_AUDIO) {
                v->audio[v->audioSourceCount].audioSource = FFMS_CreateAudioSource(aFilename, x, aIndex, FFMS_DELAY_FIRST_VIDEO_TRACK, &v->errInfo);
                if (v->audio[v->audioSourceCount].audioSource == NULL) utilDie("%s", v->errInfo.Buffer);
                v->audio[v->audioSourceCount].audioProps = FFMS_GetAudioProperties(v->audio[v->audioSourceCount].audioSource);
                v->audioSourceCount++;
            }
        }
        // Use ffmpeg directly to figure out language IDs for audio tracks
        if (avformat_open_input(&fmt_ctx, aFilename, NULL, NULL) >= 0) {
            count = 0;
            for (x = 0; x < fmt_ctx->nb_streams; x++) {
                if (fmt_ctx->streams[x]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
                    tag = NULL;
                    while ((tag = (AVDictionaryEntry *)av_dict_iterate(fmt_ctx->streams[x]->metadata, tag))) {
                        if (utilStricmp("language", tag->key) == 0) {
                            v->audio[count++].language = strdup(tag->value);
                            break;
                        }
                    }
                }
            }
            avformat_close_input(&fmt_ctx);
        }
        // Current audio track.
        v->currentAudioTrack = 0;
    }

    // Indices are now part of audioSource & videoSource, so release these
    FFMS_DestroyIndex(vIndex);
    vIndex = NULL;
    if ((aFilename != vFilename) && (aIndex)) {
        FFMS_DestroyIndex(aIndex);
    }
    aIndex = NULL;

    // Create video texture
    SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
    v->videoTexture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_BGRA32, SDL_TEXTUREACCESS_TARGET, v->propFrame->EncodedWidth, v->propFrame->EncodedHeight);
    if (v->videoTexture == NULL) utilDie("%s", SDL_GetError());
    if (!stretchVideo) {
        //***TODO*** Is this the best place for this? What if we have videos of multiple sizes?
        SDL_RenderSetLogicalSize(renderer, v->propFrame->EncodedWidth, v->propFrame->EncodedHeight);
    }

    // Do we have audio? All audio streams must have the same format!
    if (v->audioSourceCount > 0) {
        // Determine audio format
        switch (v->audio[0].audioProps->SampleFormat) {
            case FFMS_FMT_U8:
                v->audioFormat = AUDIO_U8;
                v->audioSampleBytes = 1;
                break;
            case FFMS_FMT_S16:
                v->audioFormat = AUDIO_S16SYS;
                v->audioSampleBytes = 2;
                break;
            case FFMS_FMT_S32:
                v->audioFormat = AUDIO_S32SYS;
                v->audioSampleBytes = 4;
                break;
            case FFMS_FMT_FLT:
                v->audioFormat = AUDIO_F32SYS;
                v->audioSampleBytes = 4;
                break;
            default:
                utilDie("Unknown audio sample format.");
                break;
        }
        if (v->audio[0].audioProps->Channels > 2) utilDie("Only mono and stereo audio are supported.");

        // Create audio stream to convert audio to our desired format
        v->audioStream = SDL_NewAudioStream(v->audioFormat, (Uint8)v->audio[0].audioProps->Channels, v->audio[0].audioProps->SampleRate, _mixFormat, _mixChannels, _mixRate);
        if (!v->audioStream) utilDie("%s", SDL_GetError());

        // Create a buffer to read audio into before conversion
        v->mixSampleSize = SDL_AUDIO_BITSIZE(_mixFormat) / 8 * _mixChannels;
        v->audioSampleSize = v->audioSampleBytes * v->audio[0].audioProps->Channels;
        v->audioBufferSize = v->audioSampleSize * AUDIO_SAMPLE_PREREAD;
        v->audioBuffer = (byte *)malloc((size_t)v->audioBufferSize * sizeof(byte));
        if (!v->audioBuffer) utilDie("Unable to allocate %zu byte audio buffer.", (size_t)v->audioBufferSize * sizeof(byte));

        // Create a block of silent audio to overlay with video stream audio
        v->audioSilenceSize = (Uint32)(_mixRate * SDL_AUDIO_BITSIZE(_mixFormat) / 8 * AUDIO_SILENCE_SECONDS);
        v->audioSilenceRaw = (byte *)calloc(1, (size_t)v->audioSilenceSize * sizeof(byte));
        if (!v->audioSilenceRaw) utilDie("Unable to allocate %u byte silence buffer.", v->audioSilenceSize);

        // Load silent audio
        v->silenceChunk = Mix_QuickLoad_RAW(v->audioSilenceRaw, v->audioSilenceSize);
        if (!v->silenceChunk) utilDie("%s", Mix_GetError());

        // Start silent audio playback & immediately pause it
        v->audioSilenceChannel = Mix_PlayChannel(-1, v->silenceChunk, -1);
        if (v->audioSilenceChannel < 0) utilDie("%s", Mix_GetError());

        // Register effect to provide video stream audio on this channel
        Mix_RegisterEffect(v->audioSilenceChannel, _dequeueVideoAudio, NULL, v);
    }

    // Default volume, in percent
    v->volumeLeft = 100;
    v->volumeRight = 100;

    /*
    utilSay("Frames: %d (%dx%d) Audio Samples: %ld (%d Hz) %d Channel%s",
            v->videoProps->NumFrames,
            v->propFrame->EncodedWidth,
            v->propFrame->EncodedHeight,
            v->audioProps->NumSamples,
            v->audioProps->SampleRate,
            v->audioProps->Channels,
            v->audioProps->Channels == 1 ? "" : "s"
           );
    */

    // Add to player hash
    v->id = _nextId;
    HASH_ADD_INT(_videoPlayerHash, id, v);
    result = _nextId++;

    return result;
}


int32_t videoInit(void) {
    int32_t channels = _mixChannels;

    // Start FFMS
    FFMS_Init(0, 0);

    // Fetch mixer settings
    if (!Mix_QuerySpec(&_mixRate, &_mixFormat, &channels)) utilDie("%s", Mix_GetError());
    _mixChannels = (Uint8)channels;

    // Volume only works with MIX_DEFAULT_FORMAT
    if (_mixFormat != MIX_DEFAULT_FORMAT) utilDie("videoInit: Only MIX_DEFAULT_FORMAT audio is supported.");

    return 0;
}
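
/*
 * Example (sketch, not compiled into the player): the startup order assumed
 * above -- SDL and SDL_mixer must already be initialized, because videoInit()
 * reads the opened mixer's format via Mix_QuerySpec().  The 44100/2/1024
 * values here are illustrative, not required by this file:
 *
 *   SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO);
 *   Mix_OpenAudio(44100, MIX_DEFAULT_FORMAT, 2, 1024);
 *   videoInit();
 */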


int32_t videoIsPlaying(int32_t playerHandle) {
    VideoPlayerT *v = NULL;

    // Get our player structure
    HASH_FIND_INT(_videoPlayerHash, &playerHandle, v);
    if (!v) utilDie("No video player at index %d in videoIsPlaying.", playerHandle);

    return v->playing;
}


int32_t videoGetAudioTrack(int32_t playerHandle) {
    VideoPlayerT *v = NULL;

    // Get our player structure
    HASH_FIND_INT(_videoPlayerHash, &playerHandle, v);
    if (!v) utilDie("No video player at index %d in videoGetAudioTrack.", playerHandle);

    return v->currentAudioTrack;
}


int32_t videoGetAudioTracks(int32_t playerHandle) {
    VideoPlayerT *v = NULL;

    // Get our player structure
    HASH_FIND_INT(_videoPlayerHash, &playerHandle, v);
    if (!v) utilDie("No video player at index %d in videoGetAudioTracks.", playerHandle);

    return v->audioSourceCount;
}


int64_t videoGetFrame(int32_t playerHandle) {
    VideoPlayerT *v = NULL;

    // Get our player structure
    HASH_FIND_INT(_videoPlayerHash, &playerHandle, v);
    if (!v) utilDie("No video player at index %d in videoGetFrame.", playerHandle);

    return v->frame;
}


int64_t videoGetFrameCount(int32_t playerHandle) {
    VideoPlayerT *v = NULL;

    // Get our player structure
    HASH_FIND_INT(_videoPlayerHash, &playerHandle, v);
    if (!v) utilDie("No video player at index %d in videoGetFrameCount.", playerHandle);

    return v->videoProps->NumFrames;
}


char *videoGetLanguage(int32_t playerHandle, int32_t audioTrack) {
    VideoPlayerT *v = NULL;
    static char *u = "unk";  // Unknown Language
    char *r = u;

    // Get our player structure
    HASH_FIND_INT(_videoPlayerHash, &playerHandle, v);
    if (!v) utilDie("No video player at index %d in videoGetLanguage.", playerHandle);

    if ((audioTrack >= 0) && (audioTrack < v->audioSourceCount)) {
        r = v->audio[audioTrack].language;
        if ((r == NULL) || (strlen(r) != 3)) {
            r = u;
        }
    } else {
        utilDie("Invalid audio track in videoGetLanguage.");
    }

    return r;
}


char *videoGetLanguageDescription(char *languageCode) {
    static char *u = "Unknown";
    char *r = u;
    int32_t i = 0;

    while (p_languages[i].psz_eng_name != NULL) {
        if ((utilStricmp(languageCode, (char *)p_languages[i].psz_iso639_1) == 0) ||
            (utilStricmp(languageCode, (char *)p_languages[i].psz_iso639_2T) == 0) ||
            (utilStricmp(languageCode, (char *)p_languages[i].psz_iso639_2B) == 0)) {
            r = (char *)p_languages[i].psz_eng_name;
            break;
        }
        i++;
    }

    return r;
}
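
// Example: with the bundled VLC ISO-639 table, videoGetLanguageDescription("eng")
// should return "English", while an unrecognized code falls back to "Unknown".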


int32_t videoGetHeight(int32_t playerHandle) {
    VideoPlayerT *v = NULL;

    // Get our player structure
    HASH_FIND_INT(_videoPlayerHash, &playerHandle, v);
    if (!v) utilDie("No video player at index %d in videoGetHeight.", playerHandle);

    return v->propFrame->EncodedHeight;
}


int32_t videoGetWidth(int32_t playerHandle) {
    VideoPlayerT *v = NULL;

    // Get our player structure
    HASH_FIND_INT(_videoPlayerHash, &playerHandle, v);
    if (!v) utilDie("No video player at index %d in videoGetWidth.", playerHandle);

    return v->propFrame->EncodedWidth;
}


int32_t videoGetVolume(int32_t playerHandle, int32_t *leftPercent, int32_t *rightPercent) {
    VideoPlayerT *v = NULL;

    // Get our player structure
    HASH_FIND_INT(_videoPlayerHash, &playerHandle, v);
    if (!v) utilDie("No video player at index %d in videoGetVolume.", playerHandle);

    if (leftPercent != NULL) *leftPercent = v->volumeLeft;
    if (rightPercent != NULL) *rightPercent = v->volumeRight;

    return 0;
}


int32_t videoLoad(char *filename, char *indexPath, bool stretchVideo, SDL_Renderer *renderer) {
    return _loadVideoAndAudio(filename, NULL, indexPath, stretchVideo, renderer);
}


int32_t videoLoadWithAudio(char *vFilename, char *aFilename, char *indexPath, bool stretchVideo, SDL_Renderer *renderer) {
    return _loadVideoAndAudio(vFilename, aFilename, indexPath, stretchVideo, renderer);
}


int32_t videoPause(int32_t playerHandle) {
    VideoPlayerT *v = NULL;

    // Get our player structure
    HASH_FIND_INT(_videoPlayerHash, &playerHandle, v);
    if (!v) utilDie("No video player at index %d in videoPause.", playerHandle);

    v->playing = false;
    v->resetTime = true;

    return 0;
}


int32_t videoPlay(int32_t playerHandle) {
    VideoPlayerT *v = NULL;

    // Get our player structure
    HASH_FIND_INT(_videoPlayerHash, &playerHandle, v);
    if (!v) utilDie("No video player at index %d in videoPlay.", playerHandle);

    v->playing = true;
    v->resetTime = true;

    return 0;
}


int32_t videoQuit(void) {
    VideoPlayerT *v = NULL;
    VideoPlayerT *t = NULL;

    // Unload any remaining videos
    HASH_ITER(hh, _videoPlayerHash, v, t) {
        videoUnload(v->id);
    }

    FFMS_Deinit();

    return 0;
}


int32_t videoSeek(int32_t playerHandle, int64_t seekFrame) {
    VideoPlayerT *v = NULL;

    // Get our player structure
    HASH_FIND_INT(_videoPlayerHash, &playerHandle, v);
    if (!v) utilDie("No video player at index %d in videoSeek.", playerHandle);

    while (seekFrame >= v->videoProps->NumFrames) {
        seekFrame -= v->videoProps->NumFrames;
    }
    while (seekFrame < 0) {
        seekFrame += v->videoProps->NumFrames;
    }

    v->frame = seekFrame;
    v->resetTime = true;

    return 0;
}


int32_t videoSetAudioTrack(int32_t playerHandle, int32_t track) {
    VideoPlayerT *v = NULL;

    // Get our player structure
    HASH_FIND_INT(_videoPlayerHash, &playerHandle, v);
    if (!v) utilDie("No video player at index %d in videoSetAudioTrack.", playerHandle);

    if ((track >= 0) && (track < v->audioSourceCount)) {
        v->currentAudioTrack = track;
    } else {
        utilDie("Invalid audio track in videoSetAudioTrack.");
    }

    return 0;
}


int32_t videoSetIndexCallback(videoIndexingCallback callback) {
    _indexingFunction = callback;
    return 0;
}
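
/*
 * Example (sketch, not compiled into the player): a progress hook, assuming
 * videoIndexingCallback takes the completion percentage that _indexCallBack()
 * passes to it above.  The function name is hypothetical:
 *
 *   void myIndexProgress(int32_t percent) {
 *       utilSay("Indexing: %d%%", percent);
 *   }
 *
 *   videoSetIndexCallback(myIndexProgress);
 */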


int32_t videoSetVolume(int32_t playerHandle, int32_t leftPercent, int32_t rightPercent) {
    VideoPlayerT *v = NULL;

    // Get our player structure
    HASH_FIND_INT(_videoPlayerHash, &playerHandle, v);
    if (!v) utilDie("No video player at index %d in videoSetVolume.", playerHandle);

    v->volumeLeft = leftPercent;
    v->volumeRight = rightPercent;

    return 0;
}


int32_t videoUnload(int32_t playerHandle) {
    VideoPlayerT *v = NULL;
    int32_t x = 0;

    // Get our player structure
    HASH_FIND_INT(_videoPlayerHash, &playerHandle, v);
    if (!v) utilDie("No video player at index %d in videoUnload.", playerHandle);

    if (v->audioSourceCount > 0) {
        Mix_HaltChannel(v->audioSilenceChannel);
        Mix_UnregisterEffect(v->audioSilenceChannel, _dequeueVideoAudio);
        Mix_FreeChunk(v->silenceChunk);
        free(v->audioSilenceRaw);
        SDL_FreeAudioStream(v->audioStream);
        free(v->audioBuffer);
        for (x = 0; x < v->audioSourceCount; x++) {
            if (v->audio[x].language) free(v->audio[x].language);
            FFMS_DestroyAudioSource(v->audio[x].audioSource);
        }
        free(v->audio);
    }

    FFMS_DestroyVideoSource(v->videoSource);
    SDL_DestroyTexture(v->videoTexture);

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-align"
    HASH_DEL(_videoPlayerHash, v);
#pragma GCC diagnostic pop
    free(v);

    return 0;
}


int32_t videoUpdate(int32_t playerHandle, SDL_Texture **texture) {
    int32_t result = -1;
    int64_t count = 0;
    int64_t threshold = 0;
    VideoPlayerT *v = NULL;

    // Get our player structure
    HASH_FIND_INT(_videoPlayerHash, &playerHandle, v);
    if (!v) utilDie("No video player at index %d in videoUpdate.", playerHandle);

    // Audio drift limit
    threshold = ((v->audioSourceCount > 0) && v->audio[v->currentAudioTrack].audioSource) ? (v->audio[v->currentAudioTrack].audioProps->SampleRate / 2) : 99999;

    // Handle video frames (and time)
    //if ((SDL_GetTicks() - v->lastTickTime >= v->frameDeltaTime) || (v->audioDelta > threshold) || v->resetTime) {
    if ((SDL_GetTicks() - v->lastTickTime >= v->frameDeltaTime) || v->resetTime) {

        if (v->frameData) {
            SDL_UpdateTexture(v->videoTexture, NULL, v->frameData->Data[0], v->frameData->Linesize[0]);
        }

        *texture = v->videoTexture;
        result = v->frame;
        v->framesPlayed++;

        v->frameData = FFMS_GetFrame(v->videoSource, v->frame, &v->errInfo);
        if (v->frameData == NULL) utilDie("%s", v->errInfo.Buffer);
        v->frameInfo = FFMS_GetFrameInfo(FFMS_GetTrackFromVideo(v->videoSource), v->frame);
        v->timestamp = (int64_t)((double)v->frameInfo->PTS * (double)v->videoTimeBase->Num / (double)v->videoTimeBase->Den);  // Convert to milliseconds
        v->frameDeltaTime = (v->timestamp - v->lastFrameTime);  // - (v->audioAdjustment * v->framesPlayed);
        v->lastFrameTime = v->timestamp;

        if (v->playing) {
            if (++v->frame >= v->videoProps->NumFrames) {
                v->frame = 0;
                v->timestamp = 0;
                v->resetTime = true;
            }
        }

        v->lastTickTime = SDL_GetTicks();

        if (v->resetTime) {
            if (v->audioSourceCount > 0) {
                SDL_AudioStreamClear(v->audioStream);
                v->audioPosition = (int64_t)((double)v->timestamp * 0.001 * (double)v->audio[v->currentAudioTrack].audioProps->SampleRate);
                v->audioDelta = 0;
            }
            v->lastTickTime = 0;
            v->frameDeltaTime = 0;
            v->resetTime = false;
            v->framesPlayed = 0;
        }
    }

    // Handle audio samples
    if (v->audioSourceCount > 0) {
        // Add more samples to queue?
        if ((v->playing) && (SDL_AudioStreamAvailable(v->audioStream) < AUDIO_STREAM_LOW_WATERMARK) && (v->audioPosition < v->audio[v->currentAudioTrack].audioProps->NumSamples)) {
            // Maximum samples we can read at a time
            count = AUDIO_SAMPLE_PREREAD;
            // Don't read past end of audio data
            if (v->audioPosition + count >= v->audio[v->currentAudioTrack].audioProps->NumSamples) {
                count = v->audio[v->currentAudioTrack].audioProps->NumSamples - v->audioPosition - 1;
            }
            // Are we reading anything?
            if (count > 0) {
                // Get audio from video stream
                if (FFMS_GetAudio(v->audio[v->currentAudioTrack].audioSource, v->audioBuffer, v->audioPosition, count, &v->errInfo)) utilDie("%s", v->errInfo.Buffer);
                // Feed it to the mixer stream
                if (SDL_AudioStreamPut(v->audioStream, v->audioBuffer, (int32_t)(count * v->audioSampleSize)) < 0) utilDie("%s", SDL_GetError());
                v->audioPosition += count;
            }
        }

        // Used to determine if we should play two frames rapidly to catch up to audio
        v->audioDelta = labs((long)(v->audioPosition - (int64_t)((double)v->timestamp * 0.001 * (double)v->audio[v->currentAudioTrack].audioProps->SampleRate)));

        // Did we trip the audio sync compensation?
        if (v->audioDelta > threshold) {
            v->frameDeltaTime *= 0.5;
            //utilSay("Adjusting delta %f", v->frameDeltaTime);
            /*
            // Adjust frame rate to try and match
            if (v->audioDelta > 0) {
                v->audioAdjustment += 0.000001;
            } else {
                v->audioAdjustment -= 0.000001;
            }
            */
        }
        //utilSay("D %ld T %ld A %f F %f", v->audioDelta, threshold, v->audioAdjustment, v->frameDeltaTime);
    }

    return result;
}
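
/*
 * Example (sketch, not compiled into the player): a minimal playback loop on
 * top of this API, assuming Mix_OpenAudio() and videoInit() have already run
 * and that "renderer" and "running" exist in the caller.  The file name and
 * index path are hypothetical; videoUpdate() returns the frame number it just
 * drew, or -1 when it is not yet time for a new frame:
 *
 *   SDL_Texture *frame = NULL;
 *   int32_t player = videoLoad("lair.mp4", ".", false, renderer);
 *
 *   videoPlay(player);
 *   while (running) {
 *       if (videoUpdate(player, &frame) >= 0) {
 *           SDL_RenderClear(renderer);
 *           SDL_RenderCopy(renderer, frame, NULL, NULL);
 *           SDL_RenderPresent(renderer);
 *       }
 *   }
 *
 *   videoUnload(player);
 *   videoQuit();
 */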