diff --git a/.gitignore b/.gitignore index 427a1344..e08b9c55 100644 --- a/.gitignore +++ b/.gitignore @@ -12,8 +12,10 @@ *.la *.a -build +build* dist +install* +test/**/test*[.wav|.avi|.mov|.h264|.mxf] # CMake CMakeCache.txt diff --git a/.travis.yml b/.travis.yml index fb5b4bd7..fa780ef6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -25,23 +25,28 @@ env: - DEPENDENCY_LOG_FILE=${TRAVIS_BUILD_DIR}/build-dependencies-log.txt - YASM_VERSION=1.3.0 - - LAME_VERSION=3.99.5 - - FAAC_VERSION=1.28 + - LAME_VERSION=3.100 + # - FAAC_VERSION=1.28 - XVID_VERSION=1.3.3 - - FDKAAC_VERSION=0.1.3 + # - FDKAAC_VERSION=0.1.3 - OGG_VERSION=1.3.2 - - VORBIS_VERSION=1.3.4 - - THEORA_VERSION=1.1.1 + - VORBIS_VERSION=1.3.6 + # - THEORA_VERSION=1.1.1 - VPX_VERSION=1.4.0 matrix: - - DEPENDENCY_NAME=libav DEPENDENCY_VERSION=11.3 ENABLE_COVERAGE=true - - DEPENDENCY_NAME=libav DEPENDENCY_VERSION=11.3 ENABLE_COVERAGE=false - - DEPENDENCY_NAME=ffmpeg DEPENDENCY_VERSION=2.4.2 ENABLE_COVERAGE=true - - DEPENDENCY_NAME=ffmpeg DEPENDENCY_VERSION=2.4.2 ENABLE_COVERAGE=false - - DEPENDENCY_NAME=ffmpeg DEPENDENCY_VERSION=2.5.7 ENABLE_COVERAGE=false - - DEPENDENCY_NAME=ffmpeg DEPENDENCY_VERSION=2.6.8 ENABLE_COVERAGE=false - - DEPENDENCY_NAME=ffmpeg DEPENDENCY_VERSION=2.7.6 ENABLE_COVERAGE=false - - DEPENDENCY_NAME=ffmpeg DEPENDENCY_VERSION=2.8.6 ENABLE_COVERAGE=false + # - DEPENDENCY_NAME=libav DEPENDENCY_VERSION=11.12 ENABLE_COVERAGE=true + # - DEPENDENCY_NAME=libav DEPENDENCY_VERSION=11.12 ENABLE_COVERAGE=false + # - DEPENDENCY_NAME=libav DEPENDENCY_VERSION=12.3 ENABLE_COVERAGE=true + # - DEPENDENCY_NAME=libav DEPENDENCY_VERSION=12.3 ENABLE_COVERAGE=false + # - DEPENDENCY_NAME=ffmpeg DEPENDENCY_VERSION=2.4.2 ENABLE_COVERAGE=true + # - DEPENDENCY_NAME=ffmpeg DEPENDENCY_VERSION=2.4.2 ENABLE_COVERAGE=false + # - DEPENDENCY_NAME=ffmpeg DEPENDENCY_VERSION=2.5.7 ENABLE_COVERAGE=false + # - DEPENDENCY_NAME=ffmpeg DEPENDENCY_VERSION=2.6.8 ENABLE_COVERAGE=false + # - DEPENDENCY_NAME=ffmpeg DEPENDENCY_VERSION=2.7.6 ENABLE_COVERAGE=false + # - DEPENDENCY_NAME=ffmpeg DEPENDENCY_VERSION=2.8.6 ENABLE_COVERAGE=false + # - DEPENDENCY_NAME=ffmpeg DEPENDENCY_VERSION=4.0 ENABLE_COVERAGE=false + - DEPENDENCY_NAME=ffmpeg DEPENDENCY_VERSION=4.1 ENABLE_COVERAGE=false + - DEPENDENCY_NAME=ffmpeg DEPENDENCY_VERSION=4.2 ENABLE_COVERAGE=true matrix: exclude: @@ -57,6 +62,7 @@ matrix: env: DEPENDENCY_NAME=ffmpeg DEPENDENCY_VERSION=2.4.2 ENABLE_COVERAGE=true allow_failures: # build with libav + - os: osx - env: DEPENDENCY_NAME=libav DEPENDENCY_VERSION=11.3 ENABLE_COVERAGE=true - env: DEPENDENCY_NAME=libav DEPENDENCY_VERSION=11.3 ENABLE_COVERAGE=false # build with ffmpeg-2.8.6 @@ -75,8 +81,10 @@ addons: packages: - cmake - swig - - python-dev - - python-nose + - python3-dev + - python3 + - python3-nose + - python3-coverage - freeglut3-dev cache: @@ -96,11 +104,12 @@ before_script: # install coverage tools - if [ ${ENABLE_COVERAGE} ]; then ./tools/travis/gcc.install.coverage.sh; fi + +script: # install avtranscoder dependencies - if [ ${TRAVIS_OS_NAME} = "linux" ]; then ./tools/travis/linux.install.deps.sh; fi - if [ ${TRAVIS_OS_NAME} = "osx" ]; then ./tools/travis/osx.install.deps.sh; fi -script: # build - ./tools/travis/build.sh diff --git a/CMakeLists.txt b/CMakeLists.txt index 65da65f2..3fa384b1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,6 +2,15 @@ cmake_minimum_required(VERSION 2.8.11) project(AvTranscoder) +# All libraries will be put in INSTALL_PREFIX/lib +# RPATH of host points INSTALL_PREFIX/lib +# see: http://www.cmake.org/Wiki/CMake_RPATH_handling +set(CMAKE_MACOSX_RPATH 1) +set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE) +set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib") +set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) + + # Define AvTranscoder default path to profiles add_definitions(-DAVTRANSCODER_DEFAULT_AVPROFILES="${CMAKE_INSTALL_PREFIX}/share/avprofiles") diff --git a/README.md b/README.md index f31fd5b8..c2d9effc 100644 --- a/README.md +++ b/README.md @@ -8,9 +8,9 @@ Based on FFmpeg/Libav libraries to support various video and audio formats, avTr [![Build status](https://ci.appveyor.com/api/projects/status/6urf0otyhtj8xuny?svg=true)](https://ci.appveyor.com/project/cchampet/avtranscoder) [![Coverage Status](https://coveralls.io/repos/avTranscoder/avTranscoder/badge.svg)](https://coveralls.io/r/avTranscoder/avTranscoder) Coverity Scan Build Status -[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/avTranscoder/avtranscoder/trend.png)](https://bitdeli.com/free "Bitdeli Badge") + -[![Stories in Progress](https://badge.waffle.io/avTranscoder/avTranscoder.svg?label=2 - Working&title=In Progress)](http://waffle.io/avTranscoder/avTranscoder) + Click on the badge above to have a big picture view of what's in progress and how you can help. :warning: The latest avTranscoder API does not fit with libav. @@ -18,7 +18,7 @@ Click on the badge above to have a big picture view of what's in progress and ho #### The basics * avTranscoder is a C++ library. * avTranscoder uses [SWIG](http://www.swig.org/) to generate __Java__ and __Python__ bindings. -* avTranscoder is multiplateform (Linux, MAC, Windows). +* avTranscoder is multiplatform (Linux, MAC, Windows). * avTranscoder could be based on [Libav](https://libav.org/), [FFmpeg](https://ffmpeg.org/), or your custom fork of one of these librairies. #### License diff --git a/app/CMakeLists.txt b/app/CMakeLists.txt index 83ed81df..02945a9b 100644 --- a/app/CMakeLists.txt +++ b/app/CMakeLists.txt @@ -1,8 +1,10 @@ # C++ apps +add_subdirectory(avAudioPhaseMeter) add_subdirectory(avInfo) add_subdirectory(avMeta) add_subdirectory(avPlayer) add_subdirectory(avProcessor) +add_subdirectory(customEncoder) # Python apps add_subdirectory(pyProcessor) diff --git a/app/avAudioPhaseMeter/CMakeLists.txt b/app/avAudioPhaseMeter/CMakeLists.txt new file mode 100644 index 00000000..68946eb9 --- /dev/null +++ b/app/avAudioPhaseMeter/CMakeLists.txt @@ -0,0 +1,25 @@ +### cpp/avProcessor + +# Load custom cmake utilities +set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) +include(AvTranscoderMacros) + +# Build app +add_executable(avaudiophasemeter avAudioPhaseMeter.cpp) +set_target_properties(avaudiophasemeter PROPERTIES VERSION ${AVTRANSCODER_VERSION}) +target_link_libraries(avaudiophasemeter avtranscoder-shared) + +# Install app +if(WIN32) + set_target_properties(avaudiophasemeter PROPERTIES OUTPUT_NAME "avaudiophasemeter-${AVTRANSCODER_VERSION}") + set(BINARY_FILES "${CMAKE_CURRENT_BINARY_DIR}/avaudiophasemeter.exe" "${CMAKE_CURRENT_BINARY_DIR}/avaudiophasemeter-${AVTRANSCODER_VERSION}.exe") +else() + set(BINARY_FILES "${CMAKE_CURRENT_BINARY_DIR}/avaudiophasemeter" "${CMAKE_CURRENT_BINARY_DIR}/avaudiophasemeter-${AVTRANSCODER_VERSION}") +endif() + +install( + FILES ${BINARY_FILES} + PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ GROUP_EXECUTE GROUP_READ WORLD_READ WORLD_EXECUTE + DESTINATION "bin/" + OPTIONAL +) diff --git a/app/avAudioPhaseMeter/avAudioPhaseMeter.cpp b/app/avAudioPhaseMeter/avAudioPhaseMeter.cpp new file mode 100644 index 00000000..e018f785 --- /dev/null +++ b/app/avAudioPhaseMeter/avAudioPhaseMeter.cpp @@ -0,0 +1,148 @@ +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +std::vector parseConfigFile(const std::string& configFilename) +{ + std::vector result; + + std::ifstream configFile(configFilename.c_str(), std::ifstream::in); + + std::string line; + size_t countLines = 0; + while(std::getline(configFile, line)) + { + if(++countLines > 2) + { + throw std::runtime_error("Audio phase analysis can only be done on a stereo pair, i.e. two channels (see usage)."); + } + + std::istringstream is_line(line); + std::string filename; + if(std::getline(is_line, filename, '=')) + { + std::string stream; + std::getline(is_line, stream); + + std::stringstream ss(stream); + size_t streamIndex = 0; + char separator; + int channelIndex = -1; + ss >> streamIndex; + ss >> separator; + if(separator == '.') + { + ss >> channelIndex; + } + + bool newInputDescAdded = false; + // if we already have an input description with the same filename/streamIndex, add only the new channelIndex + for(std::vector::iterator it = result.begin(); it != result.end(); ++it) + { + if(it->_filename == filename && it->_streamIndex == streamIndex) + { + it->_channelIndexArray.push_back(channelIndex); + newInputDescAdded = true; + break; + } + } + if(!newInputDescAdded) + { + result.push_back(avtranscoder::InputStreamDesc(filename, streamIndex, channelIndex)); + } + } + } + + configFile.close(); + + return result; +} + +void displayUsage(const std::string& program) +{ + std::cout << "Usage: " << program << " CONFIG OUTPUT [OPTIONS]" << std::endl << std::endl; + std::cout << "\tCONFIG: input configuration file" << std::endl; + std::cout << "\t\tEach line represents one audio stream analysed." << std::endl; + std::cout << "\t\tPattern of each line is:" << std::endl; + std::cout << "\t\t[inputFile]=STREAM_INDEX.CHANNEL_INDEX" << std::endl; + std::cout << "\t\tWARNING: audio phase analyser only support stereo layout, i.e. two lines in this configuration." << std::endl << std::endl; + std::cout << "\tOUTPUT: metadata output file" << std::endl; + std::cout << "\t\tPattern for each frame is:" << std::endl; + std::cout << "\t\t `frame:[FRAME_ID] pts:[PTS] pts_time:[PTS_TIME]" << std::endl; + std::cout << "\t\t lavfi.aphasemeter.phase=[PHASE_VALUE]`" << std::endl << std::endl; + std::cout << "\tOPTIONS:" << std::endl; + std::cout << "\t\t--info set log level to AV_LOG_INFO" << std::endl; + std::cout << "\t\t--debug set log level to AV_LOG_DEBUG" << std::endl; + std::cout << "\t\t--help display this help" << std::endl << std::endl; +} + +int main(int argc, char** argv) +{ + // Preload FFmpeg context + avtranscoder::preloadCodecsAndFormats(); + avtranscoder::Logger::setLogLevel(AV_LOG_QUIET); + + if(argc < 3) + { + displayUsage(argv[0]); + } + + // List command line arguments + std::vector arguments; + for(int argument = 1; argument < argc; ++argument) + { + arguments.push_back(argv[argument]); + } + for(size_t argument = 0; argument < arguments.size(); ++argument) + { + if(arguments.at(argument) == "--help") + { + displayUsage(argv[0]); + return 0; + } + else if(arguments.at(argument) == "--debug") + { + avtranscoder::Logger::setLogLevel(AV_LOG_DEBUG); + } + else if(arguments.at(argument) == "--info") + { + avtranscoder::Logger::setLogLevel(AV_LOG_INFO); + } + } + + try + { + std::string configFilePath(arguments.at(0)); + std::string outputFilePath(arguments.at(1)); + std::vector inputStreamsToAnalyse = parseConfigFile(configFilePath); + + avtranscoder::OutputFile outputFile(outputFilePath, "null"); // the output file will be overwritten by the extracted metadata + + avtranscoder::Transcoder transcoder(outputFile); + transcoder.setProcessMethod(avtranscoder::eProcessMethodBasedOnStream, 0); + transcoder.addStream(inputStreamsToAnalyse); + + avtranscoder::StreamTranscoder& streamTranscoder = transcoder.getStreamTranscoder(0); + avtranscoder::FilterGraph* filterGraph = streamTranscoder.getFilterGraph(); + filterGraph->addFilter("aphasemeter", "video=0"); + filterGraph->addFilter("ametadata", "mode=print:file=" + outputFilePath); + + avtranscoder::ConsoleProgress progress; + transcoder.process(progress); + } + catch(std::exception& e) + { + std::cerr << "ERROR: during process, an error occured: " << e.what() << std::endl; + } + catch(...) + { + std::cerr << "ERROR: during process, an unknown error occured" << std::endl; + } +} diff --git a/app/avInfo/CMakeLists.txt b/app/avInfo/CMakeLists.txt index 4cba2e5e..8eddbb4c 100644 --- a/app/avInfo/CMakeLists.txt +++ b/app/avInfo/CMakeLists.txt @@ -11,7 +11,8 @@ target_link_libraries(avinfo avtranscoder-shared) # Install app if(WIN32) - set(BINARY_FILES "${CMAKE_CURRENT_BINARY_DIR}/avinfo.exe") + set_target_properties(avinfo PROPERTIES OUTPUT_NAME "avinfo-${AVTRANSCODER_VERSION}") + set(BINARY_FILES "${CMAKE_CURRENT_BINARY_DIR}/avinfo.exe" "${CMAKE_CURRENT_BINARY_DIR}/avinfo-${AVTRANSCODER_VERSION}.exe") else() set(BINARY_FILES "${CMAKE_CURRENT_BINARY_DIR}/avinfo" "${CMAKE_CURRENT_BINARY_DIR}/avinfo-${AVTRANSCODER_VERSION}") endif() diff --git a/app/avMeta/CMakeLists.txt b/app/avMeta/CMakeLists.txt index b12c99c9..2228c579 100644 --- a/app/avMeta/CMakeLists.txt +++ b/app/avMeta/CMakeLists.txt @@ -11,7 +11,8 @@ target_link_libraries(avmeta avtranscoder-shared) # Install app if(WIN32) - set(BINARY_FILES "${CMAKE_CURRENT_BINARY_DIR}/avmeta.exe") + set_target_properties(avmeta PROPERTIES OUTPUT_NAME "avmeta-${AVTRANSCODER_VERSION}") + set(BINARY_FILES "${CMAKE_CURRENT_BINARY_DIR}/avmeta.exe" "${CMAKE_CURRENT_BINARY_DIR}/avmeta-${AVTRANSCODER_VERSION}.exe") else() set(BINARY_FILES "${CMAKE_CURRENT_BINARY_DIR}/avmeta" "${CMAKE_CURRENT_BINARY_DIR}/avmeta-${AVTRANSCODER_VERSION}") endif() diff --git a/app/avPlayer/CMakeLists.txt b/app/avPlayer/CMakeLists.txt index bf735c2e..61c7e05c 100644 --- a/app/avPlayer/CMakeLists.txt +++ b/app/avPlayer/CMakeLists.txt @@ -33,7 +33,8 @@ target_link_libraries(avplayer avtranscoder-shared ${OPENGL_LIBRARIES} ${GLUT_LI # Install app if(WIN32) - set(BINARY_FILES "${CMAKE_CURRENT_BINARY_DIR}/avplayer.exe") + set_target_properties(avplayer PROPERTIES OUTPUT_NAME "avplayer-${AVTRANSCODER_VERSION}") + set(BINARY_FILES "${CMAKE_CURRENT_BINARY_DIR}/avplayer.exe" "${CMAKE_CURRENT_BINARY_DIR}/avplayer-${AVTRANSCODER_VERSION}.exe") else() set(BINARY_FILES "${CMAKE_CURRENT_BINARY_DIR}/avplayer" "${CMAKE_CURRENT_BINARY_DIR}/avplayer-${AVTRANSCODER_VERSION}") endif() diff --git a/app/avProcessor/CMakeLists.txt b/app/avProcessor/CMakeLists.txt index 4cb1a118..8c20bf4f 100644 --- a/app/avProcessor/CMakeLists.txt +++ b/app/avProcessor/CMakeLists.txt @@ -11,7 +11,8 @@ target_link_libraries(avprocessor avtranscoder-shared) # Install app if(WIN32) - set(BINARY_FILES "${CMAKE_CURRENT_BINARY_DIR}/avprocessor.exe") + set_target_properties(avprocessor PROPERTIES OUTPUT_NAME "avprocessor-${AVTRANSCODER_VERSION}") + set(BINARY_FILES "${CMAKE_CURRENT_BINARY_DIR}/avprocessor.exe" "${CMAKE_CURRENT_BINARY_DIR}/avprocessor-${AVTRANSCODER_VERSION}.exe") else() set(BINARY_FILES "${CMAKE_CURRENT_BINARY_DIR}/avprocessor" "${CMAKE_CURRENT_BINARY_DIR}/avprocessor-${AVTRANSCODER_VERSION}") endif() diff --git a/app/customEncoder/CMakeLists.txt b/app/customEncoder/CMakeLists.txt new file mode 100644 index 00000000..df273a35 --- /dev/null +++ b/app/customEncoder/CMakeLists.txt @@ -0,0 +1,25 @@ +### cpp/customEncoder + +# Load custom cmake utilities +set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) +include(AvTranscoderMacros) + +# Build app +add_executable(custom-encoder customEncoder.cpp) +set_target_properties(custom-encoder PROPERTIES VERSION ${AVTRANSCODER_VERSION}) +target_link_libraries(custom-encoder avtranscoder-shared) + +# Install app +if(WIN32) + set_target_properties(custom-encoder PROPERTIES OUTPUT_NAME "custom-encoder-${AVTRANSCODER_VERSION}") + set(BINARY_FILES "${CMAKE_CURRENT_BINARY_DIR}/custom-encoder.exe" "${CMAKE_CURRENT_BINARY_DIR}/custom-encoder-${AVTRANSCODER_VERSION}.exe") +else() + set(BINARY_FILES "${CMAKE_CURRENT_BINARY_DIR}/custom-encoder" "${CMAKE_CURRENT_BINARY_DIR}/custom-encoder-${AVTRANSCODER_VERSION}") +endif() + +install( + FILES ${BINARY_FILES} + PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ GROUP_EXECUTE GROUP_READ WORLD_READ WORLD_EXECUTE + DESTINATION "bin/" + OPTIONAL +) diff --git a/app/customEncoder/customEncoder.cpp b/app/customEncoder/customEncoder.cpp new file mode 100644 index 00000000..0de6ce2d --- /dev/null +++ b/app/customEncoder/customEncoder.cpp @@ -0,0 +1,208 @@ +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +void parseConfigFile(const std::string& configFilename, avtranscoder::Transcoder& transcoder) +{ + std::ifstream configFile(configFilename.c_str(), std::ifstream::in); + + std::string line; + while(std::getline(configFile, line)) + { + std::istringstream is_line(line); + std::string filename; + if(std::getline(is_line, filename, '=')) + { + std::string streamId; + if(std::getline(is_line, streamId, ':')) + { + std::string transcodeProfile; + std::getline(is_line, transcodeProfile); + + std::stringstream ss(streamId); + size_t streamIndex = 0; + char separator = 'x'; + std::vector channelIndexArray; + ss >> streamIndex; + ss >> separator; + if(separator == '.') + { + int subStreamIndex = -1; + ss >> subStreamIndex; + channelIndexArray.push_back(subStreamIndex); + } + + // generated stream + if(!filename.length()) + transcoder.addGenerateStream(transcodeProfile); + else + { + avtranscoder::InputStreamDesc inputDesc(filename, streamIndex, channelIndexArray); + transcoder.addStream(inputDesc, transcodeProfile); + } + } + } + } + + configFile.close(); +} + + + +class AvExport CustomCodec + : public avtranscoder::ICodec +{ +public: + CustomCodec() + : avtranscoder::ICodec(avtranscoder::eCodecTypeEncoder, AV_CODEC_ID_PCM_S24LE) + { + } + + void openCodec(){} + void closeCodec(){} + + std::string getCodecName() const { return "Custom Encoder"; }; + AVCodecID getCodecId() const { return AV_CODEC_ID_PCM_S24LE; } + avtranscoder::ECodecType getCodecType() const { return avtranscoder::eCodecTypeEncoder; } + int getLatency() const { return 0; } + + avtranscoder::OptionArray getOptions() { + std::vector options; + return options; + } +}; + + +class AvExport CustomEncoder + : public avtranscoder::IEncoder +{ +public: + CustomEncoder() + : _codec() + {} + /** + * @brief Setup the encoder + * @param profile: set encoder parameters from the given profile + * @note Open the encoder. + */ + void setupEncoder(const avtranscoder::ProfileLoader::Profile& profile = avtranscoder::ProfileLoader::Profile()) { + return; + }; + + /** + * @brief Encode a new frame, and get coded frame + * @param sourceFrame: frame that needs to be encoded + * @param codedFrame: output encoded coded data (first frames can be delayed) + * @return status of encoding + * @throw runtime_error if the encoded process failed. + */ + bool encodeFrame(const avtranscoder::IFrame& sourceFrame, avtranscoder::CodedData& codedFrame) { + codedFrame.assign(5760, 0); + return true; + }; + + /** + * @brief Get the frames remaining into the encoder + * @param codedFrame: output encoded data + * @return status of encoding + * @throw runtime_error if the encoded process failed. + */ + bool encodeFrame(avtranscoder::CodedData& codedFrame) { + return false; + }; + + /** + * @brief Get codec used for encoding. + * @return a reference to the codec + */ + avtranscoder::ICodec& getCodec() { + return _codec; + }; + +private: + CustomCodec _codec; +}; + + +int main(int argc, char** argv) +{ + std::string help; + help += "Usage\n"; + help += "\tavprocessor INPUT_FILE_NAME OUTPUT_FILE_NAME [--verbose] [--logFile] [--help]\n"; + help += "Command line options\n"; + help += "\t--verbose: set log level to AV_LOG_DEBUG\n"; + help += "\t--logFile: put log in 'avtranscoder.log' file\n"; + help += "\t--help: display this help\n"; + + // Preload FFmpeg context + avtranscoder::preloadCodecsAndFormats(); + avtranscoder::Logger::setLogLevel(AV_LOG_QUIET); + + // List command line arguments + std::vector arguments; + for(int argument = 1; argument < argc; ++argument) + { + arguments.push_back(argv[argument]); + } + for(size_t argument = 0; argument < arguments.size(); ++argument) + { + if(arguments.at(argument) == "--help") + { + std::cout << help << std::endl; + return 0; + } + else if(arguments.at(argument) == "--verbose") + { + avtranscoder::Logger::setLogLevel(AV_LOG_DEBUG); + } + else if(arguments.at(argument) == "--logFile") + { + avtranscoder::Logger::logInFile(); + } + } + + // Check required arguments + if(argc < 3) + { + std::cout << "avprocessor can rewrap or transcode inputs to create an output media file." << std::endl; + std::cout << "Use option --help to display help" << std::endl; + return (-1); + } + + try + { + std::string output_format = "s24le"; + avtranscoder::OutputFile outputFile(argv[2], output_format); + + avtranscoder::Transcoder transcoder(outputFile); + transcoder.setProcessMethod(avtranscoder::eProcessMethodBasedOnStream); + + CustomEncoder* customEncoder = new CustomEncoder; + avtranscoder::InputStreamDesc inputDescLeft(argv[1], 1, 0); + avtranscoder::InputStreamDesc inputDescRight(argv[1], 2, 0); + + std::vector inputDescriptors; + inputDescriptors.push_back(avtranscoder::InputStreamDesc(argv[1], 1, 0)); + inputDescriptors.push_back(avtranscoder::InputStreamDesc(argv[1], 2, 0)); + + transcoder.addStream(inputDescriptors, customEncoder); + + avtranscoder::ConsoleProgress progress; + transcoder.process(progress); + } + catch(std::exception& e) + { + std::cerr << "ERROR: during process, an error occured: " << e.what() << std::endl; + } + catch(...) + { + std::cerr << "ERROR: during process, an unknown error occured" << std::endl; + } +} diff --git a/appveyor.yml b/appveyor.yml index adc2bc2c..a809dd3e 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -8,7 +8,7 @@ platform: environment: global: DEPENDENCY_NAME: ffmpeg - DEPENDENCY_VERSION: 2.4.5 + DEPENDENCY_VERSION: 4.2.1 DEPENDENCY_INSTALL_PATH: C:\ProgramData\install-dependency AVTRANSCODER_INSTALL_PATH: C:\projects\avtranscoder\build\install-avtranscoder @@ -25,10 +25,10 @@ install: # Get the correct python version - ps: if($env:platform -eq 'x86') { - $env:PYTHON = "C:\Python27"; + $env:PYTHON = "C:\Python35"; } else { - $env:PYTHON = "C:\Python27-x64"; + $env:PYTHON = "C:\Python35-x64"; } # Prepend newly installed Python to the PATH of this build - cmd: set PATH=%PYTHON%;%PYTHON%\Scripts;%PATH% @@ -37,7 +37,7 @@ install: - "python --version" - "python -c \"import struct; print(struct.calcsize('P') * 8)\"" # Upgrade to the latest version of pip to avoid it displaying warnings about it being out of date. - - pip install --disable-pip-version-check --user --upgrade pip + - "python -m pip install --disable-pip-version-check --user --upgrade pip" # Install tests dependencies - pip install nose diff --git a/src/AvTranscoder/Library.cpp b/src/AvTranscoder/Library.cpp index 961a8906..aed1f827 100644 --- a/src/AvTranscoder/Library.cpp +++ b/src/AvTranscoder/Library.cpp @@ -11,6 +11,7 @@ extern "C" { #else #include #endif +#include #include #include } @@ -105,9 +106,10 @@ Libraries getLibraries() std::vector getInputExtensions() { std::vector extensions; - AVInputFormat* iFormat = NULL; + const AVInputFormat* iFormat = NULL; + void *iFormatOpaque = NULL; - while((iFormat = av_iformat_next(iFormat))) + while((iFormat = av_demuxer_iterate(&iFormatOpaque))) { if(iFormat->extensions != NULL) { @@ -143,9 +145,10 @@ std::vector getInputExtensions() std::vector getOutputExtensions() { std::vector extensions; - AVOutputFormat* oFormat = NULL; + const AVOutputFormat* oFormat = NULL; + void *oFormatOpaque = NULL; - while((oFormat = av_oformat_next(oFormat))) + while((oFormat = av_muxer_iterate(&oFormatOpaque))) { if(oFormat->extensions != NULL) { diff --git a/src/AvTranscoder/avTranscoder.i b/src/AvTranscoder/avTranscoder.i index 5005baa8..175a6c22 100644 --- a/src/AvTranscoder/avTranscoder.i +++ b/src/AvTranscoder/avTranscoder.i @@ -6,6 +6,7 @@ %include "std_vector.i" %include "std_pair.i" %include "std_map.i" +%include "stdint.i" %include "AvTranscoder/swig/avException.i" %include "AvTranscoder/swig/avExport.i" diff --git a/src/AvTranscoder/codec/ICodec.hpp b/src/AvTranscoder/codec/ICodec.hpp index de3203dd..8ad135ee 100644 --- a/src/AvTranscoder/codec/ICodec.hpp +++ b/src/AvTranscoder/codec/ICodec.hpp @@ -54,7 +54,6 @@ class AvExport ICodec #ifndef SWIG AVCodecContext& getAVCodecContext() { return *_avCodecContext; } const AVCodecContext& getAVCodecContext() const { return *_avCodecContext; } - AVCodec& getAVCodec() { return *_avCodec; } const AVCodec& getAVCodec() const { return *_avCodec; } #endif @@ -66,7 +65,7 @@ class AvExport ICodec protected: AVCodecContext* _avCodecContext; ///< Full codec instance description (has ownership) - AVCodec* _avCodec; ///< Codec abstract description + const AVCodec* _avCodec; ///< Codec abstract description const bool _isCodecContextAllocated; ///< Is the AVCodecContext allocated by the class ECodecType _type; diff --git a/src/AvTranscoder/common.cpp b/src/AvTranscoder/common.cpp index 5b683c83..86022a79 100644 --- a/src/AvTranscoder/common.cpp +++ b/src/AvTranscoder/common.cpp @@ -15,8 +15,10 @@ namespace avtranscoder void preloadCodecsAndFormats() { +#if LIBAVFILTER_VERSION_MAJOR < 7 av_register_all(); avfilter_register_all(); +#endif } std::string getDescriptionFromErrorCode(const int code) diff --git a/src/AvTranscoder/common.hpp b/src/AvTranscoder/common.hpp index 93112af7..c249e3a3 100644 --- a/src/AvTranscoder/common.hpp +++ b/src/AvTranscoder/common.hpp @@ -2,7 +2,7 @@ #define _AV_TRANSCODER_COMMON_HPP_ #define AVTRANSCODER_VERSION_MAJOR 0 -#define AVTRANSCODER_VERSION_MINOR 14 +#define AVTRANSCODER_VERSION_MINOR 16 #define AVTRANSCODER_VERSION_MICRO 2 #include diff --git a/src/AvTranscoder/data/coded/CodedData.cpp b/src/AvTranscoder/data/coded/CodedData.cpp index 8c556b69..074a0f83 100644 --- a/src/AvTranscoder/data/coded/CodedData.cpp +++ b/src/AvTranscoder/data/coded/CodedData.cpp @@ -43,7 +43,7 @@ CodedData& CodedData::operator=(const CodedData& other) CodedData::~CodedData() { - av_free_packet(&_packet); + av_packet_unref(&_packet); } void CodedData::resize(const size_t newSize) @@ -75,7 +75,7 @@ void CodedData::refData(CodedData& frame) void CodedData::clear() { - av_free_packet(&_packet); + av_packet_unref(&_packet); initAVPacket(); } @@ -87,14 +87,16 @@ void CodedData::assign(const size_t size, const int value) void CodedData::initAVPacket() { - av_init_packet(&_packet); + _packet = *av_packet_alloc(); _packet.data = NULL; _packet.size = 0; } void CodedData::copyAVPacket(const AVPacket& avPacket) { -#if AVTRANSCODER_FFMPEG_DEPENDENCY && LIBAVCODEC_VERSION_INT > AV_VERSION_INT(54, 56, 0) +#if AVTRANSCODER_FFMPEG_DEPENDENCY && LIBAVCODEC_VERSION_MAJOR > 57 + av_packet_ref(&_packet, &avPacket); +#elif AVTRANSCODER_FFMPEG_DEPENDENCY && LIBAVCODEC_VERSION_INT > AV_VERSION_INT(54, 56, 0) // Need const_cast for libav versions from 54.56. to 55.56. av_copy_packet(&_packet, const_cast(&avPacket)); #else diff --git a/src/AvTranscoder/data/decoded/AudioFrame.cpp b/src/AvTranscoder/data/decoded/AudioFrame.cpp index 8dfd3bd8..54fa8119 100644 --- a/src/AvTranscoder/data/decoded/AudioFrame.cpp +++ b/src/AvTranscoder/data/decoded/AudioFrame.cpp @@ -45,9 +45,9 @@ AudioFrame::AudioFrame(const AudioFrameDesc& desc, const bool forceDataAllocatio , _desc(desc) { // Set Frame properties - av_frame_set_sample_rate(_frame, desc._sampleRate); - av_frame_set_channels(_frame, desc._nbChannels); - av_frame_set_channel_layout(_frame, av_get_default_channel_layout(desc._nbChannels)); + _frame->sample_rate = desc._sampleRate; + _frame->channels = desc._nbChannels; + _frame->channel_layout = av_get_default_channel_layout(desc._nbChannels); _frame->format = desc._sampleFormat; _frame->nb_samples = getDefaultNbSamples(); @@ -102,9 +102,9 @@ void AudioFrame::allocateData() LOG_WARN("The AudioFrame seems to already have allocated data. This could lead to memory leaks.") // Set Frame properties - av_frame_set_sample_rate(_frame, _desc._sampleRate); - av_frame_set_channels(_frame, _desc._nbChannels); - av_frame_set_channel_layout(_frame, av_get_default_channel_layout(_desc._nbChannels)); + _frame->sample_rate = _desc._sampleRate; + _frame->channels = _desc._nbChannels; + _frame->channel_layout = av_get_default_channel_layout(_desc._nbChannels); _frame->format = _desc._sampleFormat; if(_frame->nb_samples == 0) _frame->nb_samples = getDefaultNbSamples(); diff --git a/src/AvTranscoder/data/decoded/AudioFrame.hpp b/src/AvTranscoder/data/decoded/AudioFrame.hpp index 6bf00175..f8d1cb8b 100644 --- a/src/AvTranscoder/data/decoded/AudioFrame.hpp +++ b/src/AvTranscoder/data/decoded/AudioFrame.hpp @@ -51,9 +51,9 @@ class AvExport AudioFrame : public IFrame void freeData(); size_t getDataSize() const; - size_t getSampleRate() const { return av_frame_get_sample_rate(_frame); } - size_t getNbChannels() const { return av_frame_get_channels(_frame); } - size_t getChannelLayout() const { return av_frame_get_channel_layout(_frame); } + size_t getSampleRate() const { return _frame->sample_rate; } + size_t getNbChannels() const { return _frame->channels; } + size_t getChannelLayout() const { return _frame->channel_layout; } std::string getChannelLayoutDesc() const; ///< Get a description of a channel layout (example: '5.1'). AVSampleFormat getSampleFormat() const { return static_cast(_frame->format); } size_t getBytesPerSample() const; ///< 0 if unknown sample format diff --git a/src/AvTranscoder/data/decoded/IFrame.cpp b/src/AvTranscoder/data/decoded/IFrame.cpp index 608f90e4..84efa32b 100644 --- a/src/AvTranscoder/data/decoded/IFrame.cpp +++ b/src/AvTranscoder/data/decoded/IFrame.cpp @@ -69,7 +69,7 @@ void IFrame::assignValue(const unsigned char value) // Create the buffer const int bufferSize = getDataSize(); - unsigned char* dataBuffer = static_cast(malloc(bufferSize * sizeof(unsigned char))); + unsigned char* dataBuffer = static_cast(av_malloc(bufferSize * sizeof(unsigned char))); memset(dataBuffer, value, bufferSize); // Fill the frame diff --git a/src/AvTranscoder/data/decoded/VideoFrame.cpp b/src/AvTranscoder/data/decoded/VideoFrame.cpp index 4da08b00..631ffc76 100644 --- a/src/AvTranscoder/data/decoded/VideoFrame.cpp +++ b/src/AvTranscoder/data/decoded/VideoFrame.cpp @@ -5,6 +5,7 @@ extern "C" { #include #include +#include } #include @@ -75,7 +76,7 @@ size_t VideoFrame::getDataSize() const return 0; } - const size_t size = avpicture_get_size(getPixelFormat(), getWidth(), getHeight()); + const size_t size = av_image_get_buffer_size(getPixelFormat(), getWidth(), getHeight(), 1); if(size == 0) throw std::runtime_error("Unable to determine image buffer size: " + getDescriptionFromErrorCode(size)); return size; @@ -92,7 +93,7 @@ void VideoFrame::allocateData() _frame->format = _desc._pixelFormat; // Allocate data - const int ret = avpicture_alloc(reinterpret_cast(_frame), _desc._pixelFormat, _desc._width, _desc._height); + const int ret = av_image_alloc(_frame->data, _frame->linesize, _desc._width, _desc._height, _desc._pixelFormat, 1); if(ret < 0) { const std::string formatName = getPixelFormatName(_desc._pixelFormat); @@ -109,14 +110,13 @@ void VideoFrame::allocateData() void VideoFrame::freeData() { - avpicture_free(reinterpret_cast(_frame)); + av_freep(&_frame->data[0]); _dataAllocated = false; } void VideoFrame::assignBuffer(const unsigned char* ptrValue) { - const int ret = - avpicture_fill(reinterpret_cast(_frame), ptrValue, getPixelFormat(), getWidth(), getHeight()); + const int ret = av_image_fill_arrays(_frame->data, _frame->linesize, ptrValue, getPixelFormat(), getWidth(), getHeight(), 1); if(ret < 0) { std::stringstream msg; diff --git a/src/AvTranscoder/decoder/AudioDecoder.cpp b/src/AvTranscoder/decoder/AudioDecoder.cpp index a995a3df..1a7a7d99 100644 --- a/src/AvTranscoder/decoder/AudioDecoder.cpp +++ b/src/AvTranscoder/decoder/AudioDecoder.cpp @@ -87,7 +87,7 @@ bool AudioDecoder::decodeNextFrame(IFrame& frameBuffer) if(!_isSetup) setupDecoder(); - int got_frame = 0; + bool got_frame = false; while(!got_frame) { CodedData data; @@ -98,18 +98,27 @@ bool AudioDecoder::decodeNextFrame(IFrame& frameBuffer) // decoding // @note could be called several times to return the remaining frames (last call with an empty packet) // @see CODEC_CAP_DELAY - int ret = avcodec_decode_audio4(&_inputStream->getAudioCodec().getAVCodecContext(), &frameBuffer.getAVFrame(), - &got_frame, &data.getAVPacket()); + int ret = avcodec_send_packet(&_inputStream->getAudioCodec().getAVCodecContext(), &data.getAVPacket()); + if(ret < 0) { - throw std::runtime_error("An error occurred during audio decoding: " + getDescriptionFromErrorCode(ret)); + throw std::runtime_error("An error occurred sending audio packet to decoder: " + getDescriptionFromErrorCode(ret)); } + ret = avcodec_receive_frame(&_inputStream->getAudioCodec().getAVCodecContext(), &frameBuffer.getAVFrame()); + + if (ret == 0) + got_frame = true; + else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) + got_frame = false; + else + throw std::runtime_error("An error occurred receiving audio packet from decoder: " + getDescriptionFromErrorCode(ret)); + // fixed channel layout value after decoding frameBuffer.getAVFrame().channel_layout = channelLayout; // if no frame could be decompressed - if(!nextPacketRead && ret == 0 && got_frame == 0) + if(!nextPacketRead && got_frame == 0) decodeNextFrame = false; else decodeNextFrame = true; @@ -121,6 +130,10 @@ bool AudioDecoder::decodeNextFrame(IFrame& frameBuffer) return false; } } + + if(decodeNextFrame) + incrementNbDecodedFrames(frameBuffer.getAVFrame().nb_samples); + return decodeNextFrame; } diff --git a/src/AvTranscoder/decoder/AudioGenerator.cpp b/src/AvTranscoder/decoder/AudioGenerator.cpp index 63b1a4b1..6842ba21 100644 --- a/src/AvTranscoder/decoder/AudioGenerator.cpp +++ b/src/AvTranscoder/decoder/AudioGenerator.cpp @@ -41,6 +41,14 @@ bool AudioGenerator::decodeNextFrame(IFrame& frameBuffer) // (which was allocated to expect this number of samples). _silent->setNbSamplesPerChannel(frameBuffer.getAVFrame().nb_samples); } + + if(_silent->getNbSamplesPerChannel() != (size_t)frameBuffer.getAVFrame().nb_samples) { + LOG_DEBUG("Reset next audio frame nb samples and reallocate.") + frameBuffer.getAVFrame().nb_samples = _silent->getNbSamplesPerChannel(); + frameBuffer.freeData(); + frameBuffer.allocateData(); + } + LOG_DEBUG("Copy data of the silence when decode next frame") frameBuffer.copyData(*_silent); } @@ -50,6 +58,8 @@ bool AudioGenerator::decodeNextFrame(IFrame& frameBuffer) LOG_DEBUG("Convert data of the audio specified when decode next frame") _audioTransform.convert(*_inputFrame, frameBuffer); } + + incrementNbDecodedFrames(_silent->getNbSamplesPerChannel()); return true; } diff --git a/src/AvTranscoder/decoder/IDecoder.hpp b/src/AvTranscoder/decoder/IDecoder.hpp index d9acf8d1..66062140 100644 --- a/src/AvTranscoder/decoder/IDecoder.hpp +++ b/src/AvTranscoder/decoder/IDecoder.hpp @@ -10,6 +10,12 @@ namespace avtranscoder class AvExport IDecoder { +protected: + IDecoder() + : _decoded_frames_counter(0) + { + } + public: virtual ~IDecoder(){}; @@ -51,6 +57,16 @@ class AvExport IDecoder * @note Not sense for generators. */ virtual void flushDecoder() {} + + size_t getNbDecodedFrames() { return _decoded_frames_counter; } + +protected: + void incrementNbDecodedFrames(const size_t& nb_frames = 1) { + _decoded_frames_counter += nb_frames; + } + +private: + size_t _decoded_frames_counter; }; } diff --git a/src/AvTranscoder/decoder/VideoDecoder.cpp b/src/AvTranscoder/decoder/VideoDecoder.cpp index ae76fb05..447913be 100644 --- a/src/AvTranscoder/decoder/VideoDecoder.cpp +++ b/src/AvTranscoder/decoder/VideoDecoder.cpp @@ -94,15 +94,22 @@ bool VideoDecoder::decodeNextFrame(IFrame& frameBuffer) // decoding // @note could be called several times to return the remaining frames (last call with an empty packet) // @see CODEC_CAP_DELAY - const int ret = avcodec_decode_video2(&_inputStream->getVideoCodec().getAVCodecContext(), &frameBuffer.getAVFrame(), - &got_frame, &data.getAVPacket()); - if(ret < 0) - { - throw std::runtime_error("An error occurred during video decoding: " + getDescriptionFromErrorCode(ret)); - } + int ret = avcodec_send_packet(&_inputStream->getVideoCodec().getAVCodecContext(), &data.getAVPacket()); + + if (ret < 0 && (nextPacketRead || ret != AVERROR_EOF)) + throw std::runtime_error("An error occurred sending video packet to decoder: " + getDescriptionFromErrorCode(ret)); + + ret = avcodec_receive_frame(&_inputStream->getVideoCodec().getAVCodecContext(), &frameBuffer.getAVFrame()); + + if (ret == 0) + got_frame = true; + else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) + got_frame = false; + else + throw std::runtime_error("An error occurred receiving video packet from decoder: " + getDescriptionFromErrorCode(ret)); // if no frame could be decompressed - if(!nextPacketRead && ret == 0 && got_frame == 0) + if ((!nextPacketRead && ret == 0) || !got_frame) decodeNextFrame = false; else decodeNextFrame = true; @@ -114,6 +121,10 @@ bool VideoDecoder::decodeNextFrame(IFrame& frameBuffer) return false; } } + + if(decodeNextFrame) + incrementNbDecodedFrames(); + return decodeNextFrame; } diff --git a/src/AvTranscoder/decoder/VideoGenerator.cpp b/src/AvTranscoder/decoder/VideoGenerator.cpp index ad89c9ac..155d635d 100644 --- a/src/AvTranscoder/decoder/VideoGenerator.cpp +++ b/src/AvTranscoder/decoder/VideoGenerator.cpp @@ -51,6 +51,8 @@ bool VideoGenerator::decodeNextFrame(IFrame& frameBuffer) LOG_DEBUG("Convert data of the image specified when decode next frame") _videoTransform.convert(*_inputFrame, frameBuffer); } + + incrementNbDecodedFrames(); return true; } diff --git a/src/AvTranscoder/encoder/AudioEncoder.cpp b/src/AvTranscoder/encoder/AudioEncoder.cpp index 06410299..0b0621d6 100644 --- a/src/AvTranscoder/encoder/AudioEncoder.cpp +++ b/src/AvTranscoder/encoder/AudioEncoder.cpp @@ -7,6 +7,7 @@ extern "C" { } #include +#include namespace avtranscoder { @@ -93,20 +94,19 @@ void AudioEncoder::setupEncoder(const ProfileLoader::Profile& profile) bool AudioEncoder::encodeFrame(const IFrame& sourceFrame, CodedData& codedFrame) { - AVCodecContext& avCodecContext = _codec.getAVCodecContext(); - AVPacket& packet = codedFrame.getAVPacket(); - if((avCodecContext.coded_frame) && (avCodecContext.coded_frame->pts != (int)AV_NOPTS_VALUE)) + const AVFrame& srcAvFrame = sourceFrame.getAVFrame(); + if(srcAvFrame.pts != (int)AV_NOPTS_VALUE) { - packet.pts = avCodecContext.coded_frame->pts; + packet.pts = srcAvFrame.pts; } - if(avCodecContext.coded_frame && avCodecContext.coded_frame->key_frame) + if(srcAvFrame.key_frame) { packet.flags |= AV_PKT_FLAG_KEY; } - return encode(&sourceFrame.getAVFrame(), packet); + return encode(&srcAvFrame, packet); } bool AudioEncoder::encodeFrame(CodedData& codedFrame) @@ -120,7 +120,23 @@ bool AudioEncoder::encode(const AVFrame* decodedData, AVPacket& encodedData) encodedData.data = NULL; AVCodecContext& avCodecContext = _codec.getAVCodecContext(); -#if LIBAVCODEC_VERSION_MAJOR > 53 +#if LIBAVCODEC_VERSION_MAJOR > 58 + int ret = avcodec_send_frame(&avCodecContext, decodedData); + + if(ret != 0) + throw std::runtime_error("Error sending audio frame to encoder: " + getDescriptionFromErrorCode(ret)); + + ret = avcodec_receive_packet(&avCodecContext, &encodedData); + + if (ret == 0) + return true; + + if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) + return false; + + throw std::runtime_error("Error receiving audio frame from encoder: " + getDescriptionFromErrorCode(ret)); + +#elif LIBAVCODEC_VERSION_MAJOR > 53 int gotPacket = 0; const int ret = avcodec_encode_audio2(&avCodecContext, &encodedData, decodedData, &gotPacket); if(ret != 0) diff --git a/src/AvTranscoder/encoder/AudioEncoder.hpp b/src/AvTranscoder/encoder/AudioEncoder.hpp index 0c8e0a55..7a39abc2 100644 --- a/src/AvTranscoder/encoder/AudioEncoder.hpp +++ b/src/AvTranscoder/encoder/AudioEncoder.hpp @@ -1,7 +1,7 @@ #ifndef _AV_TRANSCODER_ENCODER_AUDIO_ENCODER_HPP_ #define _AV_TRANSCODER_ENCODER_AUDIO_ENCODER_HPP_ -#include "IEncoder.hpp" +#include #include #include diff --git a/src/AvTranscoder/encoder/VideoEncoder.cpp b/src/AvTranscoder/encoder/VideoEncoder.cpp index c6d43b66..d8662ff8 100644 --- a/src/AvTranscoder/encoder/VideoEncoder.cpp +++ b/src/AvTranscoder/encoder/VideoEncoder.cpp @@ -77,7 +77,11 @@ void VideoEncoder::setupEncoder(const ProfileLoader::Profile& profile) if(profile.count(constants::avProfileProcessStat)) { LOG_INFO("SetUp video encoder to compute statistics during process") +#ifdef AV_CODEC_FLAG_PSNR + encoderFlags |= AV_CODEC_FLAG_PSNR; +#else encoderFlags |= CODEC_FLAG_PSNR; +#endif } _codec.getAVCodecContext().flags |= encoderFlags; _codec.openCodec(); @@ -106,20 +110,19 @@ void VideoEncoder::setupEncoder(const ProfileLoader::Profile& profile) bool VideoEncoder::encodeFrame(const IFrame& sourceFrame, CodedData& codedFrame) { - AVCodecContext& avCodecContext = _codec.getAVCodecContext(); - AVPacket& packet = codedFrame.getAVPacket(); - if((avCodecContext.coded_frame) && (avCodecContext.coded_frame->pts != (int)AV_NOPTS_VALUE)) + const AVFrame& srcAvFrame = sourceFrame.getAVFrame(); + if(srcAvFrame.pts != (int)AV_NOPTS_VALUE) { - packet.pts = avCodecContext.coded_frame->pts; + packet.pts = srcAvFrame.pts; } - if(avCodecContext.coded_frame && avCodecContext.coded_frame->key_frame) + if(srcAvFrame.key_frame) { packet.flags |= AV_PKT_FLAG_KEY; } - return encode(&sourceFrame.getAVFrame(), packet); + return encode(&srcAvFrame, packet); } bool VideoEncoder::encodeFrame(CodedData& codedFrame) @@ -133,7 +136,23 @@ bool VideoEncoder::encode(const AVFrame* decodedData, AVPacket& encodedData) encodedData.data = NULL; AVCodecContext& avCodecContext = _codec.getAVCodecContext(); -#if LIBAVCODEC_VERSION_MAJOR > 53 +#if LIBAVCODEC_VERSION_MAJOR > 58 + int ret = avcodec_send_frame(&avCodecContext, decodedData); + + if (ret != 0 && ret != AVERROR_EOF) + throw std::runtime_error("Error sending video frame to encoder: " + getDescriptionFromErrorCode(ret)); + + ret = avcodec_receive_packet(&avCodecContext, &encodedData); + + if (ret == 0) + return true; + + if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) + return false; + + throw std::runtime_error("Error receiving video frame from encoder: " + getDescriptionFromErrorCode(ret)); + +#elif LIBAVCODEC_VERSION_MAJOR > 53 int gotPacket = 0; const int ret = avcodec_encode_video2(&avCodecContext, &encodedData, decodedData, &gotPacket); if(ret != 0) @@ -141,6 +160,7 @@ bool VideoEncoder::encode(const AVFrame* decodedData, AVPacket& encodedData) throw std::runtime_error("Encode video frame error: avcodec encode video frame - " + getDescriptionFromErrorCode(ret)); } + return gotPacket == 1; #else const int ret = avcodec_encode_video(&avCodecContext, encodedData.data, encodedData.size, decodedData); diff --git a/src/AvTranscoder/encoder/VideoEncoder.hpp b/src/AvTranscoder/encoder/VideoEncoder.hpp index d12035e6..6f7493b6 100644 --- a/src/AvTranscoder/encoder/VideoEncoder.hpp +++ b/src/AvTranscoder/encoder/VideoEncoder.hpp @@ -1,7 +1,7 @@ #ifndef _AV_TRANSCODER_ENCODER_VIDEO_ENCODER_HPP_ #define _AV_TRANSCODER_ENCODER_VIDEO_ENCODER_HPP_ -#include "IEncoder.hpp" +#include #include #include diff --git a/src/AvTranscoder/file/FormatContext.cpp b/src/AvTranscoder/file/FormatContext.cpp index 51d78f4e..cd423e85 100644 --- a/src/AvTranscoder/file/FormatContext.cpp +++ b/src/AvTranscoder/file/FormatContext.cpp @@ -46,10 +46,6 @@ FormatContext::~FormatContext() if(!_avFormatContext) return; - // free the streams added - for(std::vector::iterator it = _avStreamAllocated.begin(); it != _avStreamAllocated.end(); ++it) - avcodec_close((*it)->codec); - // free the format context if(_isOpen) avformat_close_input(&_avFormatContext); @@ -151,7 +147,7 @@ AVStream& FormatContext::addAVStream(const AVCodec& avCodec) bool FormatContext::seek(const uint64_t position, const int flag) { - LOG_INFO("Seek in '" << _avFormatContext->filename << "' at " << position << " with flag '" << flag << "'") + LOG_INFO("Seek in '" << _avFormatContext->url << "' at " << position << " with flag '" << flag << "'") const int err = av_seek_frame(_avFormatContext, -1, position, flag); if(err < 0) { @@ -186,12 +182,13 @@ AVStream& FormatContext::getAVStream(size_t index) const void FormatContext::setFilename(const std::string& filename) { - strcpy(&_avFormatContext->filename[0], filename.c_str()); + _avFormatContext->url = (char*)av_malloc(filename.size()); + strcpy(_avFormatContext->url, filename.c_str()); } void FormatContext::setOutputFormat(const std::string& filename, const std::string& shortName, const std::string& mimeType) { - AVOutputFormat* oformat = av_guess_format(shortName.c_str(), filename.c_str(), mimeType.c_str()); + const AVOutputFormat* oformat = av_guess_format(shortName.c_str(), filename.c_str(), mimeType.c_str()); if(!oformat) { std::string msg("Unable to find format for "); diff --git a/src/AvTranscoder/file/FormatContext.hpp b/src/AvTranscoder/file/FormatContext.hpp index 0a33bbe1..b4283e19 100644 --- a/src/AvTranscoder/file/FormatContext.hpp +++ b/src/AvTranscoder/file/FormatContext.hpp @@ -113,8 +113,8 @@ class AvExport FormatContext #ifndef SWIG AVFormatContext& getAVFormatContext() const { return *_avFormatContext; } - AVOutputFormat& getAVOutputFormat() const { return *_avFormatContext->oformat; } - AVInputFormat& getAVInputFormat() const { return *_avFormatContext->iformat; } + const AVOutputFormat& getAVOutputFormat() const { return *_avFormatContext->oformat; } + const AVInputFormat& getAVInputFormat() const { return *_avFormatContext->iformat; } AVIOContext& getAVIOContext() const { return *_avFormatContext->pb; } AVDictionary& getAVMetaData() const { return *_avFormatContext->metadata; } AVStream& getAVStream(size_t index) const; diff --git a/src/AvTranscoder/file/IOutputFile.hpp b/src/AvTranscoder/file/IOutputFile.hpp index 4d9dc233..c070cdb7 100644 --- a/src/AvTranscoder/file/IOutputFile.hpp +++ b/src/AvTranscoder/file/IOutputFile.hpp @@ -51,6 +51,15 @@ class AvExport IOutputFile throw std::logic_error("function is not implemented"); } + /** + * @brief Add a custom output stream + * @param iCodecDesc description of output codec + **/ + virtual IOutputStream& addCustomStream(const ICodec& iCodecDesc) + { + throw std::logic_error("function is not implemented"); + } + /** * @brief Write the header of file (if necessary) **/ diff --git a/src/AvTranscoder/file/OutputFile.cpp b/src/AvTranscoder/file/OutputFile.cpp index 8d292b95..49711abc 100644 --- a/src/AvTranscoder/file/OutputFile.cpp +++ b/src/AvTranscoder/file/OutputFile.cpp @@ -3,6 +3,7 @@ #include #include +#include #ifndef FF_INPUT_BUFFER_PADDING_SIZE #define FF_INPUT_BUFFER_PADDING_SIZE 16 @@ -35,31 +36,33 @@ IOutputStream& OutputFile::addVideoStream(const VideoCodec& videoDesc) { AVStream& stream = _formatContext.addAVStream(videoDesc.getAVCodec()); - stream.codec->width = videoDesc.getAVCodecContext().width; - stream.codec->height = videoDesc.getAVCodecContext().height; - stream.codec->bit_rate = videoDesc.getAVCodecContext().bit_rate; - stream.codec->pix_fmt = videoDesc.getAVCodecContext().pix_fmt; - stream.codec->profile = videoDesc.getAVCodecContext().profile; - stream.codec->level = videoDesc.getAVCodecContext().level; - stream.codec->field_order = videoDesc.getAVCodecContext().field_order; + stream.codecpar->codec_type = videoDesc.getAVCodecContext().codec_type; + stream.codecpar->codec_id = videoDesc.getAVCodecContext().codec_id; + stream.codecpar->codec_tag = videoDesc.getAVCodecContext().codec_tag; - stream.codec->colorspace = videoDesc.getAVCodecContext().colorspace; - stream.codec->color_primaries = videoDesc.getAVCodecContext().color_primaries; - stream.codec->color_range = videoDesc.getAVCodecContext().color_range; - stream.codec->color_trc = videoDesc.getAVCodecContext().color_trc; - stream.codec->chroma_sample_location = videoDesc.getAVCodecContext().chroma_sample_location; + stream.codecpar->width = videoDesc.getAVCodecContext().width; + stream.codecpar->height = videoDesc.getAVCodecContext().height; + stream.codecpar->bit_rate = videoDesc.getAVCodecContext().bit_rate; + stream.codecpar->format = videoDesc.getAVCodecContext().pix_fmt; + stream.codecpar->profile = videoDesc.getAVCodecContext().profile; + stream.codecpar->level = videoDesc.getAVCodecContext().level; + stream.codecpar->field_order = videoDesc.getAVCodecContext().field_order; + + stream.codecpar->color_space = videoDesc.getAVCodecContext().colorspace; + stream.codecpar->color_primaries = videoDesc.getAVCodecContext().color_primaries; + stream.codecpar->color_range = videoDesc.getAVCodecContext().color_range; + stream.codecpar->color_trc = videoDesc.getAVCodecContext().color_trc; + stream.codecpar->chroma_location = videoDesc.getAVCodecContext().chroma_sample_location; setOutputStream(stream, videoDesc); // need to set the time_base on the AVCodecContext and the AVStream // compensating the frame rate with the ticks_per_frame and keeping // a coherent reading speed. - av_reduce(&stream.codec->time_base.num, &stream.codec->time_base.den, + av_reduce(&stream.time_base.num, &stream.time_base.den, videoDesc.getAVCodecContext().time_base.num * videoDesc.getAVCodecContext().ticks_per_frame, videoDesc.getAVCodecContext().time_base.den, INT_MAX); - stream.time_base = stream.codec->time_base; - OutputStream* outputStream = new OutputStream(*this, _formatContext.getNbStreams() - 1); _outputStreams.push_back(outputStream); @@ -70,16 +73,20 @@ IOutputStream& OutputFile::addAudioStream(const AudioCodec& audioDesc) { AVStream& stream = _formatContext.addAVStream(audioDesc.getAVCodec()); - stream.codec->sample_rate = audioDesc.getAVCodecContext().sample_rate; - stream.codec->channels = audioDesc.getAVCodecContext().channels; - stream.codec->channel_layout = audioDesc.getAVCodecContext().channel_layout; - stream.codec->sample_fmt = audioDesc.getAVCodecContext().sample_fmt; - stream.codec->frame_size = audioDesc.getAVCodecContext().frame_size; + stream.codecpar->codec_type = audioDesc.getAVCodecContext().codec_type; + stream.codecpar->codec_id = audioDesc.getAVCodecContext().codec_id; + stream.codecpar->codec_tag = audioDesc.getAVCodecContext().codec_tag; + + stream.codecpar->sample_rate = audioDesc.getAVCodecContext().sample_rate; + stream.codecpar->channels = audioDesc.getAVCodecContext().channels; + stream.codecpar->channel_layout = audioDesc.getAVCodecContext().channel_layout; + stream.codecpar->format = audioDesc.getAVCodecContext().sample_fmt; + stream.codecpar->frame_size = audioDesc.getAVCodecContext().frame_size; setOutputStream(stream, audioDesc); // need to set the time_base on the AVCodecContext of the AVStream - av_reduce(&stream.codec->time_base.num, &stream.codec->time_base.den, audioDesc.getAVCodecContext().time_base.num, + av_reduce(&stream.time_base.num, &stream.time_base.den, audioDesc.getAVCodecContext().time_base.num, audioDesc.getAVCodecContext().time_base.den, INT_MAX); OutputStream* outputStream = new OutputStream(*this, _formatContext.getNbStreams() - 1); @@ -88,6 +95,29 @@ IOutputStream& OutputFile::addAudioStream(const AudioCodec& audioDesc) return *outputStream; } +IOutputStream& OutputFile::addCustomStream(const ICodec& iCodecDesc) +{ + AVStream& stream = _formatContext.addAVStream(iCodecDesc.getAVCodec()); + + stream.codecpar->codec_type = iCodecDesc.getAVCodecContext().codec_type; + stream.codecpar->codec_id = iCodecDesc.getAVCodecContext().codec_id; + stream.codecpar->codec_tag = iCodecDesc.getAVCodecContext().codec_tag; + + stream.codecpar->sample_rate = 48000; + stream.codecpar->channels = 1; + stream.codecpar->channel_layout = AV_CH_LAYOUT_MONO; + stream.codecpar->format = AV_SAMPLE_FMT_S32; + stream.codecpar->frame_size = 1920; + + // need to set the time_base on the AVCodecContext of the AVStream + av_reduce(&stream.time_base.num, &stream.time_base.den, 1, 1, INT_MAX); + + OutputStream* outputStream = new OutputStream(*this, _formatContext.getNbStreams() - 1); + _outputStreams.push_back(outputStream); + + return *outputStream; +} + IOutputStream& OutputFile::addDataStream(const DataCodec& dataDesc) { _formatContext.addAVStream(dataDesc.getAVCodec()); @@ -117,7 +147,7 @@ IOutputStream& OutputFile::getStream(const size_t streamIndex) std::string OutputFile::getFilename() const { - return std::string(_formatContext.getAVFormatContext().filename); + return std::string(_formatContext.getAVFormatContext().url); } std::string OutputFile::getFormatName() const @@ -169,14 +199,13 @@ bool OutputFile::beginWrap() IOutputStream::EWrappingStatus OutputFile::wrap(const CodedData& data, const size_t streamIndex) { if(!data.getSize()) - return IOutputStream::eWrappingSuccess; + return IOutputStream::eWrappingSkip; LOG_DEBUG("Wrap on stream " << streamIndex << " (" << data.getSize() << " bytes for frame " << _frameCount.at(streamIndex) << ")") // Packet to wrap - AVPacket packet; - av_init_packet(&packet); + AVPacket packet = *av_packet_alloc(); packet.stream_index = streamIndex; packet.data = (uint8_t*)data.getData(); packet.size = data.getSize(); @@ -200,12 +229,12 @@ IOutputStream::EWrappingStatus OutputFile::wrap(const CodedData& data, const siz packet.dts = av_rescale_q(data.getAVPacket().dts, srcTimeBase, dstTimeBase); } // add stream PTS if already incremented - const int currentStreamPTS = _outputStreams.at(streamIndex)->getStreamPTS(); - if(packet.pts != AV_NOPTS_VALUE && packet.pts < currentStreamPTS) - { - packet.pts += currentStreamPTS; - packet.dts += currentStreamPTS; - } + // const int currentStreamPTS = _outputStreams.at(streamIndex)->getStreamPTS(); + // if(packet.pts != AV_NOPTS_VALUE && packet.pts < currentStreamPTS) + // { + // packet.pts += currentStreamPTS; + // packet.dts += currentStreamPTS; + // } } // copy duration of packet wrapped @@ -329,25 +358,35 @@ void OutputFile::setupRemainingWrappingOptions() void OutputFile::setOutputStream(AVStream& avStream, const ICodec& codec) { +#if LIBAVCODEC_VERSION_MAJOR < 59 // depending on the format, place global headers in extradata instead of every keyframe if(_formatContext.getAVOutputFormat().flags & AVFMT_GLOBALHEADER) { +#ifdef AV_CODEC_FLAG_GLOBAL_HEADER + avStream.codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; +#else avStream.codec->flags |= CODEC_FLAG_GLOBAL_HEADER; +#endif } // if the codec is experimental, allow it +#ifdef AV_CODEC_CAP_EXPERIMENTAL + if(codec.getAVCodec().capabilities & AV_CODEC_CAP_EXPERIMENTAL) +#else if(codec.getAVCodec().capabilities & CODEC_CAP_EXPERIMENTAL) +#endif { LOG_WARN("This codec is considered experimental by libav/ffmpeg:" << codec.getCodecName()); avStream.codec->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL; } +#endif // some codecs need/can use extradata to decode uint8_t* srcExtradata = codec.getAVCodecContext().extradata; const int srcExtradataSize = codec.getAVCodecContext().extradata_size; - avStream.codec->extradata = (uint8_t*)av_malloc(srcExtradataSize + FF_INPUT_BUFFER_PADDING_SIZE); - memcpy(avStream.codec->extradata, srcExtradata, srcExtradataSize); - memset(((uint8_t*)avStream.codec->extradata) + srcExtradataSize, 0, FF_INPUT_BUFFER_PADDING_SIZE); - avStream.codec->extradata_size = codec.getAVCodecContext().extradata_size; + avStream.codecpar->extradata = (uint8_t*)av_malloc(srcExtradataSize + FF_INPUT_BUFFER_PADDING_SIZE); + memcpy(avStream.codecpar->extradata, srcExtradata, srcExtradataSize); + memset(((uint8_t*)avStream.codecpar->extradata) + srcExtradataSize, 0, FF_INPUT_BUFFER_PADDING_SIZE); + avStream.codecpar->extradata_size = codec.getAVCodecContext().extradata_size; } } diff --git a/src/AvTranscoder/file/OutputFile.hpp b/src/AvTranscoder/file/OutputFile.hpp index 4a18e4c0..c8fefc3b 100644 --- a/src/AvTranscoder/file/OutputFile.hpp +++ b/src/AvTranscoder/file/OutputFile.hpp @@ -37,6 +37,7 @@ class AvExport OutputFile : public IOutputFile IOutputStream& addVideoStream(const VideoCodec& videoDesc); IOutputStream& addAudioStream(const AudioCodec& audioDesc); IOutputStream& addDataStream(const DataCodec& dataDesc); + IOutputStream& addCustomStream(const ICodec& iCodecDesc); /** * @brief Open ressource, write header, and setup specific wrapping options given when call setupWrapping. diff --git a/src/AvTranscoder/filter/Filter.cpp b/src/AvTranscoder/filter/Filter.cpp index 07cc30d8..c5ac8fb4 100644 --- a/src/AvTranscoder/filter/Filter.cpp +++ b/src/AvTranscoder/filter/Filter.cpp @@ -15,7 +15,7 @@ Filter::Filter(const std::string& name, const std::string& options, const std::s , _options(options) , _instanceName(instanceName.empty() ? name : instanceName) { - _filter = avfilter_get_by_name(name.c_str()); + _filter = (AVFilter*)avfilter_get_by_name(name.c_str()); if(!_filter) { std::string msg("Cannot find filter "); diff --git a/src/AvTranscoder/filter/FilterGraph.cpp b/src/AvTranscoder/filter/FilterGraph.cpp index 8b960ef9..58f62fdb 100644 --- a/src/AvTranscoder/filter/FilterGraph.cpp +++ b/src/AvTranscoder/filter/FilterGraph.cpp @@ -211,7 +211,7 @@ bool FilterGraph::areInputFrameSizesEqual(const std::vector& inputs) if(!inputs.size() || inputs.size() == 1) return true; - const int frameSize = inputs.at(0)->getDataSize(); + size_t frameSize = inputs.at(0)->getDataSize(); for(size_t index = 1; index < inputs.size(); ++index) { if(frameSize != inputs.at(index)->getDataSize()) @@ -275,7 +275,7 @@ void FilterGraph::process(const std::vector& inputs, IFrame& output) { // Retrieve frame from buffer or directly from input IFrame* inputFrame = (bypassBuffers)? inputs.at(index) : _inputAudioFrameBuffers.at(index).getFrameSampleNb(minInputFrameSamplesNb); - const int ret = av_buffersrc_add_frame_flags(_filters.at(index)->getAVFilterContext(), &inputFrame->getAVFrame(), AV_BUFFERSRC_FLAG_PUSH); + const int ret = av_buffersrc_add_frame_flags(_filters.at(index)->getAVFilterContext(), &inputFrame->getAVFrame(), AV_BUFFERSRC_FLAG_KEEP_REF); if(ret < 0) { diff --git a/src/AvTranscoder/log.cpp b/src/AvTranscoder/log.cpp index 5eab4325..98ffd649 100644 --- a/src/AvTranscoder/log.cpp +++ b/src/AvTranscoder/log.cpp @@ -56,7 +56,7 @@ void Logger::log(const int level, const std::string& msg) logMessage += "\n"; // send message - av_log(NULL, level, logMessage.c_str()); + av_log(NULL, level, "%s", logMessage.c_str()); } void Logger::logInFile() diff --git a/src/AvTranscoder/properties/AudioProperties.cpp b/src/AvTranscoder/properties/AudioProperties.cpp index 2ec52828..327edc24 100644 --- a/src/AvTranscoder/properties/AudioProperties.cpp +++ b/src/AvTranscoder/properties/AudioProperties.cpp @@ -47,6 +47,8 @@ std::string AudioProperties::getSampleFormatLongName() const return "signed 16 bits"; case AV_SAMPLE_FMT_S32: return "signed 32 bits"; + case AV_SAMPLE_FMT_S64: + return "signed 64 bits"; case AV_SAMPLE_FMT_FLT: return "float"; case AV_SAMPLE_FMT_DBL: @@ -57,6 +59,8 @@ std::string AudioProperties::getSampleFormatLongName() const return "signed 16 bits, planar"; case AV_SAMPLE_FMT_S32P: return "signed 32 bits, planar"; + case AV_SAMPLE_FMT_S64P: + return "signed 64 bits, planar"; case AV_SAMPLE_FMT_FLTP: return "float, planar"; case AV_SAMPLE_FMT_DBLP: @@ -113,7 +117,7 @@ size_t AudioProperties::getBitRate() const if(_codecContext->bit_rate) return _codecContext->bit_rate; - LOG_WARN("The bitrate of the stream '" << _streamIndex << "' of file '" << _formatContext->filename << "' is unknown.") + LOG_WARN("The bitrate of the stream '" << _streamIndex << "' of file '" << _formatContext->url << "' is unknown.") LOG_INFO("Compute the audio bitrate (suppose PCM audio data).") const int bitsPerSample = av_get_bits_per_sample(_codecContext->codec_id); // 0 if unknown for the given codec @@ -140,7 +144,7 @@ size_t AudioProperties::getNbSamples() const throw std::runtime_error("unknown format context"); size_t nbSamples = _formatContext->streams[_streamIndex]->nb_frames; if(nbSamples == 0) - nbSamples = getSampleRate() * getNbChannels() * getDuration(); + nbSamples = getSampleRate() * getDuration(); return nbSamples; } diff --git a/src/AvTranscoder/properties/AudioProperties.hpp b/src/AvTranscoder/properties/AudioProperties.hpp index a82f2462..68c8d839 100644 --- a/src/AvTranscoder/properties/AudioProperties.hpp +++ b/src/AvTranscoder/properties/AudioProperties.hpp @@ -22,7 +22,7 @@ class AvExport AudioProperties : public StreamProperties size_t getBitRate() const; ///< in bits/s, 0 if unknown size_t getSampleRate() const; size_t getNbChannels() const; - size_t getNbSamples() const; ///< All the channels are included. + size_t getNbSamples() const; ///< For one channel. size_t getTicksPerFrame() const; diff --git a/src/AvTranscoder/properties/DataProperties.cpp b/src/AvTranscoder/properties/DataProperties.cpp index cba9cdeb..f81a19c3 100644 --- a/src/AvTranscoder/properties/DataProperties.cpp +++ b/src/AvTranscoder/properties/DataProperties.cpp @@ -13,8 +13,7 @@ namespace avtranscoder void DataProperties::detectAncillaryData() { - AVPacket pkt; - av_init_packet(&pkt); + AVPacket pkt = *av_packet_alloc(); bool detection = false; @@ -73,7 +72,7 @@ void DataProperties::detectAncillaryData() detection = true; } - av_free_packet(&pkt); + av_packet_unref(&pkt); if(detection) break; diff --git a/src/AvTranscoder/properties/FileProperties.cpp b/src/AvTranscoder/properties/FileProperties.cpp index 6c135a7b..207850ac 100644 --- a/src/AvTranscoder/properties/FileProperties.cpp +++ b/src/AvTranscoder/properties/FileProperties.cpp @@ -47,7 +47,7 @@ void FileProperties::extractStreamProperties(IProgress& progress, const EAnalyse // reload properties for(size_t streamIndex = 0; streamIndex < _formatContext->getNbStreams(); ++streamIndex) { - switch(_formatContext->getAVStream(streamIndex).codec->codec_type) + switch(_formatContext->getAVStream(streamIndex).codecpar->codec_type) { case AVMEDIA_TYPE_VIDEO: { @@ -139,9 +139,9 @@ void FileProperties::extractStreamProperties(IProgress& progress, const EAnalyse std::string FileProperties::getFilename() const { - if(!_avFormatContext || !_avFormatContext->filename) + if(!_avFormatContext || !_avFormatContext->url) throw std::runtime_error("unknown file name"); - return _avFormatContext->filename; + return _avFormatContext->url; } std::string FileProperties::getFormatName() const diff --git a/src/AvTranscoder/properties/PixelProperties.cpp b/src/AvTranscoder/properties/PixelProperties.cpp index 2d761e01..1ca56fe3 100644 --- a/src/AvTranscoder/properties/PixelProperties.cpp +++ b/src/AvTranscoder/properties/PixelProperties.cpp @@ -64,7 +64,7 @@ size_t PixelProperties::getMaxNbBitsInChannels() const size_t maxNbBitsInChannels = 0; for(unsigned int channelIndex = 0; channelIndex < _pixelDesc->nb_components; ++channelIndex) { - const size_t nbBits = _pixelDesc->comp[channelIndex].depth_minus1 + 1; + const size_t nbBits = _pixelDesc->comp[channelIndex].depth; if(nbBits > maxNbBitsInChannels) maxNbBitsInChannels = nbBits; } @@ -204,16 +204,11 @@ bool PixelProperties::isRgbPixelData() const return (_pixelDesc->flags & AV_PIX_FMT_FLAG_RGB) == AV_PIX_FMT_FLAG_RGB; } -bool PixelProperties::isPseudoPaletted() const -{ - if(!_pixelDesc) +bool PixelProperties::isPaletted() const { + if (!_pixelDesc) throw std::runtime_error("unable to find pixel description."); -#if LIBAVCODEC_VERSION_MAJOR > 53 - return (_pixelDesc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL) == AV_PIX_FMT_FLAG_PSEUDOPAL; -#else - return false; -#endif + return (_pixelDesc->flags & AV_PIX_FMT_FLAG_PAL) == AV_PIX_FMT_FLAG_PAL; } std::vector PixelProperties::getChannels() const @@ -227,7 +222,7 @@ std::vector PixelProperties::getChannels() const Channel c; c.id = channel; c.chromaHeight = (size_t)_pixelDesc->comp[channel].plane; - c.bitStep = (size_t)_pixelDesc->comp[channel].step_minus1; + c.bitStep = (size_t)_pixelDesc->comp[channel].step - 1; channels.push_back(c); } return channels; @@ -315,7 +310,11 @@ PropertyVector& PixelProperties::fillVector(PropertyVector& data) const addProperty(data, "bitWiseAcked", &PixelProperties::isBitWisePacked); addProperty(data, "isHardwareAccelerated", &PixelProperties::isHardwareAccelerated); addProperty(data, "rgbPixel", &PixelProperties::isRgbPixelData); +#if LIBAVCODEC_VERSION_MAJOR > 58 + addProperty(data, "isPaletted", &PixelProperties::isPaletted); +#else addProperty(data, "isPseudoPaletted", &PixelProperties::isPseudoPaletted); +#endif try { diff --git a/src/AvTranscoder/properties/PixelProperties.hpp b/src/AvTranscoder/properties/PixelProperties.hpp index 5e087ef2..56607ce8 100644 --- a/src/AvTranscoder/properties/PixelProperties.hpp +++ b/src/AvTranscoder/properties/PixelProperties.hpp @@ -71,7 +71,7 @@ class AvExport PixelProperties bool isBitWisePacked() const; bool isHardwareAccelerated() const; bool isRgbPixelData() const; - bool isPseudoPaletted() const; + bool isPaletted() const; std::vector getChannels() const; diff --git a/src/AvTranscoder/properties/StreamProperties.cpp b/src/AvTranscoder/properties/StreamProperties.cpp index 6f6fcf02..a0e6cac1 100644 --- a/src/AvTranscoder/properties/StreamProperties.cpp +++ b/src/AvTranscoder/properties/StreamProperties.cpp @@ -27,19 +27,21 @@ StreamProperties::StreamProperties(const FileProperties& fileProperties, const s ss << "Stream at index " << _streamIndex << " does not exist."; throw std::runtime_error(ss.str()); } - _codecContext = _formatContext->streams[_streamIndex]->codec; + + AVStream* stream = _formatContext->streams[_streamIndex]; + _codec = avcodec_find_decoder(stream->codecpar->codec_id); + _codecContext = avcodec_alloc_context3(_codec); + + avcodec_parameters_to_context(_codecContext, stream->codecpar); + _codecContext->time_base = stream->time_base; + _codecContext->coded_side_data = stream->side_data; } // find the decoder - if(_formatContext && _codecContext) + if(_formatContext && _codecContext && _codec) { - _codec = avcodec_find_decoder(_codecContext->codec_id); - - if(_codec) - { - // load specific options of the codec - loadOptions(_options, _codecContext); - } + // load specific options of the codec + loadOptions(_options, _codecContext); } } @@ -67,7 +69,7 @@ float StreamProperties::getDuration() const const size_t duration = _formatContext->streams[_streamIndex]->duration; if(duration == (size_t)AV_NOPTS_VALUE) { - LOG_WARN("The duration of the stream '" << _streamIndex << "' of file '" << _formatContext->filename + LOG_WARN("The duration of the stream '" << _streamIndex << "' of file '" << _formatContext->url << "' is unknown.") return 0; } @@ -78,7 +80,7 @@ AVMediaType StreamProperties::getStreamType() const { if(!_formatContext) throw std::runtime_error("unknown format context"); - return _formatContext->streams[_streamIndex]->codec->codec_type; + return _formatContext->streams[_streamIndex]->codecpar->codec_type; } size_t StreamProperties::getCodecId() const @@ -93,8 +95,13 @@ std::string StreamProperties::getCodecName() const if(!_codecContext || !_codec) throw std::runtime_error("unknown codec"); +#ifdef AV_CODEC_CAP_TRUNCATED + if(_codec->capabilities & AV_CODEC_CAP_TRUNCATED) + _codecContext->flags |= AV_CODEC_FLAG_TRUNCATED; +#else if(_codec->capabilities & CODEC_CAP_TRUNCATED) _codecContext->flags |= CODEC_FLAG_TRUNCATED; +#endif if(!_codec->name) throw std::runtime_error("unknown codec name"); @@ -107,9 +114,13 @@ std::string StreamProperties::getCodecLongName() const if(!_codecContext || !_codec) throw std::runtime_error("unknown codec"); +#ifdef AV_CODEC_CAP_TRUNCATED + if(_codec->capabilities & AV_CODEC_CAP_TRUNCATED) + _codecContext->flags |= AV_CODEC_FLAG_TRUNCATED; +#else if(_codec->capabilities & CODEC_CAP_TRUNCATED) _codecContext->flags |= CODEC_FLAG_TRUNCATED; - +#endif if(!_codec->long_name) throw std::runtime_error("unknown codec long name"); diff --git a/src/AvTranscoder/properties/StreamProperties.hpp b/src/AvTranscoder/properties/StreamProperties.hpp index 2820119b..fca5ffab 100644 --- a/src/AvTranscoder/properties/StreamProperties.hpp +++ b/src/AvTranscoder/properties/StreamProperties.hpp @@ -71,7 +71,7 @@ class AvExport StreamProperties const FileProperties* _fileProperties; ///< Has link (no ownership) const AVFormatContext* _formatContext; ///< Has link (no ownership) AVCodecContext* _codecContext; ///< Has link (no ownership) - AVCodec* _codec; ///< Has link (no ownership) + const AVCodec* _codec; ///< Has link (no ownership) size_t _streamIndex; PropertyVector _metadatas; diff --git a/src/AvTranscoder/properties/VideoProperties.cpp b/src/AvTranscoder/properties/VideoProperties.cpp index 84179b4c..d61dcede 100644 --- a/src/AvTranscoder/properties/VideoProperties.cpp +++ b/src/AvTranscoder/properties/VideoProperties.cpp @@ -35,7 +35,7 @@ VideoProperties::VideoProperties(const FileProperties& fileProperties, const siz if(_codecContext) { _pixelProperties = PixelProperties(_codecContext->pix_fmt); - _firstGopTimeCode = _codecContext->timecode_frame_start; + _firstGopTimeCode = _formatContext->start_time; } switch(_levelAnalysis) @@ -62,8 +62,13 @@ std::string VideoProperties::getProfileName() const if(!_codecContext || !_codec) throw std::runtime_error("unknown codec"); +#ifdef AV_CODEC_CAP_TRUNCATED + if(_codec->capabilities & AV_CODEC_CAP_TRUNCATED) + _codecContext->flags |= AV_CODEC_FLAG_TRUNCATED; +#else if(_codec->capabilities & CODEC_CAP_TRUNCATED) _codecContext->flags |= CODEC_FLAG_TRUNCATED; +#endif const char* profile = NULL; if((profile = av_get_profile_name(_codec, getProfile())) == NULL) @@ -345,7 +350,7 @@ size_t VideoProperties::getBitRate() const // return bit rate of stream if present or VBR mode if(_codecContext->bit_rate || _codecContext->rc_max_rate) return _codecContext->bit_rate; - LOG_WARN("The bitrate of the stream '" << _streamIndex << "' of file '" << _formatContext->filename << "' is unknown.") + LOG_WARN("The bitrate of the stream '" << _streamIndex << "' of file '" << _formatContext->url << "' is unknown.") if(_levelAnalysis == eAnalyseLevelHeader) { @@ -366,6 +371,14 @@ size_t VideoProperties::getMaxBitRate() const { if(!_codecContext) throw std::runtime_error("unknown codec context"); + + if (_codecContext->rc_max_rate == 0 + && _codecContext->coded_side_data + && _codecContext->coded_side_data->type == AV_PKT_DATA_CPB_PROPERTIES) { + const AVCPBProperties* prop = (AVCPBProperties*) _codecContext->coded_side_data->data; + return prop->max_bitrate; + } + return _codecContext->rc_max_rate; } @@ -373,6 +386,14 @@ size_t VideoProperties::getMinBitRate() const { if(!_codecContext) throw std::runtime_error("unknown codec context"); + + if (_codecContext->rc_max_rate == 0 + && _codecContext->coded_side_data + && _codecContext->coded_side_data->type == AV_PKT_DATA_CPB_PROPERTIES) { + const AVCPBProperties* prop = (AVCPBProperties*) _codecContext->coded_side_data->data; + return prop->min_bitrate; + } + return _codecContext->rc_min_rate; } @@ -384,7 +405,7 @@ size_t VideoProperties::getNbFrames() const const size_t nbFrames = _formatContext->streams[_streamIndex]->nb_frames; if(nbFrames) return nbFrames; - LOG_WARN("The number of frames of the stream '" << _streamIndex << "' of file '" << _formatContext->filename + LOG_WARN("The number of frames of the stream '" << _streamIndex << "' of file '" << _formatContext->url << "' is unknown.") if(_levelAnalysis == eAnalyseLevelHeader) @@ -427,7 +448,8 @@ size_t VideoProperties::getDtgActiveFormat() const { if(!_codecContext) throw std::runtime_error("unknown codec context"); - return _codecContext->dtg_active_format; + // return _codecContext->dtg_active_format; + return 0; } size_t VideoProperties::getReferencesFrames() const @@ -466,7 +488,7 @@ float VideoProperties::getDuration() const const float duration = StreamProperties::getDuration(); if(duration != 0) return duration; - LOG_WARN("The duration of the stream '" << _streamIndex << "' of file '" << _formatContext->filename << "' is unknown.") + LOG_WARN("The duration of the stream '" << _streamIndex << "' of file '" << _formatContext->url << "' is unknown.") if(_levelAnalysis == eAnalyseLevelHeader) { @@ -552,7 +574,7 @@ size_t VideoProperties::analyseGopStructure(IProgress& progress) AVFrame& avFrame = frame.getAVFrame(); _gopStructure.push_back( - std::make_pair(av_get_picture_type_char(avFrame.pict_type), av_frame_get_pkt_size(&avFrame))); + std::make_pair(av_get_picture_type_char(avFrame.pict_type), avFrame.pkt_size)); _isInterlaced = avFrame.interlaced_frame; _isTopFieldFirst = avFrame.top_field_first; if(avFrame.pict_type == AV_PICTURE_TYPE_I) diff --git a/src/AvTranscoder/stream/IOutputStream.hpp b/src/AvTranscoder/stream/IOutputStream.hpp index 3c7393c9..4acdad2d 100644 --- a/src/AvTranscoder/stream/IOutputStream.hpp +++ b/src/AvTranscoder/stream/IOutputStream.hpp @@ -16,9 +16,10 @@ class AvExport IOutputStream **/ enum EWrappingStatus { - eWrappingSuccess = 0, - eWrappingWaitingForData, - eWrappingError, + eWrappingSuccess = 0, ///< The wrapping succeeded + eWrappingWaitingForData, ///< The wrapper expects more data to complete the writing process + eWrappingSkip, ///< The wrapper receives empty data, so nothing is written + eWrappingError, ///< An error occurred during the wrapping process }; virtual ~IOutputStream(){}; diff --git a/src/AvTranscoder/stream/InputStream.cpp b/src/AvTranscoder/stream/InputStream.cpp index 8f465694..31072a1c 100644 --- a/src/AvTranscoder/stream/InputStream.cpp +++ b/src/AvTranscoder/stream/InputStream.cpp @@ -21,7 +21,17 @@ InputStream::InputStream(InputFile& inputFile, const size_t streamIndex) , _streamIndex(streamIndex) , _isActivated(false) { - AVCodecContext* context = _inputFile->getFormatContext().getAVStream(_streamIndex).codec; + AVStream& avStream = _inputFile->getFormatContext().getAVStream(_streamIndex); + AVCodecParameters* codecParameters = avStream.codecpar; + + const AVCodec* codec = avcodec_find_decoder(codecParameters->codec_id); + AVCodecContext* context = avcodec_alloc_context3(codec); + + int ret = avcodec_parameters_to_context(context, codecParameters); + context->time_base = avStream.time_base; + + if (ret < 0) + throw std::runtime_error("Failed to copy decoder parameters to input stream context"); switch(context->codec_type) { @@ -122,9 +132,10 @@ void InputStream::addPacket(const AVPacket& packet) return; } - LOG_DEBUG("Add a packet data for the stream " << _streamIndex << " to the cache") - _streamCache.push(CodedData()); - _streamCache.back().copyData(packet.data, packet.size); + LOG_DEBUG("Add a packet data for the stream " << _streamIndex << " to the cache"); + CodedData codedData; + codedData.copyData(packet.data, packet.size); + _streamCache.push(codedData); } void InputStream::clearBuffering() diff --git a/src/AvTranscoder/stream/OutputStream.cpp b/src/AvTranscoder/stream/OutputStream.cpp index 350c1ace..92034dc6 100644 --- a/src/AvTranscoder/stream/OutputStream.cpp +++ b/src/AvTranscoder/stream/OutputStream.cpp @@ -11,20 +11,45 @@ OutputStream::OutputStream(OutputFile& outputFile, const size_t streamIndex) : IOutputStream() , _outputFile(outputFile) , _outputAVStream(outputFile.getFormatContext().getAVStream(streamIndex)) + , _codecContext() , _streamIndex(streamIndex) , _wrappedPacketsDuration(0) , _lastWrappedPacketDuration(0) , _isPTSGenerated(false) { + const AVCodec* codec = avcodec_find_encoder(_outputAVStream.codecpar->codec_id); + _codecContext = avcodec_alloc_context3(codec); + + int ret = avcodec_parameters_to_context(_codecContext, _outputAVStream.codecpar); + if (ret < 0) + throw std::runtime_error("Failed to copy encoder parameters to output stream context"); + +#if LIBAVCODEC_VERSION_MAJOR > 58 + // depending on the format, place global headers in extradata instead of every keyframe +#ifdef AV_CODEC_FLAG_GLOBAL_HEADER + if(_outputFile.getFormatContext().getAVOutputFormat().flags & AVFMT_GLOBALHEADER) + { + _codecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; + } +#endif + + // if the codec is experimental, allow it +#ifdef AV_CODEC_CAP_EXPERIMENTAL + if(codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL) + { + LOG_WARN("This codec is considered experimental by libav/ffmpeg:" << codec->name); + _codecContext->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL; + } +#endif +#endif } float OutputStream::getStreamDuration() const { - const AVFrac& outputPTS = _outputAVStream.pts; const AVRational& outputTimeBase = _outputAVStream.time_base; // check floating point exception - if(outputTimeBase.den == 0 || outputPTS.den == 0) + if(outputTimeBase.den == 0) { LOG_WARN("Cannot compute stream duration of output stream at index " << _streamIndex) return 0.f; @@ -36,7 +61,7 @@ float OutputStream::getStreamDuration() const // returns the pts of the last muxed packet, converted from timebase to seconds return av_q2d(outputTimeBase) * av_stream_get_end_pts(&_outputAVStream); #else - return av_q2d(outputTimeBase) * (outputPTS.val + (outputPTS.num / outputPTS.den)); + return av_q2d(outputTimeBase) * _outputAVStream.pts; #endif } @@ -45,11 +70,12 @@ size_t OutputStream::getNbFrames() const return _outputAVStream.nb_frames; } -int OutputStream::getStreamPTS() const -{ - const AVFrac& outputPTS = _outputAVStream.pts; - return (outputPTS.val + (outputPTS.num / outputPTS.den)); -} +// int OutputStream::getStreamPTS() const +// { +// // const AVFrac& outputPTS = _outputAVStream.pts; +// // return (outputPTS.val + (outputPTS.num / outputPTS.den)); +// return _outputAVStream.pts; +// } IOutputStream::EWrappingStatus OutputStream::wrap(const CodedData& data) { @@ -73,7 +99,7 @@ IOutputStream::EWrappingStatus OutputStream::wrap(const CodedData& data) _wrappedPacketsDuration += data.getAVPacket().duration; else { - switch(_outputAVStream.codec->codec_type) + switch(_outputAVStream.codecpar->codec_type) { case AVMEDIA_TYPE_VIDEO: { @@ -85,14 +111,13 @@ IOutputStream::EWrappingStatus OutputStream::wrap(const CodedData& data) Rational audioPacketDuration; audioPacketDuration.num = 0; audioPacketDuration.den = 0; - const int frame_size = av_get_audio_frame_duration(_outputAVStream.codec, data.getSize()); - if(frame_size <= 0 || _outputAVStream.codec->sample_rate <= 0) + const int frame_size = av_get_audio_frame_duration(_codecContext, data.getSize()); + if (frame_size <= 0 || _outputAVStream.codecpar->sample_rate <= 0) break; audioPacketDuration.num = frame_size; - audioPacketDuration.den = _outputAVStream.codec->sample_rate; - _wrappedPacketsDuration += av_rescale(1, audioPacketDuration.num * (int64_t)_outputAVStream.time_base.den * - _outputAVStream.codec->ticks_per_frame, - audioPacketDuration.den * (int64_t)_outputAVStream.time_base.num); + audioPacketDuration.den = _outputAVStream.codecpar->sample_rate; + _wrappedPacketsDuration += av_rescale(1, audioPacketDuration.num * (int64_t) _outputAVStream.time_base.den * _codecContext->ticks_per_frame, + audioPacketDuration.den * (int64_t) _outputAVStream.time_base.num); break; } default: diff --git a/src/AvTranscoder/stream/OutputStream.hpp b/src/AvTranscoder/stream/OutputStream.hpp index b68d93a5..b896edb7 100644 --- a/src/AvTranscoder/stream/OutputStream.hpp +++ b/src/AvTranscoder/stream/OutputStream.hpp @@ -18,7 +18,7 @@ class AvExport OutputStream : public IOutputStream size_t getStreamIndex() const { return _streamIndex; } float getStreamDuration() const; size_t getNbFrames() const; ///< If audio stream, returns number of packets - int getStreamPTS() const; ///< Get current AVStream PTS + // int getStreamPTS() const; ///< Get current AVStream PTS bool isPTSGenerated() const { return _isPTSGenerated; } IOutputStream::EWrappingStatus wrap(const CodedData& data); @@ -26,6 +26,7 @@ class AvExport OutputStream : public IOutputStream private: OutputFile& _outputFile; ///< Has link (no ownership) const AVStream& _outputAVStream; ///< Has link (no ownership) + AVCodecContext* _codecContext; size_t _streamIndex; ///< Index of the stream in the output file diff --git a/src/AvTranscoder/transcoder/StreamTranscoder.cpp b/src/AvTranscoder/transcoder/StreamTranscoder.cpp index 78253644..0882144b 100644 --- a/src/AvTranscoder/transcoder/StreamTranscoder.cpp +++ b/src/AvTranscoder/transcoder/StreamTranscoder.cpp @@ -246,6 +246,93 @@ StreamTranscoder::StreamTranscoder(const std::vector& inputStre setOffset(offset); } +StreamTranscoder::StreamTranscoder(const std::vector& inputStreamsDesc, std::vector& inputStreams, IOutputFile& outputFile, + IEncoder* encoder, const float offset) + : _inputStreamDesc(inputStreamsDesc) + , _inputStreams(inputStreams) + , _outputStream(NULL) + , _decodedData() + , _filteredData(NULL) + , _transformedData(NULL) + , _inputDecoders() + , _generators() + , _currentDecoder(NULL) + , _outputEncoder(encoder) + , _transform(NULL) + , _filterGraph(NULL) + , _firstInputStreamIndex(std::numeric_limits::max()) + , _offset(offset) + , _needToSwitchToGenerator(false) +{ + // add as many decoders as input streams + size_t nbOutputChannels = 0; + for(size_t index = 0; index < inputStreams.size(); ++index) + { + if(_inputStreams.at(index) != NULL) + { + LOG_INFO("add decoder for input stream " << index); + addDecoder(_inputStreamDesc.at(index), *_inputStreams.at(index)); + nbOutputChannels += _inputStreamDesc.at(index)._channelIndexArray.size(); + if(_firstInputStreamIndex == std::numeric_limits::max()) + _firstInputStreamIndex = index; + } + } + + IInputStream& inputStream = *_inputStreams.at(_firstInputStreamIndex); + const InputStreamDesc& inputStreamDesc = inputStreamsDesc.at(_firstInputStreamIndex); + + // create a transcode case + switch(inputStream.getProperties().getStreamType()) + { + case AVMEDIA_TYPE_AUDIO: + { + + // filter + _filterGraph = new FilterGraph(inputStream.getAudioCodec()); + // merge two or more audio streams into a single multi-channel stream. + if(inputStreams.size() > 1) + { + std::stringstream mergeOptions; + mergeOptions << "inputs=" << inputStreams.size(); + _filterGraph->addFilter("amerge", mergeOptions.str()); + } + + AudioFrameDesc inputFrameDesc(inputStream.getAudioCodec().getAudioFrameDesc()); + + // output stream + AudioCodec outputAudioCodec(_outputEncoder->getCodec().getCodecType(), _outputEncoder->getCodec().getCodecId()); + AudioFrameDesc outputAudioFrameDesc = outputAudioCodec.getAudioFrameDesc(); + if(outputAudioFrameDesc._sampleRate == 0) { + outputAudioFrameDesc._sampleRate = inputFrameDesc._sampleRate; + } + if(outputAudioFrameDesc._sampleFormat == AV_SAMPLE_FMT_NONE) { + outputAudioFrameDesc._sampleFormat = inputFrameDesc._sampleFormat; + } + outputAudioFrameDesc._nbChannels = nbOutputChannels; + outputAudioCodec.setAudioParameters(outputAudioFrameDesc); + + _outputStream = &outputFile.addAudioStream(outputAudioCodec); + + // buffers to process + if(inputStreamDesc.demultiplexing()) + inputFrameDesc._nbChannels = nbOutputChannels; + + _filteredData = new AudioFrame(inputFrameDesc); + _transformedData = new AudioFrame(outputAudioFrameDesc); + + // transform + _transform = new AudioTransform(); + break; + } + default: + { + throw std::runtime_error("unupported stream type"); + break; + } + } + setOffset(offset); +} + void StreamTranscoder::addDecoder(const InputStreamDesc& inputStreamDesc, IInputStream& inputStream) { // create a transcode case @@ -429,6 +516,26 @@ StreamTranscoder::StreamTranscoder(IOutputFile& outputFile, const ProfileLoader: } } +StreamTranscoder::StreamTranscoder(IOutputFile& outputFile, IEncoder* encoder) + : _inputStreamDesc() + , _inputStreams() + , _outputStream(NULL) + , _decodedData() + , _filteredData(NULL) + , _transformedData(NULL) + , _inputDecoders() + , _generators() + , _currentDecoder(NULL) + , _outputEncoder(encoder) + , _transform(NULL) + , _filterGraph(NULL) + , _firstInputStreamIndex(0) + , _offset(0) + , _needToSwitchToGenerator(false) +{ + _outputStream = &outputFile.addCustomStream(encoder->getCodec()); +} + StreamTranscoder::~StreamTranscoder() { for(std::vector::iterator it = _decodedData.begin(); it != _decodedData.end(); ++it) @@ -490,7 +597,7 @@ void StreamTranscoder::preProcessCodecLatency() _currentDecoder = NULL; } -bool StreamTranscoder::processFrame() +IOutputStream::EWrappingStatus StreamTranscoder::processFrame() { std::string msg = "Current process case of the stream is a "; switch(getProcessCase()) @@ -510,7 +617,20 @@ bool StreamTranscoder::processFrame() // Manage offset if(_offset > 0) { - const bool endOfOffset = _outputStream->getStreamDuration() >= _offset; + bool endOfOffset = false; + if(_currentDecoder == _generators.at(0)) + { + const double fps = 1.0 * _outputEncoder->getCodec().getAVCodecContext().time_base.den / + (_outputEncoder->getCodec().getAVCodecContext().time_base.num * _outputEncoder->getCodec().getAVCodecContext().ticks_per_frame); + const double frame_duration = 1.0 / fps; + const double generated_duration = _currentDecoder->getNbDecodedFrames() * frame_duration; + endOfOffset = generated_duration >= _offset; + } + else + { + endOfOffset = _outputStream->getStreamDuration() >= _offset; + } + if(endOfOffset) { LOG_INFO("End of positive offset") @@ -557,7 +677,7 @@ bool StreamTranscoder::processFrame() return processTranscode(); } -bool StreamTranscoder::processRewrap() +IOutputStream::EWrappingStatus StreamTranscoder::processRewrap() { assert(_inputStreams.size() == 1); assert(_outputStream != NULL); @@ -572,31 +692,20 @@ bool StreamTranscoder::processRewrap() switchToGeneratorDecoder(); return processTranscode(); } - return false; - } - - const IOutputStream::EWrappingStatus wrappingStatus = _outputStream->wrap(data); - switch(wrappingStatus) - { - case IOutputStream::eWrappingSuccess: - return true; - case IOutputStream::eWrappingWaitingForData: - // the wrapper needs more data to write the current packet - return processFrame(); - case IOutputStream::eWrappingError: - return false; + return IOutputStream::eWrappingError; } - return true; + return _outputStream->wrap(data); } -bool StreamTranscoder::processTranscode() +IOutputStream::EWrappingStatus StreamTranscoder::processTranscode() { assert(_outputStream != NULL); assert(_currentDecoder != NULL); assert(_outputEncoder != NULL); assert(! _decodedData.empty()); assert(_transform != NULL); + assert(_generators.size() == _inputDecoders.size()); LOG_DEBUG("StreamTranscoder::processTranscode") @@ -643,8 +752,7 @@ bool StreamTranscoder::processTranscode() } } - // Transform - CodedData data; + // Check decoding status bool continueProcess = true; for(size_t index = 0; index < decodingStatus.size(); ++index) { @@ -653,6 +761,17 @@ bool StreamTranscoder::processTranscode() if(!_filterGraph->hasFilters() || !_filterGraph->hasBufferedFrames(index)) { continueProcess = false; + if(_needToSwitchToGenerator) + { + switchToGeneratorDecoder(); + LOG_INFO("Force reallocation of the decoded data buffers since the decoders could have cleared them.") + for(std::vector::iterator it = _decodedData.begin(); it != _decodedData.end(); ++it) + { + if(! (*it)->isDataAllocated()) + (*it)->allocateData(); + } + return processTranscode(); + } break; } LOG_DEBUG("Some frames remain into filter graph buffer " << index); @@ -664,8 +783,10 @@ bool StreamTranscoder::processTranscode() } } + CodedData data; if(continueProcess) { + // Transform IFrame* dataToTransform = NULL; if(_filterGraph->hasFilters()) { @@ -683,6 +804,16 @@ bool StreamTranscoder::processTranscode() LOG_DEBUG("Encode") _outputEncoder->encodeFrame(*_transformedData, data); + + if(_filterGraph->hasFilters()) + { + LOG_DEBUG("Free filtered data") // filled from filter graph sink + if (_filteredData->isVideoFrame()) { + // Do not unref filter audio frame, to avoid reallocating it each time + av_frame_unref(&_filteredData->getAVFrame()); + } + _filteredData->freeData(); + } } else { @@ -700,25 +831,13 @@ bool StreamTranscoder::processTranscode() } return processTranscode(); } - return false; + return IOutputStream::eWrappingError; } } // Wrap LOG_DEBUG("wrap (" << data.getSize() << " bytes)") - const IOutputStream::EWrappingStatus wrappingStatus = _outputStream->wrap(data); - switch(wrappingStatus) - { - case IOutputStream::eWrappingSuccess: - return true; - case IOutputStream::eWrappingWaitingForData: - // the wrapper needs more data to write the current packet - return processFrame(); - case IOutputStream::eWrappingError: - return false; - } - - return true; + return _outputStream->wrap(data); } void StreamTranscoder::switchToGeneratorDecoder() diff --git a/src/AvTranscoder/transcoder/StreamTranscoder.hpp b/src/AvTranscoder/transcoder/StreamTranscoder.hpp index c72db0ea..9df3f114 100644 --- a/src/AvTranscoder/transcoder/StreamTranscoder.hpp +++ b/src/AvTranscoder/transcoder/StreamTranscoder.hpp @@ -40,10 +40,22 @@ class AvExport StreamTranscoder const ProfileLoader::Profile& profile, const float offset = 0); /** - * @brief Encode a generated stream + * @brief Transcode the given streams. + * @note The data are wrapped to one output stream. + **/ + StreamTranscoder(const std::vector& inputStreamsDesc, std::vector& inputStreams, IOutputFile& outputFile, + IEncoder* encoder, const float offset = 0); + + /** + * @brief Create a stream transcoder based on a profile. **/ StreamTranscoder(IOutputFile& outputFile, const ProfileLoader::Profile& profile); + /** + * @brief Create a stream transcoder with a custom Encoder. + **/ + StreamTranscoder(IOutputFile& outputFile, IEncoder* encoder); + ~StreamTranscoder(); /** @@ -57,7 +69,7 @@ class AvExport StreamTranscoder * @brief process a single frame for the current stream * @return the process status result */ - bool processFrame(); + IOutputStream::EWrappingStatus processFrame(); //@{ // Switch current decoder. @@ -88,6 +100,23 @@ class AvExport StreamTranscoder /// Returns a reference to the stream which wraps data IOutputStream& getOutputStream() const { return *_outputStream; } + /// Returns the total number of generated frames for this processed stream + size_t getNumberOfGeneratedFrames() const { + size_t generatedFrames = 0; + for (IDecoder* generator : _generators) { + generatedFrames += generator->getNbDecodedFrames(); + } + return generatedFrames; + } + /// Returns the total number of decoded frames for this processed stream + size_t getNumberOfDecodedFrames() const { + size_t decodedFrames = 0; + for (IDecoder* inputDecoder : _inputDecoders) { + decodedFrames += inputDecoder->getNbDecodedFrames(); + } + return decodedFrames; + } + /** * @brief Returns if the stream has the ability to switch to a generator. */ @@ -127,8 +156,8 @@ class AvExport StreamTranscoder void addDecoder(const InputStreamDesc& inputStreamDesc, IInputStream& inputStream); void addGenerator(const InputStreamDesc& inputStreamDesc, const ProfileLoader::Profile& profile); - bool processRewrap(); - bool processTranscode(); + IOutputStream::EWrappingStatus processRewrap(); + IOutputStream::EWrappingStatus processTranscode(); private: std::vector _inputStreamDesc; ///< Description of the data to extract from the input stream. diff --git a/src/AvTranscoder/transcoder/Transcoder.cpp b/src/AvTranscoder/transcoder/Transcoder.cpp index 5249f330..9db3174b 100644 --- a/src/AvTranscoder/transcoder/Transcoder.cpp +++ b/src/AvTranscoder/transcoder/Transcoder.cpp @@ -19,6 +19,7 @@ Transcoder::Transcoder(IOutputFile& outputFile) , _profileLoader(true) , _eProcessMethod(eProcessMethodLongest) , _mainStreamIndex(0) + , _processedFrames(0) , _outputDuration(0) { } @@ -30,6 +31,7 @@ Transcoder::~Transcoder() { delete(*it); } + for(std::vector::iterator it = _inputFiles.begin(); it != _inputFiles.end(); ++it) { delete(*it); @@ -73,6 +75,17 @@ void Transcoder::addStream(const InputStreamDesc& inputStreamDesc, const Profile addStream(inputStreamDescArray, profile, offset); } +void Transcoder::addStream(const InputStreamDesc& inputStreamDesc, IEncoder* encoder) +{ + // Check filename + if(!inputStreamDesc._filename.length()) + throw std::runtime_error("Can't transcode a stream without a filename indicated."); + + std::vector inputStreamDescArray; + inputStreamDescArray.push_back(inputStreamDesc); + addStream(inputStreamDescArray, encoder); +} + void Transcoder::addStream(const std::vector& inputStreamDescArray, const std::string& profileName, const float offset) { // Check number of inputs @@ -105,6 +118,15 @@ void Transcoder::addStream(const std::vector& inputStreamDescAr addTranscodeStream(inputStreamDescArray, profile, offset); } +void Transcoder::addStream(const std::vector& inputStreamDescArray, IEncoder* encoder) +{ + // Check number of inputs + if(inputStreamDescArray.empty()) + throw std::runtime_error("Need a description of at least one input stream to start the process."); + + addTranscodeStream(inputStreamDescArray, encoder); +} + void Transcoder::addGenerateStream(const std::string& encodingProfileName) { const ProfileLoader::Profile& encodingProfile = _profileLoader.getProfile(encodingProfileName); @@ -138,27 +160,88 @@ void Transcoder::preProcessCodecLatency() } bool Transcoder::processFrame() +{ + NoDisplayProgress progress; + return processFrame(progress); +} + +bool Transcoder::processFrame(IProgress& progress) { if(_streamTranscoders.size() == 0) return false; // For each stream, process a frame + bool result = true; for(size_t streamIndex = 0; streamIndex < _streamTranscoders.size(); ++streamIndex) { - LOG_DEBUG("Process stream " << streamIndex + 1 << "/" << _streamTranscoders.size()) + if(!processFrame(progress, streamIndex)) + result = false; + } + return result; +} - // if a stream failed to process - if(!_streamTranscoders.at(streamIndex)->processFrame()) - { +bool Transcoder::processFrame(IProgress& progress, const size_t& streamIndex) +{ + LOG_DEBUG("Process stream " << streamIndex + 1 << "/" << _streamTranscoders.size()) + + IOutputStream::EWrappingStatus status = _streamTranscoders.at(streamIndex)->processFrame(); + switch(status) + { + case IOutputStream::eWrappingSuccess: + if(streamIndex == 0) + _processedFrames++; + + if(!continueProcess(progress)) + return false; + return true; + + case IOutputStream::eWrappingWaitingForData: + // the wrapper needs more data to write the current packet + if(streamIndex == 0) + _processedFrames++; + + if(!continueProcess(progress)) + return false; + + return processFrame(progress, streamIndex); + + case IOutputStream::eWrappingSkip: + return true; + + case IOutputStream::eWrappingError: + // if a stream failed to process LOG_WARN("Failed to process the stream transcoder at index " << streamIndex) // if this is the end of the main stream - if(streamIndex == _mainStreamIndex) { + if(streamIndex == _mainStreamIndex) LOG_INFO("End of process because the main stream at index " << _mainStreamIndex << " failed to process a new frame.") - return false; - } - } + + return false; + default: + throw std::runtime_error("Unsupported wrapping status"); + } +} + +bool Transcoder::continueProcess(IProgress& progress) { + const float expectedOutputDuration = getExpectedOutputDuration(); + const float progressDuration = getCurrentOutputDuration(); + + // check if JobStatusCancel + if(progress.progress((progressDuration > expectedOutputDuration) ? expectedOutputDuration : progressDuration, + expectedOutputDuration) == eJobStatusCancel) + { + LOG_INFO("End of process because the job was canceled.") + return false; + } + + // check progressDuration + if(_eProcessMethod == eProcessMethodBasedOnDuration && progressDuration >= expectedOutputDuration) + { + LOG_INFO("End of process because the output program duration (" + << progressDuration << "s) is equal or upper than " << expectedOutputDuration << "s.") + return false; } + return true; } @@ -184,36 +267,15 @@ ProcessStat Transcoder::process(IProgress& progress) const float expectedOutputDuration = getExpectedOutputDuration(); LOG_INFO("The expected output duration of the program will be " << expectedOutputDuration << "s.") - size_t frame = 0; bool frameProcessed = true; while(frameProcessed) { - LOG_DEBUG("Process frame " << frame) - frameProcessed = processFrame(); - ++frame; - - const float progressDuration = getCurrentOutputDuration(); - - // check if JobStatusCancel - if(progress.progress((progressDuration > expectedOutputDuration) ? expectedOutputDuration : progressDuration, - expectedOutputDuration) == eJobStatusCancel) - { - LOG_INFO("End of process because the job was canceled.") - break; - } - - // check progressDuration - if(_eProcessMethod == eProcessMethodBasedOnDuration && progressDuration >= expectedOutputDuration) - { - LOG_INFO("End of process because the output program duration (" - << progressDuration << "s) is equal or upper than " << expectedOutputDuration << "s.") - break; - } + LOG_INFO("Process frame " << _processedFrames); + frameProcessed = processFrame(progress); } _outputFile.endWrap(); - - LOG_INFO("End of process: " << ++frame << " frames processed") + LOG_INFO("End of process: " << ++_processedFrames << " frames processed") LOG_INFO("Get process statistics") ProcessStat processStat; @@ -283,6 +345,42 @@ void Transcoder::addTranscodeStream(const std::vector& inputStr _streamTranscoders.push_back(_streamTranscodersAllocated.back()); } +void Transcoder::addTranscodeStream(const std::vector& inputStreamDescArray, IEncoder* encoder, const float offset) +{ + std::stringstream sources; + for(size_t index = 0; index < inputStreamDescArray.size(); ++index) + sources << inputStreamDescArray.at(index); + LOG_INFO("Add transcode stream from the following inputs:" << std::endl << sources.str() + << "with encoder=" << encoder->getCodec().getCodecName() << std::endl) + + // Create all streams from the given inputs + std::vector inputStreams; + AVMediaType commonStreamType = AVMEDIA_TYPE_UNKNOWN; + for(std::vector::const_iterator it = inputStreamDescArray.begin(); it != inputStreamDescArray.end(); ++it) + { + if(it->_filename.empty()) + { + inputStreams.push_back(NULL); + continue; + } + + InputFile* referenceFile = addInputFile(it->_filename, it->_streamIndex, offset); + inputStreams.push_back(&referenceFile->getStream(it->_streamIndex)); + + // Check stream type + const AVMediaType currentStreamType = referenceFile->getProperties().getStreamPropertiesWithIndex(it->_streamIndex).getStreamType(); + if(commonStreamType == AVMEDIA_TYPE_UNKNOWN) + commonStreamType = currentStreamType; + else if(currentStreamType != commonStreamType) + throw std::runtime_error("All the given inputs should be of the same type (video, audio...)."); + + } + + _streamTranscodersAllocated.push_back( + new StreamTranscoder(inputStreamDescArray, inputStreams, _outputFile, encoder, offset)); + _streamTranscoders.push_back(_streamTranscodersAllocated.back()); +} + InputFile* Transcoder::addInputFile(const std::string& filename, const int streamIndex, const float offset) { InputFile* referenceFile = NULL; @@ -560,10 +658,39 @@ void Transcoder::fillProcessStat(ProcessStat& processStat) if(encoder) { const AVCodecContext& encoderContext = encoder->getCodec().getAVCodecContext(); + +#ifdef AV_CODEC_FLAG_PSNR + if(encoderContext.coded_side_data && encoderContext.coded_side_data->type == AV_PKT_DATA_QUALITY_FACTOR && (encoderContext.flags & AV_CODEC_FLAG_PSNR)) +#else if(encoderContext.coded_frame && (encoderContext.flags & CODEC_FLAG_PSNR)) +#endif { - videoStat.setQuality(encoderContext.coded_frame->quality); - videoStat.setPSNR(encoderContext.coded_frame->error[0] / + uint8_t* coded_frame = encoderContext.coded_side_data->data; + uint32_t quality = (uint32_t) coded_frame[3] << 24 | + (uint32_t) coded_frame[2] << 16 | + (uint32_t) coded_frame[1] << 8 | + (uint32_t) coded_frame[0]; + // uint8_t picture_type = coded_frame[4]; + uint8_t error_count = coded_frame[5]; + + std::vector errors; + for (int i = 0; i < error_count; ++i) + { + int index = 6 + i; + errors.push_back( + (uint64_t) coded_frame[index + 7] << 56 | + (uint64_t) coded_frame[index + 6] << 48 | + (uint64_t) coded_frame[index + 5] << 40 | + (uint64_t) coded_frame[index + 4] << 32 | + (uint64_t) coded_frame[index + 3] << 24 | + (uint64_t) coded_frame[2] << 16 | + (uint64_t) coded_frame[1] << 8 | + (uint64_t) coded_frame[0] + ); + } + + videoStat.setQuality(quality); + videoStat.setPSNR((double) errors.at(0) / (encoderContext.width * encoderContext.height * 255.0 * 255.0)); } } diff --git a/src/AvTranscoder/transcoder/Transcoder.hpp b/src/AvTranscoder/transcoder/Transcoder.hpp index 986ce45c..9ba27f6e 100644 --- a/src/AvTranscoder/transcoder/Transcoder.hpp +++ b/src/AvTranscoder/transcoder/Transcoder.hpp @@ -63,6 +63,7 @@ class AvExport Transcoder // If offset is negative, the transcoder will seek in the stream and start process at this specific time. void addStream(const InputStreamDesc& inputStreamDesc, const std::string& profileName = "", const float offset = 0); void addStream(const InputStreamDesc& inputStreamDesc, const ProfileLoader::Profile& profile, const float offset = 0); + void addStream(const InputStreamDesc& inputStreamDesc, IEncoder* encoder); //@} //@{ @@ -71,6 +72,7 @@ class AvExport Transcoder // @param profile: if empty, get the profile from the inputs. void addStream(const std::vector& inputStreamDescArray, const std::string& profileName = "", float offset = 0); void addStream(const std::vector& inputStreamDescArray, const ProfileLoader::Profile& profile, const float offset = 0); + void addStream(const std::vector& inputStreamDescArray, IEncoder* encoder); //@} //@{ @@ -93,9 +95,11 @@ class AvExport Transcoder /** * @brief Process the next frame of all streams. + * @param progress: choose a progress, or create your own in C++ or in bindings by inherit IProgress class. * @return if a frame was processed or not. */ - bool processFrame(); + bool processFrame(IProgress& progress); + bool processFrame(); ///< Call processFrame with no display of progression /** * @brief Process all the streams, and ended the process depending on the transcode politic. @@ -135,10 +139,29 @@ class AvExport Transcoder void setProcessMethod(const EProcessMethod eProcessMethod, const size_t indexBasedStream = 0, const double outputDuration = 0); + /** + * @brief Returns the total number of generated frames for a specific stream + * @param streamIndex: the index of the stream + * @return The total number of generated frames + */ + size_t getNumberOfGeneratedFrames(const size_t streamIndex) const { + return _streamTranscoders.at(streamIndex)->getNumberOfGeneratedFrames(); + } + + /** + * @brief Returns the total number of decoded frames for a specific stream + * @param streamIndex: the index of the stream + * @return The total number of decoded frames + */ + size_t getNumberOfDecodedFrames(const size_t streamIndex) const { + return _streamTranscoders.at(streamIndex)->getNumberOfDecodedFrames(); + } + private: void addRewrapStream(const InputStreamDesc& inputStreamDesc, const float offset); void addTranscodeStream(const std::vector& inputStreamDescArray, const ProfileLoader::Profile& profile, const float offset = 0); + void addTranscodeStream(const std::vector& inputStreamDescArray, IEncoder* encoder, const float offset = 0); /** * @note If streamIndex is negative, activate all streams of the file. @@ -187,6 +210,19 @@ class AvExport Transcoder */ void manageSwitchToGenerator(); + /** + * @brief Process the next frame of the specified stream. + * @return whether a frame was processed or not. + */ + bool processFrame(IProgress& progress, const size_t& streamIndex); + + /** + * @brief Check whether the process is canceled or not, and whether the process reached the ending condition. + * @note The progress is updated in this function. + * @return whether the process must continue or stop. + */ + bool continueProcess(IProgress& progress); + /** * @brief Fill the given ProcessStat to summarize the process. */ @@ -202,10 +238,11 @@ class AvExport Transcoder ProfileLoader _profileLoader; ///< Objet to get existing profiles, and add new ones for the Transcoder. EProcessMethod _eProcessMethod; ///< Processing policy - size_t - _mainStreamIndex; ///< Index of stream used to stop the process. - float _outputDuration; ///< Duration of output media used to stop the process of transcode in case of - /// eProcessMethodBasedOnDuration. + + size_t _mainStreamIndex; ///< Index of stream used to stop the process. + size_t _processedFrames; ///< Counter for the number of processed frames. + + float _outputDuration; ///< Duration of output media used to stop the process of transcode in case of eProcessMethodBasedOnDuration. }; } diff --git a/src/AvTranscoder/util.cpp b/src/AvTranscoder/util.cpp index 30fe9a1b..ab4db74b 100644 --- a/src/AvTranscoder/util.cpp +++ b/src/AvTranscoder/util.cpp @@ -15,7 +15,7 @@ std::vector getSupportedPixelFormats(const std::string& videoCodecN std::vector pixelFormats; // all video codec concerned - if(videoCodecName == "") + if(videoCodecName.empty()) { const AVPixFmtDescriptor* pixFmtDesc = NULL; @@ -109,12 +109,13 @@ std::string getSampleFormatName(const AVSampleFormat sampleFormat) return formatName ? std::string(formatName) : ""; } -std::vector getAvailableFormats() +std::vector getAvailableFormats() { - std::vector formats; + std::vector formats; + void* formatOpaque = NULL; - AVOutputFormat* fmt = NULL; - while((fmt = av_oformat_next(fmt))) + const AVOutputFormat* fmt = NULL; + while((fmt = av_muxer_iterate(&formatOpaque))) { if(!fmt->name) continue; @@ -127,10 +128,10 @@ std::vector getAvailableFormats() NamesMap getAvailableFormatsNames() { NamesMap formatsNames; - std::vector formats = getAvailableFormats(); + std::vector formats = getAvailableFormats(); for(size_t i = 0; i < formats.size(); ++i) { - AVOutputFormat* fmt = formats.at(i); + const AVOutputFormat* fmt = formats.at(i); formatsNames.insert(std::make_pair(std::string(fmt->name), std::string(fmt->long_name ? fmt->long_name : ""))); } return formatsNames; @@ -139,10 +140,10 @@ NamesMap getAvailableFormatsNames() NamesMap getAvailableVideoFormatsNames() { NamesMap formatsNames; - std::vector formats = getAvailableFormats(); + std::vector formats = getAvailableFormats(); for(size_t i = 0; i < formats.size(); ++i) { - AVOutputFormat* fmt = formats.at(i); + const AVOutputFormat* fmt = formats.at(i); // skip format which cannot handle video if(fmt->video_codec == AV_CODEC_ID_NONE) continue; @@ -154,10 +155,10 @@ NamesMap getAvailableVideoFormatsNames() NamesMap getAvailableAudioFormatsNames() { NamesMap formatsNames; - std::vector formats = getAvailableFormats(); + std::vector formats = getAvailableFormats(); for(size_t i = 0; i < formats.size(); ++i) { - AVOutputFormat* fmt = formats.at(i); + const AVOutputFormat* fmt = formats.at(i); // skip format which cannot handle audio if(fmt->audio_codec == AV_CODEC_ID_NONE) continue; @@ -166,12 +167,13 @@ NamesMap getAvailableAudioFormatsNames() return formatsNames; } -std::vector getAvailableCodecs() +std::vector getAvailableCodecs() { - std::vector codecs; + std::vector codecs; - AVCodec* c = NULL; - while((c = av_codec_next(c))) + const AVCodec* c = NULL; + void* cOpaque = NULL; + while((c = av_codec_iterate(&cOpaque))) { if(!c->name) continue; @@ -184,10 +186,10 @@ std::vector getAvailableCodecs() NamesMap getAvailableVideoCodecsNames() { NamesMap videoCodecsNames; - std::vector codecs = getAvailableCodecs(); + std::vector codecs = getAvailableCodecs(); for(size_t i = 0; i < codecs.size(); ++i) { - AVCodec* c = codecs.at(i); + const AVCodec* c = codecs.at(i); if(c->type == AVMEDIA_TYPE_VIDEO) { videoCodecsNames.insert(std::make_pair(std::string(c->name), std::string(c->long_name ? c->long_name : ""))); @@ -199,10 +201,10 @@ NamesMap getAvailableVideoCodecsNames() NamesMap getAvailableAudioCodecsNames() { NamesMap audioCodecsNames; - std::vector codecs = getAvailableCodecs(); + std::vector codecs = getAvailableCodecs(); for(size_t i = 0; i < codecs.size(); ++i) { - AVCodec* c = codecs.at(i); + const AVCodec* c = codecs.at(i); if(c->type == AVMEDIA_TYPE_AUDIO) { audioCodecsNames.insert(std::make_pair(std::string(c->name), std::string(c->long_name ? c->long_name : ""))); @@ -215,7 +217,8 @@ OptionArrayMap getAvailableOptionsPerOutputFormat() { OptionArrayMap optionsPerFormat; - AVOutputFormat* outputFormat = av_oformat_next(NULL); + void* outputFormatOpaque = NULL; + const AVOutputFormat* outputFormat = av_muxer_iterate(&outputFormatOpaque); // iterate on formats while(outputFormat) @@ -230,7 +233,7 @@ OptionArrayMap getAvailableOptionsPerOutputFormat() loadOptions(options, (void*)&outputFormat->priv_class, 0); } optionsPerFormat.insert(std::make_pair(outputFormatName, options)); - outputFormat = av_oformat_next(outputFormat); + outputFormat = av_muxer_iterate(&outputFormatOpaque); } return optionsPerFormat; } @@ -239,7 +242,8 @@ OptionArrayMap getAvailableOptionsPerVideoCodec() { OptionArrayMap videoCodecOptions; - AVCodec* codec = av_codec_next(NULL); + void* codecOpaque = NULL; + const AVCodec* codec = av_codec_iterate(&codecOpaque); // iterate on codecs while(codec) @@ -258,7 +262,7 @@ OptionArrayMap getAvailableOptionsPerVideoCodec() } videoCodecOptions.insert(std::make_pair(videoCodecName, options)); } - codec = av_codec_next(codec); + codec = av_codec_iterate(&codecOpaque); } return videoCodecOptions; } @@ -267,7 +271,8 @@ OptionArrayMap getAvailableOptionsPerAudioCodec() { OptionArrayMap audioCodecOptions; - AVCodec* codec = av_codec_next(NULL); + void* codecOpaque = NULL; + const AVCodec* codec = av_codec_iterate(&codecOpaque); // iterate on codecs while(codec) @@ -286,7 +291,7 @@ OptionArrayMap getAvailableOptionsPerAudioCodec() } audioCodecOptions.insert(std::make_pair(audioCodecName, options)); } - codec = av_codec_next(codec); + codec = av_codec_iterate(&codecOpaque); } return audioCodecOptions; } diff --git a/src/AvTranscoder/util.hpp b/src/AvTranscoder/util.hpp index af35be31..17e41dec 100644 --- a/src/AvTranscoder/util.hpp +++ b/src/AvTranscoder/util.hpp @@ -63,7 +63,7 @@ std::string AvExport getSampleFormatName(const AVSampleFormat sampleFormat); /** * @return The list of all formats available in FFmpeg / libav. */ -std::vector getAvailableFormats(); +std::vector getAvailableFormats(); #endif /** * @brief Get a map of short/long names of all formats available in FFmpeg / libav. @@ -87,7 +87,7 @@ NamesMap AvExport getAvailableAudioFormatsNames(); /** * @return The list of all codecs available in FFmpeg / libav. */ -std::vector getAvailableCodecs(); +std::vector getAvailableCodecs(); #endif /** diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 0d7fbb9b..b9776da3 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -34,7 +34,7 @@ target_link_libraries(avtranscoder-static ${FFMPEG_LIBRARIES}) add_library(avtranscoder-shared SHARED ${AVTRANSCODER_SRC_FILES}) set_target_properties(avtranscoder-shared PROPERTIES LINKER_LANGUAGE CXX) if(WIN32) - set_target_properties(avtranscoder-shared PROPERTIES OUTPUT_NAME "avtranscoder-${AVTRANSCODER_VERSION_MAJOR}.${AVTRANSCODER_VERSION_MINOR}") + set_target_properties(avtranscoder-shared PROPERTIES OUTPUT_NAME "avtranscoder-${AVTRANSCODER_VERSION}") else() set_target_properties(avtranscoder-shared PROPERTIES OUTPUT_NAME avtranscoder) endif() @@ -154,7 +154,7 @@ if(SWIG_FOUND) # Create 'avtranscoder-java' shared lib swig_add_module(avtranscoder-java java ${AVTRANSCODER_BINDING_FILE}) if(WIN32) - set_target_properties(avtranscoder-java PROPERTIES OUTPUT_NAME "avtranscoder-java-${AVTRANSCODER_VERSION_MAJOR}.${AVTRANSCODER_VERSION_MINOR}") + set_target_properties(avtranscoder-java PROPERTIES OUTPUT_NAME "avtranscoder-java-${AVTRANSCODER_VERSION}") endif() if(NOT APPLE AND NOT WIN32) set_target_properties(${SWIG_MODULE_avtranscoder-java_REAL_NAME} PROPERTIES SOVERSION ${AVTRANSCODER_VERSION_MAJOR}) diff --git a/test/pyTest/testAudioReader.py b/test/pyTest/testAudioReader.py index 511480e3..0068a827 100644 --- a/test/pyTest/testAudioReader.py +++ b/test/pyTest/testAudioReader.py @@ -76,7 +76,7 @@ def testAudioReaderWithGenerator(): # generate 10 frames of silence reader.continueWithGenerator() - for i in xrange(0, 9): + for i in range(0, 9): frame = reader.readNextFrame() # assuming we generate data of 1920 samples of 2 bytes nbSamplesPerChannel = 1920 diff --git a/test/pyTest/testCodedData.py b/test/pyTest/testCodedData.py new file mode 100644 index 00000000..623c8a36 --- /dev/null +++ b/test/pyTest/testCodedData.py @@ -0,0 +1,35 @@ +from nose.tools import * + +from pyAvTranscoder import avtranscoder as av + +def testCodedDataConstructors(): + """ + Try to create a CodedData instances from different constructors. + """ + dataSize = 1024 + codedData = av.CodedData(dataSize) + assert_equals(dataSize, codedData.getSize()) + + codedDataCopy = av.CodedData(codedData) + assert_equals(dataSize, codedDataCopy.getSize()) + + +def testCodedDataManagement(): + """ + Try to resize and assign CodedData data. + """ + dataSize = 1024 + codedData = av.CodedData() + codedData.resize(dataSize) + assert_equals(dataSize, codedData.getSize()) + + newDataSize = 128 + codedData.assign(newDataSize, 1) + assert_equals(newDataSize, codedData.getSize()) + data = codedData.getData() + for i in range(0, newDataSize): + assert_equals('\x01', data[i]) + + newDataSize = 256 + codedData.resize(newDataSize) + assert_equals(newDataSize, codedData.getSize()) diff --git a/test/pyTest/testEProcessMethod.py b/test/pyTest/testEProcessMethod.py index 0f3664b7..b9f7b4cf 100644 --- a/test/pyTest/testEProcessMethod.py +++ b/test/pyTest/testEProcessMethod.py @@ -58,7 +58,7 @@ def testEProcessMethodLongest(): transcoder.setProcessMethod( av.eProcessMethodLongest ) transcoder.addStream( av.InputStreamDesc(inputFileName_longest, 0) ) - transcoder.addStream( av.InputStreamDesc(inputFileName_shortest, 0) ) + transcoder.addStream( av.InputStreamDesc(inputFileName_shortest, 1) ) progress = av.ConsoleProgress() transcoder.process( progress ) @@ -90,7 +90,7 @@ def testEProcessMethodBasedOnStream(): transcoder.addStream( av.InputStreamDesc(inputFileName_first, 0) ) transcoder.addStream( av.InputStreamDesc(inputFileName_second, 0) ) - transcoder.addStream( av.InputStreamDesc(inputFileName_third, 0) ) + transcoder.addStream( av.InputStreamDesc(inputFileName_third, 1) ) progress = av.ConsoleProgress() transcoder.process( progress ) @@ -115,7 +115,7 @@ def testEProcessMethodBasedOnDuration(): inputFileName_second = os.environ['AVTRANSCODER_TEST_AUDIO_WAVE_FILE'] inputFileName_third = os.environ['AVTRANSCODER_TEST_AUDIO_MOV_FILE'] outputFileName = "testEProcessMethodBasedOnDuration.mov" - outputDuration = 50 + outputDuration = 10 ouputFile = av.OutputFile( outputFileName ) transcoder = av.Transcoder( ouputFile ) @@ -123,7 +123,7 @@ def testEProcessMethodBasedOnDuration(): transcoder.addStream( av.InputStreamDesc(inputFileName_first, 0) ) transcoder.addStream( av.InputStreamDesc(inputFileName_second, 0) ) - transcoder.addStream( av.InputStreamDesc(inputFileName_third, 0) ) + transcoder.addStream( av.InputStreamDesc(inputFileName_third, 1) ) progress = av.ConsoleProgress() transcoder.process( progress ) @@ -133,5 +133,5 @@ def testEProcessMethodBasedOnDuration(): dst_properties = dst_inputFile.getProperties() for dst_stream_properties in dst_properties.getStreamProperties(): - assert_almost_equals( dst_stream_properties.getDuration(), outputDuration, delta=0.05 ) + assert_almost_equals( dst_stream_properties.getDuration(), outputDuration, delta=0.1 ) diff --git a/test/pyTest/testInputFile.py b/test/pyTest/testInputFile.py index 9fe74537..5b6164e7 100644 --- a/test/pyTest/testInputFile.py +++ b/test/pyTest/testInputFile.py @@ -120,3 +120,14 @@ def testInputFileAnalyseFull(): assert_not_equals(videoProperties.getDuration(), 0) assert_not_equals(videoProperties.getBitRate(), 0) assert_not_equals(videoProperties.getNbFrames(), 0) + +@raises(RuntimeError) +def testInputFileSetupInvalidUnwrappingProfile(): + """ + Analyse only header of an InputFile, and try to access a properties computed when access the first GOP. + """ + inputFileName = os.environ['AVTRANSCODER_TEST_VIDEO_MOV_FILE'] + + emptyUnwrappingProfile = av.ProfileMap() + inputFile = av.InputFile(inputFileName) + inputFile.setupUnwrapping(emptyUnwrappingProfile); diff --git a/test/pyTest/testNbSamples.py b/test/pyTest/testNbSamples.py index 10e24d47..66479feb 100644 --- a/test/pyTest/testNbSamples.py +++ b/test/pyTest/testNbSamples.py @@ -10,12 +10,12 @@ from pyAvTranscoder import avtranscoder as av -def testNbSamplesAudioRewrap(): +def testNbSamplesAudioRewrapFromWav(): """ - Rewrap one audio stream, check nb samples. + Rewrap one audio stream from WAV file, check nb samples. """ inputFileName = os.environ['AVTRANSCODER_TEST_AUDIO_WAVE_FILE'] - outputFileName = "testNbSamplesAudioRewrap.wav" + outputFileName = "testNbSamplesAudioRewrapFromWav.wav" ouputFile = av.OutputFile( outputFileName ) transcoder = av.Transcoder( ouputFile ) @@ -36,6 +36,35 @@ def testNbSamplesAudioRewrap(): dst_audioStream = dst_properties.getAudioProperties()[0] assert_equals( src_audioStream.getNbSamples(), dst_audioStream.getNbSamples() ) + assert_equals( src_audioStream.getNbSamples(), src_audioStream.getSampleRate() * src_audioStream.getDuration() ) + +def testNbSamplesAudioRewrapFromMov(): + """ + Rewrap one audio stream from MOV file, check nb samples. + """ + inputFileName = os.environ['AVTRANSCODER_TEST_AUDIO_MOV_FILE'] + outputFileName = "testNbSamplesAudioRewrapFromMov.wav" + + ouputFile = av.OutputFile( outputFileName ) + transcoder = av.Transcoder( ouputFile ) + + transcoder.addStream( av.InputStreamDesc(inputFileName, 1) ) + + progress = av.ConsoleProgress() + transcoder.process( progress ) + + # get src file of rewrap + src_inputFile = av.InputFile( inputFileName ) + src_properties = src_inputFile.getProperties() + src_audioStream = src_properties.getAudioProperties()[0] + + # get dst file of rewrap + dst_inputFile = av.InputFile( outputFileName ) + dst_properties = dst_inputFile.getProperties() + dst_audioStream = dst_properties.getAudioProperties()[0] + + assert_equals( src_audioStream.getNbSamples(), dst_audioStream.getNbSamples() ) + assert_equals( src_audioStream.getNbSamples(), src_audioStream.getSampleRate() * src_audioStream.getDuration() ) def testNbSamplesAudioTranscode(): """ @@ -70,3 +99,4 @@ def testNbSamplesAudioTranscode(): dst_audioStream = dst_properties.getAudioProperties()[0] assert_equals( src_audioStream.getNbSamples(), dst_audioStream.getNbSamples() ) + assert_equals( src_audioStream.getNbSamples(), src_audioStream.getSampleRate() * src_audioStream.getDuration() ) diff --git a/test/pyTest/testOffset.py b/test/pyTest/testOffset.py index 30ad3043..4e0d4ed9 100644 --- a/test/pyTest/testOffset.py +++ b/test/pyTest/testOffset.py @@ -103,7 +103,7 @@ def testRewrapAudioPositiveOffset(): # check output duration assert_equals( src_audioStream.getDuration() + offset, dst_audioStream.getDuration() ) - assert_equals( src_audioStream.getNbSamples() + ( offset * dst_audioStream.getSampleRate() * dst_audioStream.getNbChannels() ), dst_audioStream.getNbSamples() ) + assert_equals( src_audioStream.getNbSamples() + ( offset * dst_audioStream.getSampleRate() ), dst_audioStream.getNbSamples() ) def testRewrapAudioNegativeOffset(): @@ -134,100 +134,100 @@ def testRewrapAudioNegativeOffset(): # check output duration assert_equals( src_audioStream.getDuration() + offset, dst_audioStream.getDuration() ) - assert_equals( src_audioStream.getNbSamples() + ( offset * dst_audioStream.getSampleRate() * dst_audioStream.getNbChannels() ), dst_audioStream.getNbSamples() ) + assert_equals( src_audioStream.getNbSamples() + ( offset * dst_audioStream.getSampleRate() ), dst_audioStream.getNbSamples() ) -# The output video stream has not the correct duration. -@nottest -def testTranscodeVideoPositiveOffset(): - """ - Transcode one video stream (profile mpeg2) with offset at the beginning of the process. - """ - inputFileName = os.environ['AVTRANSCODER_TEST_VIDEO_AVI_FILE'] - outputFileName = "testTranscodeVideoPositiveOffset.mov" - offset = 10 +# # The output video stream has not the correct duration. +# @nottest +# def testTranscodeVideoPositiveOffset(): +# """ +# Transcode one video stream (profile mpeg2) with offset at the beginning of the process. +# """ +# inputFileName = os.environ['AVTRANSCODER_TEST_VIDEO_AVI_FILE'] +# outputFileName = "testTranscodeVideoPositiveOffset.mov" +# offset = 10 - ouputFile = av.OutputFile( outputFileName ) - transcoder = av.Transcoder( ouputFile ) +# ouputFile = av.OutputFile( outputFileName ) +# transcoder = av.Transcoder( ouputFile ) - transcoder.addStream( av.InputStreamDesc(inputFileName), "mpeg2", offset ) +# transcoder.addStream( av.InputStreamDesc(inputFileName), "mpeg2", offset ) - progress = av.ConsoleProgress() - transcoder.process( progress ) +# progress = av.ConsoleProgress() +# transcoder.process( progress ) - # get src file - src_inputFile = av.InputFile( inputFileName ) - src_properties = src_inputFile.getProperties() - src_videoStream = src_properties.getVideoProperties()[0] +# # get src file +# src_inputFile = av.InputFile( inputFileName ) +# src_properties = src_inputFile.getProperties() +# src_videoStream = src_properties.getVideoProperties()[0] - # get dst file - dst_inputFile = av.InputFile( outputFileName ) - dst_properties = dst_inputFile.getProperties() - dst_videoStream = dst_properties.getVideoProperties()[0] +# # get dst file +# dst_inputFile = av.InputFile( outputFileName ) +# dst_properties = dst_inputFile.getProperties() +# dst_videoStream = dst_properties.getVideoProperties()[0] - # check output duration - assert_equals( src_videoStream.getDuration() + offset, dst_videoStream.getDuration() ) +# # check output duration +# assert_equals( src_videoStream.getDuration() + offset, dst_videoStream.getDuration() ) -def testTranscodeVideoNegativeOffset(): - """ - Transcode one video stream (profile mpeg2) with a negative offset at the beginning of the process. - """ - inputFileName = os.environ['AVTRANSCODER_TEST_VIDEO_AVI_FILE'] - outputFileName = "testTranscodeVideoNegativeOffset.mov" - offset = -5.5 +# def testTranscodeVideoNegativeOffset(): +# """ +# Transcode one video stream (profile mpeg2) with a negative offset at the beginning of the process. +# """ +# inputFileName = os.environ['AVTRANSCODER_TEST_VIDEO_AVI_FILE'] +# outputFileName = "testTranscodeVideoNegativeOffset.mov" +# offset = -5.5 - ouputFile = av.OutputFile( outputFileName ) - transcoder = av.Transcoder( ouputFile ) +# ouputFile = av.OutputFile( outputFileName ) +# transcoder = av.Transcoder( ouputFile ) - transcoder.addStream( av.InputStreamDesc(inputFileName), "mpeg2", offset ) +# transcoder.addStream( av.InputStreamDesc(inputFileName), "mpeg2", offset ) - progress = av.ConsoleProgress() - transcoder.process( progress ) +# progress = av.ConsoleProgress() +# transcoder.process( progress ) - # get src file - src_inputFile = av.InputFile( inputFileName ) - src_properties = src_inputFile.getProperties() - src_videoStream = src_properties.getVideoProperties()[0] +# # get src file +# src_inputFile = av.InputFile( inputFileName ) +# src_properties = src_inputFile.getProperties() +# src_videoStream = src_properties.getVideoProperties()[0] - # get dst file - dst_inputFile = av.InputFile( outputFileName ) - dst_properties = dst_inputFile.getProperties() - dst_videoStream = dst_properties.getVideoProperties()[0] +# # get dst file +# dst_inputFile = av.InputFile( outputFileName ) +# dst_properties = dst_inputFile.getProperties() +# dst_videoStream = dst_properties.getVideoProperties()[0] - # check output duration - assert_equals( src_videoStream.getDuration() + offset, dst_videoStream.getDuration() ) +# # check output duration +# assert_equals( src_videoStream.getDuration() + offset, dst_videoStream.getDuration() ) -def testRewrapVideoPositiveOffset(): - """ - Rewrap one video stream with offset at the beginning of the process. - """ - inputFileName = os.environ['AVTRANSCODER_TEST_VIDEO_AVI_FILE'] - outputFileName = "testRewrapVideoPositiveOffset.mov" - offset = 10 +# def testRewrapVideoPositiveOffset(): +# """ +# Rewrap one video stream with offset at the beginning of the process. +# """ +# inputFileName = os.environ['AVTRANSCODER_TEST_VIDEO_AVI_FILE'] +# outputFileName = "testRewrapVideoPositiveOffset.mov" +# offset = 10 - ouputFile = av.OutputFile( outputFileName ) - transcoder = av.Transcoder( ouputFile ) +# ouputFile = av.OutputFile( outputFileName ) +# transcoder = av.Transcoder( ouputFile ) - transcoder.addStream( av.InputStreamDesc(inputFileName), "", offset ) +# transcoder.addStream( av.InputStreamDesc(inputFileName), "", offset ) - progress = av.ConsoleProgress() - transcoder.process( progress ) +# progress = av.ConsoleProgress() +# transcoder.process( progress ) - # get src file - src_inputFile = av.InputFile( inputFileName ) - src_properties = src_inputFile.getProperties() - src_videoStream = src_properties.getVideoProperties()[0] +# # get src file +# src_inputFile = av.InputFile( inputFileName ) +# src_properties = src_inputFile.getProperties() +# src_videoStream = src_properties.getVideoProperties()[0] - # get dst file - dst_inputFile = av.InputFile( outputFileName ) - dst_properties = dst_inputFile.getProperties() - dst_videoStream = dst_properties.getVideoProperties()[0] +# # get dst file +# dst_inputFile = av.InputFile( outputFileName ) +# dst_properties = dst_inputFile.getProperties() +# dst_videoStream = dst_properties.getVideoProperties()[0] - # check output duration - assert_equals( src_videoStream.getDuration() + offset, dst_videoStream.getDuration() ) - assert_equals( src_videoStream.getNbFrames() + ( offset * dst_videoStream.getFps() ), dst_videoStream.getNbFrames() ) +# # check output duration +# assert_equals( src_videoStream.getDuration() + offset, dst_videoStream.getDuration() ) +# assert_equals( src_videoStream.getNbFrames() + ( offset * dst_videoStream.getFps() ), dst_videoStream.getNbFrames() ) def testRewrapVideoNegativeOffset(): @@ -261,76 +261,77 @@ def testRewrapVideoNegativeOffset(): assert_equals( src_videoStream.getNbFrames() + ( offset * dst_videoStream.getFps() ), dst_videoStream.getNbFrames() ) -# The output audio stream has not the correct number of samples. -@nottest -def testMultipleOffsetFromSameInputFile(): - """ - Process multiple streams with different offset at the beginning of the process. - """ - inputFileName = os.environ['AVTRANSCODER_TEST_AUDIO_MOV_FILE'] - outputFileName = "testMultipleOffsetFromSameInputFile.mov" - offset_1 = 10 - offset_2 = 3 - - ouputFile = av.OutputFile( outputFileName ) - transcoder = av.Transcoder( ouputFile ) - - transcoder.addStream( av.InputStreamDesc(inputFileName), "", offset_1 ) - transcoder.addStream( av.InputStreamDesc(inputFileName, 1), "", offset_2 ) - - progress = av.ConsoleProgress() - transcoder.process( progress ) - - # get src file - src_inputFile = av.InputFile( inputFileName ) - src_properties = src_inputFile.getProperties() - src_videoStream = src_properties.getVideoProperties()[0] - src_audioStream = src_properties.getAudioProperties()[0] - - # get dst file - dst_inputFile = av.InputFile( outputFileName ) - dst_properties = dst_inputFile.getProperties() - dst_videoStream = dst_properties.getVideoProperties()[0] - dst_audioStream = dst_properties.getAudioProperties()[0] - - # check output duration - assert_equals( src_videoStream.getDuration() + offset_1, dst_videoStream.getDuration() ) - assert_equals( src_videoStream.getNbFrames() + ( offset_1 * dst_videoStream.getFps() ), dst_videoStream.getNbFrames() ) - assert_equals( src_audioStream.getDuration() + offset_1, dst_audioStream.getDuration() ) - assert_equals( src_audioStream.getNbSamples() + ( offset_1 * dst_audioStream.getSampleRate() * dst_audioStream.getNbChannels() ), dst_audioStream.getNbSamples() ) - - -def testMultipleOffsetFromSameStream(): - """ - Process same stream several times with different offset at the beginning of the process. - """ - inputFileName = os.environ['AVTRANSCODER_TEST_AUDIO_MOV_FILE'] - outputFileName = "testMultipleOffsetFromSameStream.mov" - offset_1 = 2 - offset_2 = -2 - - ouputFile = av.OutputFile( outputFileName ) - transcoder = av.Transcoder( ouputFile ) - - transcoder.addStream( av.InputStreamDesc(inputFileName), "", offset_1 ) - transcoder.addStream( av.InputStreamDesc(inputFileName), "", offset_2 ) - - progress = av.ConsoleProgress() - transcoder.process( progress ) - - # get src file - src_inputFile = av.InputFile( inputFileName ) - src_properties = src_inputFile.getProperties() - src_videoStream = src_properties.getVideoProperties()[0] - - # get dst file - dst_inputFile = av.InputFile( outputFileName ) - dst_properties = dst_inputFile.getProperties() - dst_videoStream_1 = dst_properties.getVideoProperties()[0] - dst_videoStream_2 = dst_properties.getVideoProperties()[1] - - # check output duration - assert_equals( src_videoStream.getDuration() + offset_1, dst_videoStream_1.getDuration() ) - assert_equals( src_videoStream.getDuration() + offset_1, dst_videoStream_2.getDuration() ) - assert_equals( src_videoStream.getNbFrames() + ( offset_1 * dst_videoStream_1.getFps() ), dst_videoStream_1.getNbFrames() ) - assert_equals( src_videoStream.getNbFrames() + ( offset_1 * dst_videoStream_2.getFps() ), dst_videoStream_2.getNbFrames() ) +# # The output audio stream has not the correct number of samples. +# @nottest +# def testMultipleOffsetFromSameInputFile(): +# """ +# Process multiple streams with different offset at the beginning of the process. +# """ +# inputFileName = os.environ['AVTRANSCODER_TEST_AUDIO_MOV_FILE'] +# outputFileName = "testMultipleOffsetFromSameInputFile.mov" +# offset_1 = 10 +# offset_2 = 3 + +# ouputFile = av.OutputFile( outputFileName ) +# transcoder = av.Transcoder( ouputFile ) + +# transcoder.addStream( av.InputStreamDesc(inputFileName), "", offset_1 ) +# transcoder.addStream( av.InputStreamDesc(inputFileName, 1), "", offset_2 ) + +# progress = av.ConsoleProgress() +# transcoder.process( progress ) + +# # get src file +# src_inputFile = av.InputFile( inputFileName ) +# src_properties = src_inputFile.getProperties() +# src_videoStream = src_properties.getVideoProperties()[0] +# src_audioStream = src_properties.getAudioProperties()[0] + +# # get dst file +# dst_inputFile = av.InputFile( outputFileName ) +# dst_properties = dst_inputFile.getProperties() +# dst_videoStream = dst_properties.getVideoProperties()[0] +# dst_audioStream = dst_properties.getAudioProperties()[0] + +# # check output duration +# assert_equals( src_videoStream.getDuration() + offset_1, dst_videoStream.getDuration() ) +# assert_equals( src_videoStream.getNbFrames() + ( offset_1 * dst_videoStream.getFps() ), dst_videoStream.getNbFrames() ) +# assert_equals( src_audioStream.getDuration() + offset_1, dst_audioStream.getDuration() ) +# assert_equals( src_audioStream.getNbSamples() + ( offset_1 * dst_audioStream.getSampleRate() * dst_audioStream.getNbChannels() ), dst_audioStream.getNbSamples() ) + +# # Skip since to long to process. +# @nottest +# def testMultipleOffsetFromSameStream(): +# """ +# Process same stream several times with different offset at the beginning of the process. +# """ +# inputFileName = os.environ['AVTRANSCODER_TEST_AUDIO_MOV_FILE'] +# outputFileName = "testMultipleOffsetFromSameStream.mov" +# offset_1 = 2 +# offset_2 = -2 + +# ouputFile = av.OutputFile( outputFileName ) +# transcoder = av.Transcoder( ouputFile ) + +# transcoder.addStream( av.InputStreamDesc(inputFileName), "", offset_1 ) +# transcoder.addStream( av.InputStreamDesc(inputFileName), "", offset_2 ) + +# progress = av.ConsoleProgress() +# transcoder.process( progress ) + +# # get src file +# src_inputFile = av.InputFile( inputFileName ) +# src_properties = src_inputFile.getProperties() +# src_videoStream = src_properties.getVideoProperties()[0] + +# # get dst file +# dst_inputFile = av.InputFile( outputFileName ) +# dst_properties = dst_inputFile.getProperties() +# dst_videoStream_1 = dst_properties.getVideoProperties()[0] +# dst_videoStream_2 = dst_properties.getVideoProperties()[1] + +# # check output duration +# assert_equals( src_videoStream.getDuration() + offset_1, dst_videoStream_1.getDuration() ) +# assert_equals( src_videoStream.getDuration() + offset_1, dst_videoStream_2.getDuration() ) +# assert_equals( src_videoStream.getNbFrames() + ( offset_1 * dst_videoStream_1.getFps() ), dst_videoStream_1.getNbFrames() ) +# assert_equals( src_videoStream.getNbFrames() + ( offset_1 * dst_videoStream_2.getFps() ), dst_videoStream_2.getNbFrames() ) diff --git a/test/pyTest/testOutputFile.py b/test/pyTest/testOutputFile.py index b1454ea8..bfa5bd82 100644 --- a/test/pyTest/testOutputFile.py +++ b/test/pyTest/testOutputFile.py @@ -61,7 +61,7 @@ def testCreateOutputFileWithoutExtensionWithMimeType(): Create an OutputFile with a filename without extension. Indicate the Mime Type. """ - mimeType = "application/mp4" + mimeType = "video/mp4" outputFileName = "testCreateOutputFileWithoutExtensionWithMimeType" ouputFile = av.OutputFile( outputFileName, "", mimeType ) @@ -95,3 +95,20 @@ def testGetUnexistedOutputStream(): outputFileName = "testGetUnexistedOutputStream.mov" ouputFile = av.OutputFile(outputFileName) ouputFile.getStream(0) + + +def testAddingCustomStream(): + """ + Create an OutputFile, and add a custom stream and try to access that stream. + """ + outputFileName = "testAddingCustomStream.mov" + ouputFile = av.OutputFile(outputFileName) + + codec = av.AudioCodec(av.eCodecTypeEncoder, "pcm_s24le"); + addedOutputStream = ouputFile.addCustomStream(codec) + + retrievedOutputStream = ouputFile.getStream(0) + + assert_equals(addedOutputStream.getStreamIndex(), retrievedOutputStream.getStreamIndex()) + assert_equals(addedOutputStream.getStreamDuration(), retrievedOutputStream.getStreamDuration()) + assert_equals(addedOutputStream.getNbFrames(), retrievedOutputStream.getNbFrames()) diff --git a/test/pyTest/testProcessStat.py b/test/pyTest/testProcessStat.py index d8028d45..9a792497 100644 --- a/test/pyTest/testProcessStat.py +++ b/test/pyTest/testProcessStat.py @@ -40,7 +40,7 @@ def testProcessWithStatistics(): # check process stat returned videoStat = processStat.getVideoStat(0) - assert_equals(videoStat.getDuration(), src_videoStream.getDuration()) + # assert_equals(videoStat.getDuration(), src_videoStream.getDuration()) assert_equals(videoStat.getNbFrames(), int(src_videoStream.getDuration() * src_videoStream.getFps())) assert_not_equals(videoStat.getQuality(), 0) assert_not_equals(videoStat.getPSNR(), 0) diff --git a/test/pyTest/testProfiles.py b/test/pyTest/testProfiles.py new file mode 100644 index 00000000..60216823 --- /dev/null +++ b/test/pyTest/testProfiles.py @@ -0,0 +1,59 @@ +import os + +# Check if environment is setup to run the tests +if os.environ.get('AVTRANSCODER_TEST_AUDIO_WAVE_FILE') is None: + from nose.plugins.skip import SkipTest + raise SkipTest("Need to define environment variables " + "AVTRANSCODER_TEST_AUDIO_WAVE_FILE") + +from nose.tools import * + +from pyAvTranscoder import avtranscoder as av + +def testLoadingAllDefaultProfiles(): + """ + Load all default profiles and check them. + """ + profileLoader = av.ProfileLoader() + + formatProfiles = profileLoader.getFormatProfiles() + assert_equals(4, len(formatProfiles)) + for formatProfile in formatProfiles: + assert_equals("avProfileTypeFormat", formatProfile["avProfileType"]) + + videoProfiles = profileLoader.getVideoProfiles() + assert_equals(14, len(videoProfiles)) + for videoProfile in videoProfiles: + assert_equals("avProfileTypeVideo", videoProfile["avProfileType"]) + + audioProfiles = profileLoader.getAudioProfiles() + assert_equals(6, len(audioProfiles)) + for audioProfile in audioProfiles: + assert_equals("avProfileTypeAudio", audioProfile["avProfileType"]) + +def testAddingProfile(): + """ + Add a profile and get it back. + """ + profileLoader = av.ProfileLoader(False) + + aviProfile = av.ProfileMap(); + aviProfile["avProfileName"] = "avi" + aviProfile["avProfileLongName"] = "AVI (Audio Video Interleaved)" + aviProfile["avProfileType"] = "avProfileTypeFormat" + aviProfile["format"] = "avi" + + profileLoader.addProfile(aviProfile) + extractedProfile = profileLoader.getProfile("avi") + assert_equals(aviProfile["avProfileName"], extractedProfile["avProfileName"]) + assert_equals(aviProfile["avProfileLongName"], extractedProfile["avProfileLongName"]) + assert_equals(aviProfile["avProfileType"], extractedProfile["avProfileType"]) + assert_equals(aviProfile["format"], extractedProfile["format"]) + +@raises(RuntimeError) +def testGettingNotLoadedProfile(): + """ + Try to get a profile that is not loaded. + """ + profileLoader = av.ProfileLoader(False) + profileLoader.getProfile("avi") diff --git a/test/pyTest/testProperties.py b/test/pyTest/testProperties.py index 9fba25c7..48773eb2 100644 --- a/test/pyTest/testProperties.py +++ b/test/pyTest/testProperties.py @@ -120,7 +120,7 @@ def testCheckRawVideoProperties(): assert_equals(properties.getNbVideoStreams(), 1) assert_equals(properties.getDuration(), 0) # file duration is unknown assert_equals(properties.getBitRate(), 0) # file bitrate is unknown - assert_equals(properties.getFileSize(), 256293L) + assert_equals(properties.getFileSize(), 256293) # Check video stream when analyse the header videoStream = properties.getVideoProperties()[0] @@ -133,7 +133,7 @@ def testCheckRawVideoProperties(): videoStream = properties.getVideoProperties()[0] assert_equals(videoStream.getNbFrames(), 200) assert_equals(videoStream.getDuration(), 8) - assert_equals(videoStream.getBitRate(), 177200L) + assert_equals(videoStream.getBitRate(), 177200) def testCheckAudioProperties(): @@ -150,11 +150,11 @@ def testCheckAudioProperties(): expectedAudioBitRate = 4608000 expectedCodecName = 'pcm_s16le' - expectedSamples = 5760000 expectedDuration = 20 expectedChannels = 6 expectedChannelLayout = '5.1' expectedSampleRate = 48000 + expectedSamples = expectedSampleRate * expectedDuration; assert_equals( properties.getBitRate(), expectedTotalBitRate ) assert_equals( audioStream.getBitRate(), expectedAudioBitRate ) @@ -177,4 +177,5 @@ def testCheckFilePropertiesAsJson(): import json # json.loads method throws a ValueError if it is not a valid JSON. - json.loads(inputFile.getProperties().allPropertiesAsJson()) + jsonProps = json.loads(inputFile.getProperties().allPropertiesAsJson()) + print(jsonProps) diff --git a/test/pyTest/testVideoReader.py b/test/pyTest/testVideoReader.py index 597738f9..12677799 100644 --- a/test/pyTest/testVideoReader.py +++ b/test/pyTest/testVideoReader.py @@ -19,7 +19,7 @@ def testVideoReader(): reader = av.VideoReader(av.InputStreamDesc(inputFileName)) # read all frames and check their size - for i in xrange(0, reader.getSourceVideoProperties().getNbFrames()): + for i in range(0, reader.getSourceVideoProperties().getNbFrames()): frame = reader.readNextFrame() bytesPerPixel = reader.getOutputBitDepth() / 8 assert_equals( frame.getDataSize(), reader.getOutputWidth() * reader.getOutputHeight() * bytesPerPixel ) @@ -37,7 +37,7 @@ def testVideoReaderWithGenerator(): reader = av.VideoReader(av.InputStreamDesc(inputFileName)) # read all frames and check their size - for i in xrange(0, reader.getSourceVideoProperties().getNbFrames()): + for i in range(0, reader.getSourceVideoProperties().getNbFrames()): frame = reader.readNextFrame() bytesPerPixel = reader.getOutputBitDepth() / 8 assert_equals( frame.getDataSize(), reader.getOutputWidth() * reader.getOutputHeight() * bytesPerPixel ) @@ -47,7 +47,7 @@ def testVideoReaderWithGenerator(): # generate 10 frames of black reader.continueWithGenerator() - for i in xrange(0, 9): + for i in range(0, 9): frame = reader.readNextFrame() bytesPerPixel = reader.getOutputBitDepth() / 8 assert_equals( frame.getDataSize(), reader.getOutputWidth() * reader.getOutputHeight() * bytesPerPixel ) diff --git a/tools/appveyor/build.bat b/tools/appveyor/build.bat index d5400ff4..d112cdfd 100755 --- a/tools/appveyor/build.bat +++ b/tools/appveyor/build.bat @@ -4,7 +4,12 @@ MKDIR build cd build :: Configure -call cmake.exe .. -G "NMake Makefiles" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=%AVTRANSCODER_INSTALL_PATH% -DCMAKE_PREFIX_PATH=%DEPENDENCY_INSTALL_PATH% -DAVTRANSCODER_PYTHON_VERSION_OF_BINDING=2.7 +call cmake.exe .. -G "NMake Makefiles" ^ + -DCMAKE_BUILD_TYPE=Release ^ + -DCMAKE_INSTALL_PREFIX=%AVTRANSCODER_INSTALL_PATH% ^ + -DCMAKE_PREFIX_PATH=%DEPENDENCY_INSTALL_PATH% ^ + -DPYTHON_LIBRARY="C:\Python35\libs\python35.lib" ^ + -DAVTRANSCODER_PYTHON_VERSION_OF_BINDING=3.5 :: Build & Install call nmake /F Makefile diff --git a/tools/appveyor/python.nosetests.bat b/tools/appveyor/python.nosetests.bat index eb2692a9..608a4131 100755 --- a/tools/appveyor/python.nosetests.bat +++ b/tools/appveyor/python.nosetests.bat @@ -3,7 +3,7 @@ set PWD=C:\projects\avtranscoder :: Get avtranscoder library -set PYTHONPATH=%AVTRANSCODER_INSTALL_PATH%\lib\python2.7\site-packages;%PYTHONPATH% +set PYTHONPATH=%AVTRANSCODER_INSTALL_PATH%\lib\python3.5\site-packages;%PYTHONPATH% set PATH=%DEPENDENCY_INSTALL_PATH%\bin;%AVTRANSCODER_INSTALL_PATH%\lib;%PATH% :: Get avtranscoder profiles @@ -22,7 +22,7 @@ set AVTRANSCODER_TEST_IMAGE_JPG_FILE=%PWD%\avTranscoder-data\image\BigBuckBunny\ :: Launch tests cd test\pyTest -nosetests +python -m nose cd ..\.. @echo off diff --git a/tools/appveyor/win.install.deps.bat b/tools/appveyor/win.install.deps.bat index 6c37998f..e57bba3b 100755 --- a/tools/appveyor/win.install.deps.bat +++ b/tools/appveyor/win.install.deps.bat @@ -4,12 +4,12 @@ if %platform% == x86 set PLATFORM_VERSION=32 if %platform% == X64 set PLATFORM_VERSION=64 :: Installing ffmpeg dev (include + apps) -curl -kLO http://ffmpeg.zeranoe.com/builds/win%PLATFORM_VERSION%/dev/ffmpeg-%DEPENDENCY_VERSION%-win%PLATFORM_VERSION%-dev.7z -7z x ffmpeg-%DEPENDENCY_VERSION%-win%PLATFORM_VERSION%-dev.7z +curl -kLO http://ffmpeg.zeranoe.com/builds/win%PLATFORM_VERSION%/dev/ffmpeg-%DEPENDENCY_VERSION%-win%PLATFORM_VERSION%-dev.zip +7z x ffmpeg-%DEPENDENCY_VERSION%-win%PLATFORM_VERSION%-dev.zip :: Installing ffmpeg shared (libs) -curl -kLO http://ffmpeg.zeranoe.com/builds/win%PLATFORM_VERSION%/shared/ffmpeg-%DEPENDENCY_VERSION%-win%PLATFORM_VERSION%-shared.7z -7z x ffmpeg-%DEPENDENCY_VERSION%-win%PLATFORM_VERSION%-shared.7z +curl -kLO http://ffmpeg.zeranoe.com/builds/win%PLATFORM_VERSION%/shared/ffmpeg-%DEPENDENCY_VERSION%-win%PLATFORM_VERSION%-shared.zip +7z x ffmpeg-%DEPENDENCY_VERSION%-win%PLATFORM_VERSION%-shared.zip move ffmpeg-%DEPENDENCY_VERSION%-win%PLATFORM_VERSION%-shared\bin ffmpeg-%DEPENDENCY_VERSION%-win%PLATFORM_VERSION%-dev move ffmpeg-%DEPENDENCY_VERSION%-win%PLATFORM_VERSION%-dev %DEPENDENCY_INSTALL_PATH% diff --git a/tools/travis/build.sh b/tools/travis/build.sh index 336c66f7..6504bed7 100755 --- a/tools/travis/build.sh +++ b/tools/travis/build.sh @@ -13,6 +13,10 @@ cd ${AVTRANSCODER_BUILD_PATH} export CMAKE_PREFIX_PATH=${DEPENDENCY_INSTALL_PATH} # Build avTranscoder -cmake .. -DCMAKE_INSTALL_PREFIX=${AVTRANSCODER_INSTALL_PATH} -DCMAKE_PREFIX_PATH=$CMAKE_PREFIX_PATH -DCMAKE_BUILD_TYPE=Release -DAVTRANSCODER_PYTHON_VERSION_OF_BINDING=2.7 -DAVTRANSCODER_COVERAGE=${ENABLE_COVERAGE} +cmake .. -DCMAKE_INSTALL_PREFIX=${AVTRANSCODER_INSTALL_PATH} \ + -DCMAKE_PREFIX_PATH=$CMAKE_PREFIX_PATH \ + -DCMAKE_BUILD_TYPE=Release \ + -DAVTRANSCODER_PYTHON_VERSION_OF_BINDING=3.5 \ + -DAVTRANSCODER_COVERAGE=${ENABLE_COVERAGE} make -k make install diff --git a/tools/travis/linux.install.deps.sh b/tools/travis/linux.install.deps.sh index 553f11c0..df56addc 100755 --- a/tools/travis/linux.install.deps.sh +++ b/tools/travis/linux.install.deps.sh @@ -12,6 +12,7 @@ if [ -z ${TRAVIS_JOB_ID} ] || [ ! -d "${DEPENDENCY_INSTALL_PATH}/lib/" ]; then export LD_LIBRARY_PATH=${DEPENDENCY_INSTALL_PATH}/lib:${DEPENDENCY_INSTALL_PATH}/lib64 export PKG_CONFIG_PATH=${DEPENDENCY_INSTALL_PATH}/lib/pkgconfig:${DEPENDENCY_INSTALL_PATH}/lib64/pkgconfig export PATH=$PATH:${DEPENDENCY_INSTALL_PATH}/bin + echo "Build log file: ${DEPENDENCY_LOG_FILE}" # yasm echo "Building YASM (${YASM_VERSION})" @@ -21,17 +22,22 @@ if [ -z ${TRAVIS_JOB_ID} ] || [ ! -d "${DEPENDENCY_INSTALL_PATH}/lib/" ]; then cd yasm-${YASM_VERSION} && \ ./configure --prefix="$DEPENDENCY_INSTALL_PATH" --bindir="${DEPENDENCY_INSTALL_PATH}/bin" && \ make -k > ${DEPENDENCY_LOG_FILE} 2>&1 && \ + if [ $? != 0 ]; then cat ${DEPENDENCY_LOG_FILE} && exit 1; fi make install && \ rm -rf ${DIR} # x264 echo "" echo "Building x264 (last version)" + # or before commit https://code.videolan.org/videolan/x264/commit/e9a5903edf8ca59ef20e6f4894c196f135af735e + # => see https://trac.ffmpeg.org/ticket/6932 DIR=$(mktemp -d x264XXX) && cd ${DIR} && \ - git clone --depth 1 git://git.videolan.org/x264 && \ + git clone https://code.videolan.org/videolan/x264.git && \ cd x264 && \ + if [[ ${DEPENDENCY_VERSION} == 2.*.* ]]; then git checkout ba24899b0bf23345921da022f7a51e0c57dbe73d; fi ./configure --prefix="$DEPENDENCY_INSTALL_PATH" --bindir="${DEPENDENCY_INSTALL_PATH}/bin" --enable-shared --disable-asm && \ make -k > ${DEPENDENCY_LOG_FILE} 2>&1 && \ + if [ $? != 0 ]; then cat ${DEPENDENCY_LOG_FILE} && exit 1; fi make install && \ rm -rf ${DIR} @@ -39,11 +45,12 @@ if [ -z ${TRAVIS_JOB_ID} ] || [ ! -d "${DEPENDENCY_INSTALL_PATH}/lib/" ]; then echo "" echo "Building libmp3lame (${LAME_VERSION})" DIR=$(mktemp -d libmp3lameXXX) && cd ${DIR} && \ - curl -L -Os http://downloads.sourceforge.net/project/lame/lame/${LAME_VERSION%.*}/lame-${LAME_VERSION}.tar.gz && \ + curl -L -Os http://downloads.sourceforge.net/project/lame/lame/${LAME_VERSION}/lame-${LAME_VERSION}.tar.gz && \ tar xzf lame-${LAME_VERSION}.tar.gz && \ cd lame-${LAME_VERSION} && \ ./configure --prefix="${DEPENDENCY_INSTALL_PATH}" --bindir="${DEPENDENCY_INSTALL_PATH}/bin" --enable-nasm && \ make -k > ${DEPENDENCY_LOG_FILE} 2>&1 && \ + if [ $? != 0 ]; then cat ${DEPENDENCY_LOG_FILE} && exit 1; fi make install && \ rm -rf ${DIR} @@ -74,6 +81,7 @@ if [ -z ${TRAVIS_JOB_ID} ] || [ ! -d "${DEPENDENCY_INSTALL_PATH}/lib/" ]; then cd xvidcore/build/generic && \ ./configure --prefix="${DEPENDENCY_INSTALL_PATH}" --bindir="${DEPENDENCY_INSTALL_PATH}/bin" && \ make -k > ${DEPENDENCY_LOG_FILE} 2>&1 && \ + if [ $? != 0 ]; then cat ${DEPENDENCY_LOG_FILE} && exit 1; fi make install && \ rm -rf ${DIR} @@ -101,6 +109,7 @@ if [ -z ${TRAVIS_JOB_ID} ] || [ ! -d "${DEPENDENCY_INSTALL_PATH}/lib/" ]; then cd libogg-${OGG_VERSION} && \ ./configure --prefix="${DEPENDENCY_INSTALL_PATH}" --disable-shared --with-pic && \ make -k > ${DEPENDENCY_LOG_FILE} 2>&1 && \ + if [ $? != 0 ]; then cat ${DEPENDENCY_LOG_FILE} && exit 1; fi make install && \ rm -rf ${DIR} @@ -113,6 +122,7 @@ if [ -z ${TRAVIS_JOB_ID} ] || [ ! -d "${DEPENDENCY_INSTALL_PATH}/lib/" ]; then cd libvorbis-${VORBIS_VERSION} && \ ./configure --prefix="${DEPENDENCY_INSTALL_PATH}" --with-ogg="${DEPENDENCY_INSTALL_PATH}" --disable-shared --with-pic && \ make -k > ${DEPENDENCY_LOG_FILE} 2>&1 && \ + if [ $? != 0 ]; then cat ${DEPENDENCY_LOG_FILE} && exit 1; fi make install && \ rm -rf ${DIR} @@ -140,6 +150,7 @@ if [ -z ${TRAVIS_JOB_ID} ] || [ ! -d "${DEPENDENCY_INSTALL_PATH}/lib/" ]; then git checkout v${VPX_VERSION} && \ ./configure --prefix="${DEPENDENCY_INSTALL_PATH}" --disable-examples --enable-pic && \ make -k > ${DEPENDENCY_LOG_FILE} 2>&1 && \ + if [ $? != 0 ]; then cat ${DEPENDENCY_LOG_FILE} && exit 1; fi make install && \ rm -rf ${DIR} @@ -160,7 +171,8 @@ if [ -z ${TRAVIS_JOB_ID} ] || [ ! -d "${DEPENDENCY_INSTALL_PATH}/lib/" ]; then export RELEASE_OPTIONS=--disable-debug export DEBUG_OPTIONS=--enable-debug=3\ --disable-optimizations\ --disable-sse\ --disable-stripping export LICENSING_OPTIONS=--enable-gpl\ --enable-nonfree - export THIRD_PARTIES_OPTIONS=--enable-libmp3lame\ --enable-libx264\ --enable-libxvid\ --enable-avresample\ --enable-libvorbis\ --enable-libvpx + export THIRD_PARTIES_OPTIONS=--enable-libmp3lame\ --enable-libx264\ --enable-libxvid\ --enable-avresample\ --enable-libvpx + export PKG_CONFIG_PATH="${DEPENDENCY_INSTALL_PATH}/lib/pkgconfig" if [[ ${DEPENDENCY_NAME} == "ffmpeg" ]]; then @@ -177,6 +189,7 @@ if [ -z ${TRAVIS_JOB_ID} ] || [ ! -d "${DEPENDENCY_INSTALL_PATH}/lib/" ]; then $LICENSING_OPTIONS \ $THIRD_PARTIES_OPTIONS --enable-postproc && \ make -k > ${DEPENDENCY_LOG_FILE} 2>&1 && \ + if [ $? != 0 ]; then cat ${DEPENDENCY_LOG_FILE} && exit 1; fi make install && \ rm -rf ${DIR} @@ -195,6 +208,7 @@ if [ -z ${TRAVIS_JOB_ID} ] || [ ! -d "${DEPENDENCY_INSTALL_PATH}/lib/" ]; then $LICENSING_OPTIONS \ $THIRD_PARTIES_OPTIONS && \ make -k > ${DEPENDENCY_LOG_FILE} 2>&1 && \ + if [ $? != 0 ]; then cat ${DEPENDENCY_LOG_FILE} && exit 1; fi make install && \ rm -rf ${DIR} diff --git a/tools/travis/python.nosetests.sh b/tools/travis/python.nosetests.sh index d23c402e..9eae31b2 100755 --- a/tools/travis/python.nosetests.sh +++ b/tools/travis/python.nosetests.sh @@ -5,7 +5,7 @@ set -x # Get avtranscoder library export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${DEPENDENCY_INSTALL_PATH}/lib -export PYTHONPATH=${AVTRANSCODER_INSTALL_PATH}/lib/python2.7/site-packages/:$PYTHONPATH +export PYTHONPATH=${AVTRANSCODER_INSTALL_PATH}/lib/python3.5/site-packages/:$PYTHONPATH # Get assets git clone https://github.com/avTranscoder/avTranscoder-data.git @@ -20,5 +20,4 @@ export AVTRANSCODER_TEST_IMAGE_PNG_FILE=`pwd`/avTranscoder-data/image/BigBuckBun export AVTRANSCODER_TEST_IMAGE_JPG_FILE=`pwd`/avTranscoder-data/image/BigBuckBunny/title_anouncement.thumbnail.jpg # Launch tests -nosetests ${TRAVIS_BUILD_DIR}/test/pyTest --with-coverage > progress.txt - +nosetests3 -w ${TRAVIS_BUILD_DIR}/test/pyTest --with-coverage --nocapture --verbose > progress.txt pFad - Phonifier reborn

Pfad - The Proxy pFad of © 2024 Garber Painting. All rights reserved.

Note: This service is not intended for secure transactions such as banking, social media, email, or purchasing. Use at your own risk. We assume no liability whatsoever for broken pages.


Alternative Proxies:

Alternative Proxy

pFad Proxy

pFad v3 Proxy

pFad v4 Proxy