#include <stdio.h>
#include <chrono>
#include <thread>
#include "opencv2/opencv.hpp"
#include <string>
int main()
{
    // Encode 50 synthetic frames (1344x756 @ 25fps) into an HEVC MP4 by piping
    // raw BGR frames into an FFmpeg sub-process.
    int width = 1344;
    int height = 756;
    int n_frames = 50;
    int fps = 25;
    const std::string output_filename = "output.mp4"; // Example for file name with spaces: "\"output with spaces.mp4\""

    // FFmpeg command line:
    //  input  - raw BGR24 video read from the stdin pipe
    //  output - MP4 file encoded with HEVC (libx265), CRF 24, YUV420 pixel format
    std::string ffmpeg_cmd = std::string("ffmpeg -y -f rawvideo -r ") + std::to_string(fps) +
        " -video_size " + std::to_string(width) + "x" + std::to_string(height) +
        " -pixel_format bgr24 -i pipe: -vcodec libx265 -crf 24 -pix_fmt yuv420p " + output_filename;

    // Execute FFmpeg as a sub-process and open its stdin pipe for writing.
    // Windows needs _popen with binary mode "wb"; POSIX uses popen.
#ifdef _MSC_VER
    FILE* pipeout = _popen(ffmpeg_cmd.c_str(), "wb"); // Windows (ffmpeg.exe must be in the execution path)
#else
    // https://batchloaf.wordpress.com/2017/02/12/a-simple-way-to-read-and-write-audio-and-video-files-in-c-using-ffmpeg-part-2-video/
    FILE* pipeout = popen(ffmpeg_cmd.c_str(), "w");   // Linux (ffmpeg must be in the execution path)
#endif

    // popen/_popen return NULL when the sub-process cannot be started.
    if (pipeout == nullptr)
    {
        perror("Failed to start FFmpeg sub-process");
        return 1;
    }

    for (int i = 0; i < n_frames; i++)
    {
        // Build a synthetic test frame: dark gray background with a large blue frame number.
        cv::Mat frame = cv::Mat(height, width, CV_8UC3);
        frame = cv::Scalar(60, 60, 60); // Fill background with dark gray
        cv::putText(frame, std::to_string(i + 1),
                    cv::Point(width / 2 - 100 * (int)(std::to_string(i + 1).length()), height / 2 + 100),
                    cv::FONT_HERSHEY_DUPLEX, 10, cv::Scalar(255, 30, 30), 20); // Draw a blue number

        // Write width*height*3 bytes to FFmpeg's stdin.
        // A freshly allocated cv::Mat is continuous, so frame.data is one contiguous buffer.
        const size_t frame_bytes = (size_t)width * height * 3;
        if (fwrite(frame.data, 1, frame_bytes, pipeout) != frame_bytes)
        {
            fprintf(stderr, "Short write to FFmpeg pipe at frame %d - aborting.\n", i);
            break; // FFmpeg most likely exited; stop feeding frames.
        }
    }

    // Flush buffered data and close the pipe (pclose waits for FFmpeg to exit).
    fflush(pipeout);
#ifdef _MSC_VER
    _pclose(pipeout); // Windows
#else
    pclose(pipeout);  // Linux
#endif

    // _popen/_pclose may return before the output file is fully closed; wait a bit.
    // https://stackoverflow.com/a/62804585/4926757
    std::this_thread::sleep_for(std::chrono::milliseconds(1000)); // sleep for 1 second
    return 0;
}
#include <stdio.h>
#include <chrono>
#include <thread>
#include "opencv2/opencv.hpp"
#include <string>
int main()
{
    // Encode 50 synthetic frames (1344x756 @ 25fps) into an HEVC MP4 by piping
    // raw BGR frames into an FFmpeg sub-process.
    int width = 1344;
    int height = 756;
    int n_frames = 50;
    int fps = 25;
    const std::string output_filename = "output.mp4";

    // FFmpeg command line:
    //  input  - raw BGR24 video read from the stdin pipe
    //  output - MP4 file encoded with HEVC (libx265), CRF 24, YUV420 pixel format
    std::string ffmpeg_cmd = std::string("ffmpeg -y -f rawvideo -r ") + std::to_string(fps) +
        " -video_size " + std::to_string(width) + "x" + std::to_string(height) +
        " -pixel_format bgr24 -i pipe: -vcodec libx265 -crf 24 -pix_fmt yuv420p " + output_filename;

    // Execute FFmpeg as a sub-process and open its stdin pipe for writing.
    // Windows needs _popen with binary mode "wb"; POSIX uses popen.
#ifdef _MSC_VER
    FILE* pipeout = _popen(ffmpeg_cmd.c_str(), "wb"); // Windows (ffmpeg.exe must be in the execution path)
#else
    // https://batchloaf.wordpress.com/2017/02/12/a-simple-way-to-read-and-write-audio-and-video-files-in-c-using-ffmpeg-part-2-video/
    FILE* pipeout = popen(ffmpeg_cmd.c_str(), "w");   // Linux (ffmpeg must be in the execution path)
#endif

    // popen/_popen return NULL when the sub-process cannot be started.
    if (pipeout == nullptr)
    {
        perror("Failed to start FFmpeg sub-process");
        return 1;
    }

    for (int i = 0; i < n_frames; i++)
    {
        // Build a synthetic test frame: dark gray background with a large blue frame number.
        cv::Mat frame = cv::Mat(height, width, CV_8UC3);
        frame = cv::Scalar(60, 60, 60); // Fill background with dark gray
        cv::putText(frame, std::to_string(i + 1),
                    cv::Point(width / 2 - 100 * (int)(std::to_string(i + 1).length()), height / 2 + 100),
                    cv::FONT_HERSHEY_DUPLEX, 10, cv::Scalar(255, 30, 30), 20); // Draw a blue number

        // Write width*height*3 bytes to FFmpeg's stdin.
        // A freshly allocated cv::Mat is continuous, so frame.data is one contiguous buffer.
        const size_t frame_bytes = (size_t)width * height * 3;
        if (fwrite(frame.data, 1, frame_bytes, pipeout) != frame_bytes)
        {
            fprintf(stderr, "Short write to FFmpeg pipe at frame %d - aborting.\n", i);
            break; // FFmpeg most likely exited; stop feeding frames.
        }
    }

    // Flush buffered data and close the pipe (pclose waits for FFmpeg to exit).
    fflush(pipeout);
#ifdef _MSC_VER
    _pclose(pipeout); // Windows
#else
    pclose(pipeout);  // Linux
#endif

    // _popen/_pclose may return before the output file is fully closed; wait a bit.
    // https://stackoverflow.com/a/62804585/4926757
    std::this_thread::sleep_for(std::chrono::milliseconds(1000)); // sleep for 1 second
    return 0;
}
#include <stdio.h>
#include <chrono>
#include <thread>
#include "opencv2/opencv.hpp"
#include <string>
int main()
{
    // Encode 50 synthetic frames (1344x756 @ 25fps) into an HEVC MP4 by piping
    // raw BGR frames into an FFmpeg sub-process.
    int width = 1344;
    int height = 756;
    int n_frames = 50;
    int fps = 25;
    const std::string output_filename = "output.mp4"; // Example for file name with spaces: "\"output with spaces.mp4\""

    // FFmpeg command line:
    //  input  - raw BGR24 video read from the stdin pipe
    //  output - MP4 file encoded with HEVC (libx265), CRF 24, YUV420 pixel format
    std::string ffmpeg_cmd = std::string("ffmpeg -y -f rawvideo -r ") + std::to_string(fps) +
        " -video_size " + std::to_string(width) + "x" + std::to_string(height) +
        " -pixel_format bgr24 -i pipe: -vcodec libx265 -crf 24 -pix_fmt yuv420p " + output_filename;

    // Execute FFmpeg as a sub-process and open its stdin pipe for writing.
    // Windows needs _popen with binary mode "wb"; POSIX uses popen.
#ifdef _MSC_VER
    FILE* pipeout = _popen(ffmpeg_cmd.c_str(), "wb"); // Windows (ffmpeg.exe must be in the execution path)
#else
    // https://batchloaf.wordpress.com/2017/02/12/a-simple-way-to-read-and-write-audio-and-video-files-in-c-using-ffmpeg-part-2-video/
    FILE* pipeout = popen(ffmpeg_cmd.c_str(), "w");   // Linux (ffmpeg must be in the execution path)
#endif

    // popen/_popen return NULL when the sub-process cannot be started.
    if (pipeout == nullptr)
    {
        perror("Failed to start FFmpeg sub-process");
        return 1;
    }

    for (int i = 0; i < n_frames; i++)
    {
        // Build a synthetic test frame: dark gray background with a large blue frame number.
        cv::Mat frame = cv::Mat(height, width, CV_8UC3);
        frame = cv::Scalar(60, 60, 60); // Fill background with dark gray
        cv::putText(frame, std::to_string(i + 1),
                    cv::Point(width / 2 - 100 * (int)(std::to_string(i + 1).length()), height / 2 + 100),
                    cv::FONT_HERSHEY_DUPLEX, 10, cv::Scalar(255, 30, 30), 20); // Draw a blue number

        // Write width*height*3 bytes to FFmpeg's stdin.
        // A freshly allocated cv::Mat is continuous, so frame.data is one contiguous buffer.
        const size_t frame_bytes = (size_t)width * height * 3;
        if (fwrite(frame.data, 1, frame_bytes, pipeout) != frame_bytes)
        {
            fprintf(stderr, "Short write to FFmpeg pipe at frame %d - aborting.\n", i);
            break; // FFmpeg most likely exited; stop feeding frames.
        }
    }

    // Flush buffered data and close the pipe (pclose waits for FFmpeg to exit).
    fflush(pipeout);
#ifdef _MSC_VER
    _pclose(pipeout); // Windows
#else
    pclose(pipeout);  // Linux
#endif

    // _popen/_pclose may return before the output file is fully closed; wait a bit.
    // https://stackoverflow.com/a/62804585/4926757
    std::this_thread::sleep_for(std::chrono::milliseconds(1000)); // sleep for 1 second
    return 0;
}
import cv2
import numpy as np
import subprocess as sp
import shlex
width, height, n_frames, fps = 1344, 756, 50, 25  # 50 frames, resolution 1344x756, and 25 fps
output_filename = 'output.mp4'

# Launch FFmpeg as a sub-process that reads raw BGR frames from a pipe and
# encodes them into an HEVC (H.265) MP4 file.
#
# Command-line arguments:
#   -y                    overwrite the output file without asking
#   -s {w}x{h}            input resolution (1344x756)
#   -pixel_format bgr24   input pixels are 8-bit BGR
#   -f rawvideo           input is raw (headerless) video
#   -r {fps}              input frame rate (25 fps)
#   -i pipe:              read input from stdin
#   -vcodec libx265       encode with the H.265 (HEVC) codec
#   -pix_fmt yuv420p      output color space YUV420 (smaller than YUV444)
#   -crf 24               constant-quality mode (lower = higher quality, larger file)
#   {output_filename}     output file name (output.mp4)
ffmpeg_cmd = f'ffmpeg -y -s {width}x{height} -pixel_format bgr24 -f rawvideo -r {fps} -i pipe: -vcodec libx265 -pix_fmt yuv420p -crf 24 {output_filename}'
process = sp.Popen(shlex.split(ffmpeg_cmd), stdin=sp.PIPE)

# "Render" each synthetic frame and stream it to FFmpeg's stdin.
for frame_index in range(n_frames):
    label = str(frame_index + 1)
    frame = np.full((height, width, 3), 60, np.uint8)  # dark gray background
    cv2.putText(frame, label, (width//2 - 100*len(label), height//2 + 100),
                cv2.FONT_HERSHEY_DUPLEX, 10, (255, 30, 30), 20)  # big blue frame number
    process.stdin.write(frame.tobytes())  # raw BGR bytes straight into the pipe

process.stdin.close()  # signal end-of-input to FFmpeg
process.wait()         # block until encoding finishes
process.terminate()    # no-op after wait() - the sub-process is already closed
The ffmpeg executable must be in the execution path of the Python script.
For Linux, in case ffmpeg is not in the execution path, you may use the full path:

process = sp.Popen(shlex.split(f'/usr/bin/ffmpeg -y -s {width}x{height} -pixel_format bgr24 -f rawvideo -r {fps} -i pipe: -vcodec libx265 -pix_fmt yuv420p -crf 24 {output_filename}'), stdin=sp.PIPE)

(Assuming the ffmpeg executable is in /usr/bin/.)

Python 3's f-string syntax requires Python version 3.6 or above.
C++ example:
In Python there are multiple FFmpeg bindings that allow H.265 video encoding.
In C++, there are far fewer options...
We may apply a similar solution in C++ (using an FFmpeg sub-process).
For executing the FFmpeg sub-process and opening its stdin pipe, we may use _popen in Windows and popen in Linux.
Note:
- I noticed that _popen is not as reliable as CreateProcess, and we need to wait (say, one second at the end) for the output file to get closed.
  I am not sure if there is a similar issue with popen in Linux.
C++ Code sample:
#include <stdio.h>
#include <chrono>
#include <thread>
#include "opencv2/opencv.hpp"
#include <string>
int main()
{
    // Encode 50 synthetic frames (1344x756 @ 25fps) into an HEVC MP4 by piping
    // raw BGR frames into an FFmpeg sub-process.
    int width = 1344;
    int height = 756;
    int n_frames = 50;
    int fps = 25;
    const std::string output_filename = "output.mp4";

    // FFmpeg command line:
    //  input  - raw BGR24 video read from the stdin pipe
    //  output - MP4 file encoded with HEVC (libx265), CRF 24, YUV420 pixel format
    std::string ffmpeg_cmd = std::string("ffmpeg -y -f rawvideo -r ") + std::to_string(fps) +
        " -video_size " + std::to_string(width) + "x" + std::to_string(height) +
        " -pixel_format bgr24 -i pipe: -vcodec libx265 -crf 24 -pix_fmt yuv420p " + output_filename;

    // Execute FFmpeg as a sub-process and open its stdin pipe for writing.
    // Windows needs _popen with binary mode "wb"; POSIX uses popen.
#ifdef _MSC_VER
    FILE* pipeout = _popen(ffmpeg_cmd.c_str(), "wb"); // Windows (ffmpeg.exe must be in the execution path)
#else
    // https://batchloaf.wordpress.com/2017/02/12/a-simple-way-to-read-and-write-audio-and-video-files-in-c-using-ffmpeg-part-2-video/
    FILE* pipeout = popen(ffmpeg_cmd.c_str(), "w");   // Linux (ffmpeg must be in the execution path)
#endif

    // popen/_popen return NULL when the sub-process cannot be started.
    if (pipeout == nullptr)
    {
        perror("Failed to start FFmpeg sub-process");
        return 1;
    }

    for (int i = 0; i < n_frames; i++)
    {
        // Build a synthetic test frame: dark gray background with a large blue frame number.
        cv::Mat frame = cv::Mat(height, width, CV_8UC3);
        frame = cv::Scalar(60, 60, 60); // Fill background with dark gray
        cv::putText(frame, std::to_string(i + 1),
                    cv::Point(width / 2 - 100 * (int)(std::to_string(i + 1).length()), height / 2 + 100),
                    cv::FONT_HERSHEY_DUPLEX, 10, cv::Scalar(255, 30, 30), 20); // Draw a blue number

        // Write width*height*3 bytes to FFmpeg's stdin.
        // A freshly allocated cv::Mat is continuous, so frame.data is one contiguous buffer.
        const size_t frame_bytes = (size_t)width * height * 3;
        if (fwrite(frame.data, 1, frame_bytes, pipeout) != frame_bytes)
        {
            fprintf(stderr, "Short write to FFmpeg pipe at frame %d - aborting.\n", i);
            break; // FFmpeg most likely exited; stop feeding frames.
        }
    }

    // Flush buffered data and close the pipe (pclose waits for FFmpeg to exit).
    fflush(pipeout);
#ifdef _MSC_VER
    _pclose(pipeout); // Windows
#else
    pclose(pipeout);  // Linux
#endif

    // _popen/_pclose may return before the output file is fully closed; wait a bit.
    // https://stackoverflow.com/a/62804585/4926757
    std::this_thread::sleep_for(std::chrono::milliseconds(1000)); // sleep for 1 second
    return 0;
}
import cv2
import numpy as np
import subprocess as sp
import shlex
width, height, n_frames, fps = 1344, 756, 50, 25  # 50 frames, resolution 1344x756, and 25 fps
output_filename = 'output.mp4'

# Launch FFmpeg as a sub-process that reads raw BGR frames from a pipe and
# encodes them into an HEVC (H.265) MP4 file.
#
# Command-line arguments:
#   -y                    overwrite the output file without asking
#   -s {w}x{h}            input resolution (1344x756)
#   -pixel_format bgr24   input pixels are 8-bit BGR
#   -f rawvideo           input is raw (headerless) video
#   -r {fps}              input frame rate (25 fps)
#   -i pipe:              read input from stdin
#   -vcodec libx265       encode with the H.265 (HEVC) codec
#   -pix_fmt yuv420p      output color space YUV420 (smaller than YUV444)
#   -crf 24               constant-quality mode (lower = higher quality, larger file)
#   {output_filename}     output file name (output.mp4)
ffmpeg_cmd = f'ffmpeg -y -s {width}x{height} -pixel_format bgr24 -f rawvideo -r {fps} -i pipe: -vcodec libx265 -pix_fmt yuv420p -crf 24 {output_filename}'
process = sp.Popen(shlex.split(ffmpeg_cmd), stdin=sp.PIPE)

# "Render" each synthetic frame and stream it to FFmpeg's stdin.
for frame_index in range(n_frames):
    label = str(frame_index + 1)
    frame = np.full((height, width, 3), 60, np.uint8)  # dark gray background
    cv2.putText(frame, label, (width//2 - 100*len(label), height//2 + 100),
                cv2.FONT_HERSHEY_DUPLEX, 10, (255, 30, 30), 20)  # big blue frame number
    process.stdin.write(frame.tobytes())  # raw BGR bytes straight into the pipe

process.stdin.close()  # signal end-of-input to FFmpeg
process.wait()         # block until encoding finishes
process.terminate()    # no-op after wait() - the sub-process is already closed
The ffmpeg executable must be in the execution path of the Python script.
For Linux, in case ffmpeg is not in the execution path, you may use the full path:

process = sp.Popen(shlex.split(f'/usr/bin/ffmpeg -y -s {width}x{height} -pixel_format bgr24 -f rawvideo -r {fps} -i pipe: -vcodec libx265 -pix_fmt yuv420p -crf 24 {output_filename}'), stdin=sp.PIPE)

(Assuming the ffmpeg executable is in /usr/bin/.)

Python 3's f-string syntax requires Python version 3.6 or above.
import cv2
import numpy as np
import subprocess as sp
import shlex
width, height, n_frames, fps = 1344, 756, 50, 25  # 50 frames, resolution 1344x756, and 25 fps
output_filename = 'output.mp4'

# Launch FFmpeg as a sub-process that reads raw BGR frames from a pipe and
# encodes them into an HEVC (H.265) MP4 file.
#
# Command-line arguments:
#   -y                    overwrite the output file without asking
#   -s {w}x{h}            input resolution (1344x756)
#   -pixel_format bgr24   input pixels are 8-bit BGR
#   -f rawvideo           input is raw (headerless) video
#   -r {fps}              input frame rate (25 fps)
#   -i pipe:              read input from stdin
#   -vcodec libx265       encode with the H.265 (HEVC) codec
#   -pix_fmt yuv420p      output color space YUV420 (smaller than YUV444)
#   -crf 24               constant-quality mode (lower = higher quality, larger file)
#   {output_filename}     output file name (output.mp4)
ffmpeg_cmd = f'ffmpeg -y -s {width}x{height} -pixel_format bgr24 -f rawvideo -r {fps} -i pipe: -vcodec libx265 -pix_fmt yuv420p -crf 24 {output_filename}'
process = sp.Popen(shlex.split(ffmpeg_cmd), stdin=sp.PIPE)

# "Render" each synthetic frame and stream it to FFmpeg's stdin.
for frame_index in range(n_frames):
    label = str(frame_index + 1)
    frame = np.full((height, width, 3), 60, np.uint8)  # dark gray background
    cv2.putText(frame, label, (width//2 - 100*len(label), height//2 + 100),
                cv2.FONT_HERSHEY_DUPLEX, 10, (255, 30, 30), 20)  # big blue frame number
    process.stdin.write(frame.tobytes())  # raw BGR bytes straight into the pipe

process.stdin.close()  # signal end-of-input to FFmpeg
process.wait()         # block until encoding finishes
process.terminate()    # no-op after wait() - the sub-process is already closed
The ffmpeg executable must be in the execution path of the Python script.
For Linux, in case ffmpeg is not in the execution path, you may use the full path:

process = sp.Popen(shlex.split(f'/usr/bin/ffmpeg -y -s {width}x{height} -pixel_format bgr24 -f rawvideo -r {fps} -i pipe: -vcodec libx265 -pix_fmt yuv420p -crf 24 {output_filename}'), stdin=sp.PIPE)

(Assuming the ffmpeg executable is in /usr/bin/.)

Python 3's f-string syntax requires Python version 3.6 or above.
C++ example:
In Python there are multiple FFmpeg bindings that allow H.265 video encoding.
In C++, there are far fewer options...
We may apply a similar solution in C++ (using an FFmpeg sub-process).
For executing the FFmpeg sub-process and opening its stdin pipe, we may use _popen in Windows and popen in Linux.
Note:
- I noticed that _popen is not as reliable as CreateProcess, and we need to wait (say, one second at the end) for the output file to get closed.
  I am not sure if there is a similar issue with popen in Linux.
C++ Code sample:
#include <stdio.h>
#include <chrono>
#include <thread>
#include "opencv2/opencv.hpp"
#include <string>
int main()
{
    // Encode 50 synthetic frames (1344x756 @ 25fps) into an HEVC MP4 by piping
    // raw BGR frames into an FFmpeg sub-process.
    int width = 1344;
    int height = 756;
    int n_frames = 50;
    int fps = 25;
    const std::string output_filename = "output.mp4";

    // FFmpeg command line:
    //  input  - raw BGR24 video read from the stdin pipe
    //  output - MP4 file encoded with HEVC (libx265), CRF 24, YUV420 pixel format
    std::string ffmpeg_cmd = std::string("ffmpeg -y -f rawvideo -r ") + std::to_string(fps) +
        " -video_size " + std::to_string(width) + "x" + std::to_string(height) +
        " -pixel_format bgr24 -i pipe: -vcodec libx265 -crf 24 -pix_fmt yuv420p " + output_filename;

    // Execute FFmpeg as a sub-process and open its stdin pipe for writing.
    // Windows needs _popen with binary mode "wb"; POSIX uses popen.
#ifdef _MSC_VER
    FILE* pipeout = _popen(ffmpeg_cmd.c_str(), "wb"); // Windows (ffmpeg.exe must be in the execution path)
#else
    // https://batchloaf.wordpress.com/2017/02/12/a-simple-way-to-read-and-write-audio-and-video-files-in-c-using-ffmpeg-part-2-video/
    FILE* pipeout = popen(ffmpeg_cmd.c_str(), "w");   // Linux (ffmpeg must be in the execution path)
#endif

    // popen/_popen return NULL when the sub-process cannot be started.
    if (pipeout == nullptr)
    {
        perror("Failed to start FFmpeg sub-process");
        return 1;
    }

    for (int i = 0; i < n_frames; i++)
    {
        // Build a synthetic test frame: dark gray background with a large blue frame number.
        cv::Mat frame = cv::Mat(height, width, CV_8UC3);
        frame = cv::Scalar(60, 60, 60); // Fill background with dark gray
        cv::putText(frame, std::to_string(i + 1),
                    cv::Point(width / 2 - 100 * (int)(std::to_string(i + 1).length()), height / 2 + 100),
                    cv::FONT_HERSHEY_DUPLEX, 10, cv::Scalar(255, 30, 30), 20); // Draw a blue number

        // Write width*height*3 bytes to FFmpeg's stdin.
        // A freshly allocated cv::Mat is continuous, so frame.data is one contiguous buffer.
        const size_t frame_bytes = (size_t)width * height * 3;
        if (fwrite(frame.data, 1, frame_bytes, pipeout) != frame_bytes)
        {
            fprintf(stderr, "Short write to FFmpeg pipe at frame %d - aborting.\n", i);
            break; // FFmpeg most likely exited; stop feeding frames.
        }
    }

    // Flush buffered data and close the pipe (pclose waits for FFmpeg to exit).
    fflush(pipeout);
#ifdef _MSC_VER
    _pclose(pipeout); // Windows
#else
    pclose(pipeout);  // Linux
#endif

    // _popen/_pclose may return before the output file is fully closed; wait a bit.
    // https://stackoverflow.com/a/62804585/4926757
    std::this_thread::sleep_for(std::chrono::milliseconds(1000)); // sleep for 1 second
    return 0;
}
import cv2
import numpy as np
import subprocess as sp
import shlex
width, height, n_frames, fps = 1344, 756, 50, 25  # 50 frames, resolution 1344x756, and 25 fps
output_filename = 'output.mp4'

# Open ffmpeg application as sub-process.
# FFmpeg input PIPE: RAW images in BGR color format.
# FFmpeg output MP4 file encoded with HEVC codec.
# Arguments list:
# -y                   Overwrite output file without asking
# -s {width}x{height}  Input resolution width x height (1344x756)
# -pixel_format bgr24  Input frame color format is BGR with 8 bits per color component
# -f rawvideo          Input format: raw video
# -r {fps}             Frame rate: fps (25fps)
# -i pipe:             ffmpeg input is a PIPE
# -vcodec libx265      Video codec: H.265 (HEVC)
# -pix_fmt yuv420p     Output video color space YUV420 (saving space compared to YUV444)
# -crf 24              Constant quality encoding (lower value for higher quality and larger output file)
# {output_filename}    Output file name: output_filename (output.mp4)
#
# Bug fix: the original applied `.split()` to the f-string *inside* shlex.split
# and left the call's parentheses unbalanced (a SyntaxError). shlex.split takes
# the command string directly and returns the argument list.
process = sp.Popen(shlex.split(f'ffmpeg -y -s {width}x{height} -pixel_format bgr24 -f rawvideo -r {fps} -i pipe: -vcodec libx265 -pix_fmt yuv420p -crf 24 {output_filename}'), stdin=sp.PIPE)

# Build synthetic video frames and write them to ffmpeg's input stream.
for i in range(n_frames):
    # "Render" a synthetic frame: dark-gray background with a big blue number.
    img = np.full((height, width, 3), 60, np.uint8)
    cv2.putText(img, str(i+1), (width//2-100*len(str(i+1)), height//2+100), cv2.FONT_HERSHEY_DUPLEX, 10, (255, 30, 30), 20)  # Blue number
    # Write the raw BGR frame to the stdin pipe of the ffmpeg sub-process.
    process.stdin.write(img.tobytes())

process.stdin.close()  # Close stdin so ffmpeg sees end-of-input
process.wait()         # Wait for the encoder sub-process to finish
process.terminate()    # No-op after wait(); the sub-process is already closed
The ffmpeg executable must be in the execution path of the Python script.
For Linux, in case ffmpeg is not in the execution path, you may use the full path:

process = sp.Popen(shlex.split(f'/usr/bin/ffmpeg -y -s {width}x{height} -pixel_format bgr24 -f rawvideo -r {fps} -i pipe: -vcodec libx265 -pix_fmt yuv420p -crf 24 {output_filename}'), stdin=sp.PIPE)

(Assuming the ffmpeg executable is in /usr/bin/.)

Python 3's f-string syntax requires Python version 3.6 or above.
import cv2
import numpy as np
import subprocess as sp
width, height, n_frames, fps = 1344, 756, 50, 25  # 50 frames, resolution 1344x756, and 25 fps
output_filename = 'output.mp4'

# Launch FFmpeg as a sub-process that reads raw BGR frames from a pipe and
# encodes them into an HEVC (H.265) MP4 file.
#
# Command-line arguments:
#   -y                    overwrite the output file without asking
#   -s {w}x{h}            input resolution (1344x756)
#   -pixel_format bgr24   input pixels are 8-bit BGR
#   -f rawvideo           input is raw (headerless) video
#   -r {fps}              input frame rate (25 fps)
#   -i pipe:              read input from stdin
#   -vcodec libx265       encode with the H.265 (HEVC) codec
#   -pix_fmt yuv420p      output color space YUV420 (smaller than YUV444)
#   -crf 24               constant-quality mode (lower = higher quality, larger file)
#   {output_filename}     output file name (output.mp4)
#
# str.split() is sufficient here because no argument contains spaces.
ffmpeg_cmd = f'ffmpeg -y -s {width}x{height} -pixel_format bgr24 -f rawvideo -r {fps} -i pipe: -vcodec libx265 -pix_fmt yuv420p -crf 24 {output_filename}'
process = sp.Popen(ffmpeg_cmd.split(), stdin=sp.PIPE)

# "Render" each synthetic frame and stream it to FFmpeg's stdin.
for frame_index in range(n_frames):
    label = str(frame_index + 1)
    frame = np.full((height, width, 3), 60, np.uint8)  # dark gray background
    cv2.putText(frame, label, (width//2 - 100*len(label), height//2 + 100),
                cv2.FONT_HERSHEY_DUPLEX, 10, (255, 30, 30), 20)  # big blue frame number
    process.stdin.write(frame.tobytes())  # raw BGR bytes straight into the pipe

process.stdin.close()  # signal end-of-input to FFmpeg
process.wait()         # block until encoding finishes
process.terminate()    # no-op after wait() - the sub-process is already closed
The ffmpeg executable must be in the execution path of the Python script.
For Linux, in case ffmpeg is not in the execution path, you may use the full path:

process = sp.Popen(f'/usr/bin/ffmpeg -y -s {width}x{height} -pixel_format bgr24 -f rawvideo -r {fps} -i pipe: -vcodec libx265 -pix_fmt yuv420p -crf 24 {output_filename}'.split(), stdin=sp.PIPE)

(Assuming the ffmpeg executable is in /usr/bin/.)

Python 3's f-string syntax requires Python version 3.6 or above.
import cv2
import numpy as np
import subprocess as sp
import shlex
width, height, n_frames, fps = 1344, 756, 50, 25  # 50 frames, resolution 1344x756, and 25 fps
output_filename = 'output.mp4'

# Launch FFmpeg as a sub-process that reads raw BGR frames from a pipe and
# encodes them into an HEVC (H.265) MP4 file.
#
# Command-line arguments:
#   -y                    overwrite the output file without asking
#   -s {w}x{h}            input resolution (1344x756)
#   -pixel_format bgr24   input pixels are 8-bit BGR
#   -f rawvideo           input is raw (headerless) video
#   -r {fps}              input frame rate (25 fps)
#   -i pipe:              read input from stdin
#   -vcodec libx265       encode with the H.265 (HEVC) codec
#   -pix_fmt yuv420p      output color space YUV420 (smaller than YUV444)
#   -crf 24               constant-quality mode (lower = higher quality, larger file)
#   {output_filename}     output file name (output.mp4)
ffmpeg_cmd = f'ffmpeg -y -s {width}x{height} -pixel_format bgr24 -f rawvideo -r {fps} -i pipe: -vcodec libx265 -pix_fmt yuv420p -crf 24 {output_filename}'
process = sp.Popen(shlex.split(ffmpeg_cmd), stdin=sp.PIPE)

# "Render" each synthetic frame and stream it to FFmpeg's stdin.
for frame_index in range(n_frames):
    label = str(frame_index + 1)
    frame = np.full((height, width, 3), 60, np.uint8)  # dark gray background
    cv2.putText(frame, label, (width//2 - 100*len(label), height//2 + 100),
                cv2.FONT_HERSHEY_DUPLEX, 10, (255, 30, 30), 20)  # big blue frame number
    process.stdin.write(frame.tobytes())  # raw BGR bytes straight into the pipe

process.stdin.close()  # signal end-of-input to FFmpeg
process.wait()         # block until encoding finishes
process.terminate()    # no-op after wait() - the sub-process is already closed
The ffmpeg executable must be in the execution path of the Python script.
For Linux, in case ffmpeg is not in the execution path, you may use the full path:

process = sp.Popen(shlex.split(f'/usr/bin/ffmpeg -y -s {width}x{height} -pixel_format bgr24 -f rawvideo -r {fps} -i pipe: -vcodec libx265 -pix_fmt yuv420p -crf 24 {output_filename}'), stdin=sp.PIPE)

(Assuming the ffmpeg executable is in /usr/bin/.)

Python 3's f-string syntax requires Python version 3.6 or above.