Answer: No. There is no single library/solution in Python that records video and audio at the same time. You have to implement the two separately and merge the audio and video signals in a smart way to end up with a single video/audio file.
I came up with a solution for the problem you present. My code addresses your three issues:
- It records video and audio simultaneously from the webcam and the microphone.
- It saves the final video/audio file as .AVI.
- Uncommenting the three commented-out cv2.imshow lines in VideoRecorder.record() makes the video be displayed on screen while it is being recorded.
My solution uses pyaudio for the audio recording, opencv for the video recording, and ffmpeg for muxing the two signals. To be able to record both simultaneously, I use multithreading: one thread records the video and a second one the audio. I have uploaded my code to GitHub and also include all the essential parts here.
https://github.com/JRodrigoF/AVrecordeR
Note: opencv is not able to control the frame rate at which the webcam actually captures. It can only specify the desired final frame rate in the encoding of the file, but the webcam usually behaves differently depending on its specs and the light conditions (as far as I found). So the fps has to be controlled at the code level.
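As a quick side check (not part of the recorder itself), a minimal sketch along these lines can estimate the rate your camera actually delivers before you settle on the fps value used in the code below; the device index 0 and the 60-frame sample size are just assumptions for illustration:

```python
import time
import cv2

# Rough estimate of the frame rate the webcam actually delivers.
# Device index 0 and a 60-frame sample are arbitrary choices for this sketch.
cap = cv2.VideoCapture(0)
frames_to_sample = 60
count = 0
start = time.time()
for _ in range(frames_to_sample):
    ret, _ = cap.read()
    if not ret:
        break
    count += 1
elapsed = time.time() - start
cap.release()
if count:
    print("approx. capture fps: %.2f" % (count / elapsed))
```

The complete recorder code follows.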
```python
import cv2
import pyaudio
import wave
import threading
import time
import subprocess
import os


class VideoRecorder():
    # Video class based on openCV

    def __init__(self):
        self.open = True
        self.device_index = 0
        self.fps = 6                 # fps should be the minimum constant rate at which the camera can
        self.fourcc = "MJPG"         # capture images (with no decrease in speed over time; testing is required)
        self.frameSize = (640, 480)  # video formats and sizes also depend on and vary with the camera used
        self.video_filename = "temp_video.avi"
        self.video_cap = cv2.VideoCapture(self.device_index)
        self.video_writer = cv2.VideoWriter_fourcc(*self.fourcc)
        self.video_out = cv2.VideoWriter(self.video_filename, self.video_writer, self.fps, self.frameSize)
        self.frame_counts = 1
        self.start_time = time.time()

    # Video starts being recorded
    def record(self):
        # counter = 1
        timer_start = time.time()
        timer_current = 0
        while self.open:
            ret, video_frame = self.video_cap.read()
            if ret:
                self.video_out.write(video_frame)
                # print(str(counter) + " " + str(self.frame_counts) + " frames written " + str(timer_current))
                self.frame_counts += 1
                # counter += 1
                # timer_current = time.time() - timer_start
                time.sleep(0.16)  # 0.16 s delay -> ~6 fps
                # Uncomment the three lines below to display the video while recording:
                # gray = cv2.cvtColor(video_frame, cv2.COLOR_BGR2GRAY)
                # cv2.imshow('video_frame', gray)
                # cv2.waitKey(1)
            else:
                break

    # Finishes the video recording, and therefore the thread too
    def stop(self):
        if self.open:
            self.open = False
            self.video_out.release()
            self.video_cap.release()
            cv2.destroyAllWindows()

    # Launches the video recording function using a thread
    def start(self):
        video_thread = threading.Thread(target=self.record)
        video_thread.start()


class AudioRecorder():
    # Audio class based on pyAudio and Wave

    def __init__(self):
        self.open = True
        self.rate = 44100
        self.frames_per_buffer = 1024
        self.channels = 2
        self.format = pyaudio.paInt16
        self.audio_filename = "temp_audio.wav"
        self.audio = pyaudio.PyAudio()
        self.stream = self.audio.open(format=self.format,
                                      channels=self.channels,
                                      rate=self.rate,
                                      input=True,
                                      frames_per_buffer=self.frames_per_buffer)
        self.audio_frames = []

    # Audio starts being recorded
    def record(self):
        self.stream.start_stream()
        while self.open:
            data = self.stream.read(self.frames_per_buffer)
            self.audio_frames.append(data)
            if not self.open:
                break

    # Finishes the audio recording, and therefore the thread too
    def stop(self):
        if self.open:
            self.open = False
            self.stream.stop_stream()
            self.stream.close()
            self.audio.terminate()
            waveFile = wave.open(self.audio_filename, 'wb')
            waveFile.setnchannels(self.channels)
            waveFile.setsampwidth(self.audio.get_sample_size(self.format))
            waveFile.setframerate(self.rate)
            waveFile.writeframes(b''.join(self.audio_frames))
            waveFile.close()

    # Launches the audio recording function using a thread
    def start(self):
        audio_thread = threading.Thread(target=self.record)
        audio_thread.start()


def start_AVrecording(filename):
    global video_thread
    global audio_thread
    video_thread = VideoRecorder()
    audio_thread = AudioRecorder()
    audio_thread.start()
    video_thread.start()
    return filename


def start_video_recording(filename):
    global video_thread
    video_thread = VideoRecorder()
    video_thread.start()
    return filename


def start_audio_recording(filename):
    global audio_thread
    audio_thread = AudioRecorder()
    audio_thread.start()
    return filename


def stop_AVrecording(filename):
    audio_thread.stop()
    frame_counts = video_thread.frame_counts
    elapsed_time = time.time() - video_thread.start_time
    recorded_fps = frame_counts / elapsed_time
    print("total frames " + str(frame_counts))
    print("elapsed time " + str(elapsed_time))
    print("recorded fps " + str(recorded_fps))
    video_thread.stop()

    # Makes sure the threads have finished
    while threading.active_count() > 1:
        time.sleep(1)

    # Merging the audio and video signals
    if abs(recorded_fps - 6) >= 0.01:
        # If the actual fps was higher/lower than expected, re-encode to the expected rate first
        print("Re-encoding")
        cmd = "ffmpeg -r " + str(recorded_fps) + " -i temp_video.avi -pix_fmt yuv420p -r 6 temp_video2.avi"
        subprocess.call(cmd, shell=True)
        print("Muxing")
        cmd = "ffmpeg -ac 2 -channel_layout stereo -i temp_audio.wav -i temp_video2.avi -pix_fmt yuv420p " + filename + ".avi"
        subprocess.call(cmd, shell=True)
    else:
        print("Normal recording\nMuxing")
        cmd = "ffmpeg -ac 2 -channel_layout stereo -i temp_audio.wav -i temp_video.avi -pix_fmt yuv420p " + filename + ".avi"
        subprocess.call(cmd, shell=True)
        print("..")


# Required and wanted processing of the final files
def file_manager(filename):
    local_path = os.getcwd()
    if os.path.exists(str(local_path) + "/temp_audio.wav"):
        os.remove(str(local_path) + "/temp_audio.wav")
    if os.path.exists(str(local_path) + "/temp_video.avi"):
        os.remove(str(local_path) + "/temp_video.avi")
    if os.path.exists(str(local_path) + "/temp_video2.avi"):
        os.remove(str(local_path) + "/temp_video2.avi")
    if os.path.exists(str(local_path) + "/" + filename + ".avi"):
        os.remove(str(local_path) + "/" + filename + ".avi")
```
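For completeness, here is one way these functions might be driven from a small script appended to the same module; the "test" output name and the 10-second duration are placeholders for illustration, and file_manager() is called first to clear leftovers from a previous run (including an existing test.avi, which ffmpeg would otherwise prompt about before overwriting):

```python
if __name__ == "__main__":
    filename = "test"       # hypothetical output name; the final file becomes test.avi
    file_manager(filename)  # remove temp files and any previous output of the same name
    start_AVrecording(filename)
    time.sleep(10)          # record for ~10 seconds (arbitrary duration for this example)
    stop_AVrecording(filename)
    print("Done")
```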