Spaces:
Sleeping
Sleeping
File size: 5,708 Bytes
7b5d347 3da12d3 7b5d347 3da12d3 7b5d347 3da12d3 c6544ea 7b5d347 3da12d3 2ddd4e6 3da12d3 2ddd4e6 3da12d3 2ddd4e6 3da12d3 2ddd4e6 3da12d3 c255cb5 3da12d3 2ddd4e6 c255cb5 3da12d3 7b5d347 3da12d3 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 |
import gradio as gr
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import textwrap
import moviepy.editor as mp
import moviepy.video.fx.all as vfx
# Gradio CSS override: centers the main column and caps its width so the
# app renders as a compact card. Passed to gr.Interface(css=...) below.
css = """
#col-container {
margin: 0 auto;
max-width: 290px;
}
"""
# Smallest size the auto-fit loop may shrink to; prevents ImageFont.truetype
# from being called with a zero/negative size on very long input text.
_MIN_FONT_SIZE = 8


def _wrap_width(font, max_width):
    """Approximate a textwrap column count from a pixel budget.

    textwrap.wrap requires a positive integer width, so divide the pixel
    budget by the pixel width of a space and clamp to at least 1 column.
    """
    return max(1, int(max_width // font.getlength(" ")))


def _fit_font(text, font_path, font, font_size, max_width, image_height):
    """Shrink the font until the wrapped text fits within the 20 px
    top/bottom margins, and return the (possibly smaller) font."""
    while font_size > _MIN_FONT_SIZE:
        lines = textwrap.wrap(text, width=_wrap_width(font, max_width))
        total_height = sum(
            font.getbbox(line)[3] - font.getbbox(line)[1] for line in lines
        )
        if total_height <= image_height - 40:
            break
        font_size -= 1
        font = ImageFont.truetype(font_path, font_size)
    return font


def _render_frames(code_text, format_choice, font, line_spacing, background,
                   text_color, max_width):
    """Build one BGR frame per typed character, each showing the cumulative text."""
    frames = []
    typed = ""
    for char in code_text:
        typed += char
        if format_choice == "Paragraph":
            lines = textwrap.wrap(typed, width=_wrap_width(font, max_width))
        else:
            # "Programming" mode keeps the author's own line breaks.
            # NOTE(review): long lines are not wrapped here and may run past
            # the right margin — confirm this is intended.
            lines = typed.splitlines()
        image = background.copy()
        draw = ImageDraw.Draw(image)
        y_position = 20
        for line in lines:
            draw.text((20, y_position), line, font=font, fill=text_color)
            line_height = font.getbbox(line)[3] - font.getbbox(line)[1]
            y_position += line_height * line_spacing
        # PIL works in RGB; OpenCV's VideoWriter expects BGR.
        frames.append(cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR))
    return frames


def _attach_looped_audio(video_filename, audio_path, out_filename):
    """Loop audio_path over the clip's full duration, mux it in, and
    re-encode to out_filename. Returns out_filename."""
    video = mp.VideoFileClip(video_filename)
    try:
        audio = mp.AudioFileClip(audio_path)
        audio = audio.fx(mp.afx.audio_loop, duration=video.duration)
        video = video.set_audio(audio)
        video.write_videofile(out_filename, codec="libx264")
    finally:
        video.close()  # release the reader's file handle
    return out_filename


def create_typing_video(code_text, format_choice, line_spacing, width_choice,
                        height_choice, font_name="arial.ttf", font_size=18,
                        frame_rate=10, sound_choice=None, custom_audio=None,
                        background_color="black", text_color="white",
                        enhance_quality=False, video_speed="1.0"):
    """Render *code_text* as a character-by-character typing animation MP4.

    Parameters
    ----------
    code_text : str
        Text to animate, one frame per character.
    format_choice : str
        "Paragraph" wraps to the frame width; anything else ("Programming")
        keeps the text's own line breaks.
    line_spacing : float-like
        Multiplier applied to each line's height.
    width_choice, height_choice : int-like
        Frame dimensions in pixels (the UI delivers them as strings).
    font_name : str
        File name under the local ``font/`` directory.
    font_size : int-like
        Starting size; auto-shrunk until the text fits vertically.
    frame_rate : int-like
        Frames per second of the silent base video.
    sound_choice : str or None
        File under ``type-sounds/`` looped as the soundtrack
        (skipped for ``None``/"No Sound").
    custom_audio : str or None
        Path to an uploaded audio file; takes precedence over sound_choice.
    background_color, text_color : str
        PIL color names/values.
    enhance_quality : bool
        If True, upscale to 720 px height and boost color 1.2x.
    video_speed : str
        One of "1x", "1.25x", "1.5x", "1.75x", "2x"; unknown values fall
        back to 1x.

    Returns
    -------
    str
        Path of the final video file written to the working directory.
    """
    font_path = f"font/{font_name}"
    # The Gradio dropdowns deliver numbers as strings — coerce once up front.
    font_size = int(font_size)
    line_spacing = float(line_spacing)
    frame_rate = int(frame_rate)
    image_width, image_height = int(width_choice), int(height_choice)
    max_width = image_width - 40  # 20 px margin on each side

    font = ImageFont.truetype(font_path, font_size)
    font = _fit_font(code_text, font_path, font, font_size, max_width, image_height)

    background = Image.new("RGB", (image_width, image_height), color=background_color)
    frames = _render_frames(code_text, format_choice, font, line_spacing,
                            background, text_color, max_width)

    video_filename = "typed_code_video.mp4"
    out = cv2.VideoWriter(video_filename, cv2.VideoWriter_fourcc(*"mp4v"),
                          frame_rate, (image_width, image_height))
    try:
        for frame in frames:
            out.write(frame)
    finally:
        out.release()

    speed_factor = {
        "1x": 1.0,
        "1.25x": 1.25,
        "1.5x": 1.5,
        "1.75x": 1.75,
        "2x": 2.0,
    }.get(video_speed, 1.0)
    if speed_factor != 1.0:
        # Only re-encode when the speed actually changes; previously the clip
        # was lossily re-encoded even at 1x.
        clip = mp.VideoFileClip(video_filename).fx(vfx.speedx, factor=speed_factor)
        try:
            clip.write_videofile("speed_adjusted_video.mp4", codec="libx264")
        finally:
            clip.close()
        video_filename = "speed_adjusted_video.mp4"

    if sound_choice and sound_choice != "No Sound":
        video_filename = _attach_looped_audio(
            video_filename, f"type-sounds/{sound_choice}",
            "typed_code_video_with_sound.mp4")
    if custom_audio:
        # An uploaded file wins: it replaces any soundtrack attached above.
        video_filename = _attach_looped_audio(
            video_filename, custom_audio,
            "typed_code_video_with_custom_audio.mp4")

    if enhance_quality:
        clip = mp.VideoFileClip(video_filename)
        try:
            clip = clip.fx(vfx.resize, height=720)
            clip = clip.fx(vfx.colorx, 1.2)
            clip.write_videofile("enhanced_" + video_filename, codec="libx264")
        finally:
            clip.close()
        video_filename = "enhanced_" + video_filename

    return video_filename
def generate_video(code_text, format_choice, line_spacing, width_choice, height_choice, font_choice, font_size, sound_choice, custom_audio, background_color, text_color, enhance_quality, video_speed):
    """Gradio callback: forward the UI control values to create_typing_video
    and return the path of the rendered video file."""
    rendering_options = {
        "font_name": font_choice,
        "font_size": font_size,
        "sound_choice": sound_choice,
        "custom_audio": custom_audio,
        "background_color": background_color,
        "text_color": text_color,
        "enhance_quality": enhance_quality,
        "video_speed": video_speed,
    }
    return create_typing_video(
        code_text,
        format_choice,
        line_spacing,
        width_choice,
        height_choice,
        **rendering_options,
    )
# Gradio UI definition: the inputs list maps positionally onto
# generate_video's parameters, in the same order.
iface = gr.Interface(
    fn=generate_video,
    inputs=[
        gr.Textbox(label="Enter Content", lines=10, placeholder="Enter the text to be displayed in the video..."),
        gr.Dropdown(choices=["Paragraph", "Programming"], value="Paragraph", label="Text Format"),
        gr.Dropdown(choices=[1.0, 1.15, 1.5, 2.0, 2.5, 3.0], value=1.5, label="Line Spacing"),
        # Width/height are strings here; the renderer converts them with int().
        gr.Dropdown(choices=["400", "800", "1024", "1280", "1920"], value="800", label="Video Width"),
        gr.Dropdown(choices=["400", "720", "1080", "1440", "2160"], value="400", label="Video Height"),
        # Font files are resolved relative to the local font/ directory.
        gr.Dropdown(choices=["arial.ttf", "times new roman.ttf"], value="arial.ttf", label="Font Choice"),
        gr.Dropdown(choices=["16", "18", "20", "22", "24"], value="18", label="Font Size"),
        # Sound files are resolved relative to the local type-sounds/ directory.
        gr.Dropdown(choices=["No Sound", "Typing.mp3"], value="No Sound", label="Sound Choice"),
        gr.File(label="Upload Custom Audio SFX🔊", type="filepath"),
        gr.Dropdown(choices=["black", "white"], value="black", label="Background Color"),
        gr.Dropdown(choices=["black", "white"], value="white", label="Text Color"),
        gr.Checkbox(label="Enhance Video Quality"),
        # Offer every speed the backend's speed-factor map supports;
        # "1.75x" and "2x" were previously unreachable from the UI.
        gr.Dropdown(choices=["1x", "1.25x", "1.5x", "1.75x", "2x"], value="1x", label="Video Speed"),
    ],
    outputs=gr.Video(label="Typing Video"),
    title="Type Bytes🐧",
    css=css,
)

if __name__ == "__main__":
    # share=True publishes a temporary public Gradio link for the Space.
    iface.launch(share=True)