1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
|
#!/bin/env python
# neural style transfer video generator
# 2020-02-11 Oliver Meckmann
from gooey import Gooey, GooeyParser
import subprocess
import os
import glob
import shlex
@Gooey(default_size=(800, 600), terminal_font_color="#FFFFFF",
       header_bg_color="#000000")
def init():
    """Build the Gooey-backed CLI/GUI parser and return the parsed args.

    Returns:
        argparse.Namespace with string-valued fields: style_image,
        input_video, output_dir, fps, image_size, style_weight,
        content_weight.

    Note: without ``type=`` every field comes back from ``parse_args()``
    as a string, so the defaults are given as strings too (the original
    int defaults were a type inconsistency waiting to surface downstream,
    where the values are concatenated into command lines).
    """
    parser = GooeyParser(
        description="nstvid - Neural Style Transfer Video Creator")
    parser.add_argument("style_image", help="Style image",
                        widget="FileChooser")
    parser.add_argument("input_video", help="Source video",
                        widget="FileChooser",
                        default="/home/pux/tmp/nstest/in.mp4")
    parser.add_argument("output_dir", help="Where your video is",
                        widget="DirChooser",
                        default="/home/pux/tmp/nstest")
    parser.add_argument("fps", help="Framerate", default="12")
    parser.add_argument("image_size", help="Pixels", default="384")
    parser.add_argument("style_weight", default="1000")
    parser.add_argument("content_weight", default="5")
    return parser.parse_args()
def main():
    """Run the full style-transfer pipeline.

    Steps: (1) split the input video into numbered JPEG frames,
    (2) extract the audio track, (3) stylize each frame with
    neural_style.py and blend consecutive frames with opticalflow.py,
    (4) re-encode the blended frames together with the audio.

    Reads the module-level ``params`` namespace produced by ``init()``.
    All external tools are invoked with argument lists (no shell), so
    paths containing spaces or shell metacharacters are safe, and
    ``check=True`` aborts the pipeline if any tool fails instead of
    silently continuing with missing intermediate files.
    """
    import shutil  # local: only needed for the last-frame fallback below

    out = params.output_dir

    # 1. Extract frames. Use %05d (zero-padded): the blend lookups and the
    #    final merge below expect zero-padded numbers; the original "%5d"
    #    padded with spaces and broke both.
    print("Running ffmpeg extract images")
    subprocess.run(
        ["ffmpeg", "-i", params.input_video, "-r", str(params.fps),
         "-f", "image2", os.path.join(out, "image-%05d.jpg")],
        check=True)

    # 2. Extract the audio track so it can be muxed back in at the end.
    print("Running ffmpeg extract audio")
    subprocess.run(
        ["ffmpeg", "-y", "-i", params.input_video,
         os.path.join(out, "rawaudio.wav")],
        check=True)

    for fn in sorted(glob.glob(os.path.join(out, "image-*.jpg"))):
        # Frame number = zero-padded digits in "image-NNNNN.jpg".
        # (The original sliced the full path with len(os.linesep) — which
        # is 2 on Windows — where os.sep was intended; basename slicing
        # is platform-independent.)
        num = os.path.basename(fn)[len("image-"):-len(".jpg")]
        print("Processing loop: new iteration")
        print("Num: " + num + ", input file: " + fn)

        generated = os.path.join(out, "generated-" + num + ".jpg")
        command = [
            "python", "neural_style.py",
            "-style_image", params.style_image,
            "-content_image", fn,
            "-output_image", generated,
            "-image_size", str(params.image_size),
            "-style_weight", str(params.style_weight),
            "-content_weight", str(params.content_weight),
            "-cudnn_autotune",
            # NOTE(review): "000085" is passed verbatim from the original;
            # possibly "0.00085" was intended — confirm against
            # neural_style.py's -tv_weight handling.
            "-tv_weight", "000085", "-num_iterations", "1000",
            "-print_iter", "50",
            "-init", "image", "-optimizer", "lbfgs",
            "-learning_rate", "1.0",
            "-lbfgs_num_correction", "100",
            "-style_scale", "1.0",
            "-pooling", "max",
            "-backend", "cudnn",
            "-seed", "100",
            "-content_layers",
            "relu1_1,relu2_1,relu3_1,relu4_1,relu4_2,relu5_1",
            "-style_layers", "relu3_1,relu4_1,relu4_2,relu5_1",
            "-multidevice_strategy", "4,7,29"]
        subprocess.run(command, check=True)

        # 3. Optical-flow blend against the NEXT source frame.
        next_frame = os.path.join(
            out, "image-" + str(int(num) + 1).zfill(5) + ".jpg")
        blended = os.path.join(out, "blended-" + num + ".jpg")
        if os.path.exists(next_frame):
            print("Running opticalflow: num: " + num)
            subprocess.run(
                ["python", "opticalflow.py",
                 "--input_1", fn,
                 "--input_1_styled", generated,
                 "--input_2", next_frame,
                 "--output", blended,
                 "--alpha", "0.55",
                 "--flowblend", "False"],
                check=True)
        else:
            # Last frame has no successor to flow against (the original
            # crashed here); reuse the styled frame so the numbered
            # blended-* sequence has no gap for the merge step.
            shutil.copyfile(generated, blended)

    # 4. Re-encode the blended frames with the original audio.
    print("Running ffmpeg merge")
    subprocess.run(
        ["ffmpeg", "-y", "-framerate", str(params.fps),
         "-i", os.path.join(out, "blended-%05d.jpg"),
         "-i", os.path.join(out, "rawaudio.wav"),
         "-c:v", "libx264", "-preset", "veryslow", "-qp", "0",
         "-pix_fmt", "yuv420p",
         os.path.join(out, "out.mp4")],
        check=True)
if __name__ == '__main__':
    # params is intentionally module-level: main() reads it as a global
    # rather than taking it as a parameter.
    params = init()
    main()
|