config.py
"""Settings for audio reactive LED strip"""
from __future__ import print_function
from __future__ import division
import os
from utils import hsv2rgb
import dsp

configurations = {
    'default': {
        # GUI Configuration
        # ----------------------------------
        # Whether or not to display a PyQtGraph GUI plot of the visualization
        'USE_GUI': True,
        # Whether to display debug information
        'DEBUG': True,
        # Target GUI framerate. Will warn when this can't be met.
        'FPS_GUI': 60,

        # PulseAudio Configuration
        # ----------------------------------------------
        # Input and output have to use the same sample rate and number of channels.
        # Any unavoidable differences should be dealt with using ALSA's pcm_rate.
        # Hardware sample rate
        'SAMPLE_RATE': 44100,
        # Number of channels
        'CHANNELS': 2,
        # PulseAudio input mode. Choose from
        # ['default_sink', 'default_source', 'sink_by_name', 'source_by_name']
        'AUDIO_INPUT_MODE': 'default_sink',
        # Full PulseAudio sink/source name.
        # Only used for ['sink_by_name', 'source_by_name']
        'AUDIO_INPUT_NAME': '',

        # LED Output
        # ----------------------------------
        # IP address(es) of the WLED ESP8266 controllers.
        'UDP_IP': ['192.168.0.53', '192.168.0.57'],
        # Port number used for socket communication between Python and the ESP8266
        'UDP_PORT': 21324,
        # Number of pixels in the LED strip (should match the WLED settings)
        'N_PIXELS': 40,
        # Target LED framerate. Will warn when this can't be met.
        'FPS_LED': 90,

        # FFT Settings
        # ----------------------------------
        # Frequencies below this value will be removed during audio processing
        'MIN_FREQUENCY': 50,
        # Frequencies above this value will be removed during audio processing
        'MAX_FREQUENCY': 18000,
        # Number of frequency bins to use when transforming audio to the frequency domain
        'FFT_N_BINS': 100,
        # Length (ms) of the rolling audio window to be used. Will be adjusted to
        # improve FFT performance.
        'FFT_WINDOW_LENGTH': 25
    }
}
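
# A minimal sketch (not part of the original file) of how a caller might pick the
# active configuration and fill in the derived 'fft_samples_per_window' key that
# visualizations() below expects. The helper name and the exact derivation are
# assumptions; the real project may compute this value elsewhere.
def load_config(name='default'):
    """Return a copy of the named configuration with derived values added."""
    cfg = dict(configurations[name])
    # Assumed derivation: number of samples covered by one FFT window, from the
    # hardware sample rate (Hz) and the window length (ms).
    cfg['fft_samples_per_window'] = int(cfg['SAMPLE_RATE'] * cfg['FFT_WINDOW_LENGTH'] / 1000)
    return cfg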

# Return a dict of available visualizations
def visualizations(config):
    import numpy as np
    import dsp
    N_PIXELS = config['N_PIXELS']

    # A valid visualization is a function with the signature
    #   (spectrum, waveform, fft_data) -> np.ndarray of shape (4, N_PIXELS)
    # where spectrum holds FFT_N_BINS values, waveform covers the rolling audio
    # window, and fft_data is the raw (fft, freqs) pair. Returned values are
    # scaled to 0..255.

    def visualize_waveform(_, waveform, __):
        interpolated = dsp.interpolate(waveform, N_PIXELS)
        clipped = np.clip(interpolated - 0.5, 0, 1) * 50
        zeros = np.zeros(N_PIXELS)
        return np.array([clipped, clipped, clipped, zeros])

    def visualize_spectrum(spectrum, _, __):
        interpolated = dsp.interpolate(spectrum, N_PIXELS)
        pixels = np.array([
            np.clip(1 * np.log(interpolated * 10), 0, 1),
            np.clip(0.3 * np.log(interpolated * 10), 0, 1),
            np.clip(0.3 * interpolated, 0, 1),
            np.tile(0, N_PIXELS),
        ])
        return pixels * 255

    smoothing = dsp.ExpFilter(np.tile(1e-1, N_PIXELS), alpha_decay=0.1, alpha_rise=0.7)

    def visualize_spectrum_smooth(spectrum, _, __):
        interpolated = dsp.interpolate(spectrum, N_PIXELS)
        interpolated = smoothing.update(interpolated)
        pixels = np.array([
            np.clip(1 * np.log(interpolated * 10), 0, 1),
            np.clip(0.3 * np.log(interpolated * 10), 0, 1),
            np.clip(0.3 * interpolated, 0, 1),
            np.tile(0, N_PIXELS),
        ])
        return pixels * 255

    def visualize_spectrum_2(y, _, __):
        interpolated = dsp.interpolate(y, N_PIXELS)
        log_part = np.log(interpolated * 10)
        log_part /= 3
        log_part = 0.5 + np.clip(log_part, 0, 0.5)

        def color_from_value(x):
            return hsv2rgb(x, 1, x)

        colors = np.array([color_from_value(h) for h in log_part]).transpose()
        pixels = np.array([
            colors[0],
            colors[1],
            colors[2],
            np.clip(0.3 * interpolated, 0, 1),
        ])
        return pixels * 255

    indices = None
    fft_smoothing = dsp.ExpFilter(np.tile(1e-1, int(config['fft_samples_per_window'] / 2)),
                                  alpha_decay=0.1, alpha_rise=0.7)

    def folded_fourier(_, __, fft_data):
        nonlocal indices
        fft, freqs = fft_data
        output = np.zeros((4, N_PIXELS), dtype=np.float64)
        fft = np.log(fft / 100)
        # Lazily compute the FFT bin index at which each octave (55 Hz, 110 Hz, ...) ends
        if indices is None:
            indices = [0]
            f = 55
            done = False
            while not done:
                compatible = max([j for j, freq in enumerate(freqs) if freq < f])
                # print(f'{compatible} of {len(freqs)}')
                done = compatible == len(freqs) - 1
                f *= 2
                indices.append(compatible)
            # print(indices)
        # Stretch each octave band across the whole strip and blend them together
        for i in range(1, len(indices) - 1):
            prominence = abs(1 / (0.49999 - (i / len(indices) - 1))) ** 2
            color = np.tile(hsv2rgb(i / len(indices), 1, prominence), (N_PIXELS, 1)).transpose()
            value = np.clip(dsp.interpolate(fft[indices[i - 1]:indices[i]], N_PIXELS), 0, 1)
            fold = np.array([color[0] * value, color[1] * value, color[2] * value, np.zeros(N_PIXELS)])
            # print(f'shapes: {color.shape} and {value.shape} and {fold.shape}')
            # print(fold)
            output += fold
        return output * 255

    return {
        'smooth': visualize_spectrum_smooth,
        'spectrum': visualize_spectrum,
        'waveform': visualize_waveform,
        'spectrum2': visualize_spectrum_2,
        'folded': folded_fourier
    }
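
# Hedged usage sketch (not part of the original file): how a render loop might
# obtain and call one of the visualizations above. The dummy spectrum and the
# hard-coded fft_samples_per_window value are placeholders, not project values.
if __name__ == '__main__':
    import numpy as np
    cfg = dict(configurations['default'], fft_samples_per_window=1024)
    visualize = visualizations(cfg)['spectrum']
    dummy_spectrum = np.linspace(0.01, 1.0, cfg['FFT_N_BINS'])
    pixels = visualize(dummy_spectrum, None, None)
    print(pixels.shape)  # expected: (4, N_PIXELS)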