File size: 1,355 Bytes
c6798a7
2fbf82e
 
 
feff2d4
2fbf82e
d92f370
2fbf82e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
feff2d4
2fbf82e
 
 
 
c6798a7
b3f2f5e
2fbf82e
bb6f0f4
2fbf82e
 
b3f2f5e
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import gradio as gr
import librosa
from tensorflow.keras.models import load_model
import numpy as np
import audio2numpy as a2n
# Load the pre-trained genre-classification model.
# NOTE(review): requires BBNN_model.hdf5 next to this script; load_model
# will raise at import time if the file is missing.
model = load_model("BBNN_model.hdf5")

# Parameters used when extracting mel-spectrogram features.
target_sr = 22050        # sample rate (Hz) the audio is assumed to use
frame_size = 2048        # STFT window length (n_fft)
frame_shift_len = 1024   # hop length between successive STFT frames
n_mels = 128             # number of mel frequency bands

# Mapping from model output index to human-readable genre label
# (10-genre taxonomy — presumably GTZAN; verify against training data).
genre_classes = {
    0: "Blues",
    1: "Classical",
    2: "Country",
    3: "Disco",
    4: "Hiphop",
    5: "Jazz",
    6: "Metal",
    7: "Pop",
    8: "Reggae",
    9: "Rock"
}
def get_melspec_feature(X, target_sr, frame_size, frame_shift_len, n_mels):
    """Compute log-scaled mel-spectrogram features for a batch of clips.

    Parameters
    ----------
    X : iterable of 1-D np.ndarray
        Audio waveforms. Assumed to share one length so the results stack
        into a single rectangular array — TODO confirm with callers.
    target_sr : int
        Sample rate the waveforms are already at.
    frame_size : int
        STFT window length (passed to librosa as ``n_fft``).
    frame_shift_len : int
        Hop length between successive frames.
    n_mels : int
        Number of mel bands per frame.

    Returns
    -------
    np.ndarray
        Shape ``(len(X), n_frames, n_mels)``, dtype float32.
    """
    melspec_feature = []
    for audio in X:
        # BUG FIX: n_mels was accepted but never forwarded, so librosa's
        # default band count was always used and the argument was ignored.
        audio_melspec = librosa.feature.melspectrogram(
            y=audio,
            sr=target_sr,
            n_fft=frame_size,
            hop_length=frame_shift_len,
            n_mels=n_mels,
        )
        # Power -> dB for a perceptually meaningful (log) scale.
        audio_melspec = librosa.power_to_db(audio_melspec)
        # Transpose to (time, mel) so frames are the leading axis,
        # matching the model's expected input layout.
        audio_melspec = audio_melspec.T
        melspec_feature.append(audio_melspec)
    return np.array(melspec_feature, dtype=np.float32)


def predict_genre(audio):
    """Predict genre probabilities for one clip from the Gradio widget.

    Parameters
    ----------
    audio : tuple(int, np.ndarray)
        Gradio's Audio component delivers ``(sample_rate, samples)``;
        samples may be int PCM and may be stereo (2-D) — TODO confirm
        against the installed gradio version.

    Returns
    -------
    dict
        Genre name -> probability for every class in ``genre_classes``.
    """
    # BUG FIX: the raw (sr, samples) tuple was previously passed straight
    # into get_melspec_feature, which iterated it as a batch of waveforms.
    # Unpack it and normalize to float mono at target_sr first.
    sr, samples = audio
    if np.issubdtype(samples.dtype, np.integer):
        # Integer PCM -> float in [-1, 1], as librosa expects.
        samples = samples.astype(np.float32) / np.iinfo(samples.dtype).max
    else:
        samples = samples.astype(np.float32)
    if samples.ndim > 1:
        # Down-mix stereo to mono.
        samples = samples.mean(axis=1)
    if sr != target_sr:
        samples = librosa.resample(y=samples, orig_sr=sr, target_sr=target_sr)

    melspec = get_melspec_feature(
        [samples], target_sr, frame_size, frame_shift_len, n_mels
    )
    prediction = model.predict(melspec)[0]
    # BUG FIX: previously range(5) silently dropped classes 5-9
    # (Jazz..Rock). Return every class; the Label output component
    # selects its own top-k for display.
    return {
        genre_classes[i]: float(prediction[i])
        for i in range(len(genre_classes))
    }


# Wire the prediction function into a Gradio web UI and start the server.
# NOTE(review): gr.inputs / gr.outputs is the pre-3.0 Gradio namespace;
# newer releases expose gr.Audio / gr.Label directly — confirm the pinned
# gradio version before upgrading.
iface = gr.Interface(
    predict_genre, 
    inputs=gr.inputs.Audio(),                     # mic / file upload
    outputs=gr.outputs.Label(num_top_classes=5),  # show 5 most likely genres
    title="Music Genre Classifier",
    live=True  # re-run prediction whenever the input changes
)
iface.launch()