CS 181 Practical: Classifying the Sounds of NYC

Authors: Blake Bullwinkel, Mark Penrod

In [1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import librosa
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import confusion_matrix, plot_confusion_matrix, accuracy_score
from tensorflow.keras.layers import Input, LSTM, Bidirectional, Dense, Dropout
from tensorflow.keras.models import Model, Sequential
from tensorflow.data import Dataset

Data Loading

In [2]:
!gsutil cp gs://cs181_practical_data/Xtrain_amp.npy ./
!gsutil cp gs://cs181_practical_data/ytrain_amp.npy ./

!gsutil cp gs://cs181_practical_data/Xtest_amp.npy ./
!gsutil cp gs://cs181_practical_data/ytest_amp.npy ./

!gsutil cp gs://cs181_practical_data/Xtrain_mel.npy ./
!gsutil cp gs://cs181_practical_data/ytrain_mel.npy ./

!gsutil cp gs://cs181_practical_data/Xtest_mel.npy ./
!gsutil cp gs://cs181_practical_data/ytest_mel.npy ./
Copying gs://cs181_practical_data/Xtrain_amp.npy...
| [1 files][934.2 MiB/934.2 MiB]   65.1 MiB/s                                   
Operation completed over 1 objects/934.2 MiB.                                    
Copying gs://cs181_practical_data/ytrain_amp.npy...
/ [1 files][ 43.5 KiB/ 43.5 KiB]                                                
Operation completed over 1 objects/43.5 KiB.                                     
Copying gs://cs181_practical_data/Xtest_amp.npy...
| [1 files][369.6 MiB/369.6 MiB]                                                
Operation completed over 1 objects/369.6 MiB.                                    
Copying gs://cs181_practical_data/ytest_amp.npy...
/ [1 files][ 17.3 KiB/ 17.3 KiB]                                                
Operation completed over 1 objects/17.3 KiB.                                     
Copying gs://cs181_practical_data/Xtrain_mel.npy...
/ [1 files][235.9 MiB/235.9 MiB]                                                
Operation completed over 1 objects/235.9 MiB.                                    
Copying gs://cs181_practical_data/ytrain_mel.npy...
/ [1 files][ 43.5 KiB/ 43.5 KiB]                                                
Operation completed over 1 objects/43.5 KiB.                                     
Copying gs://cs181_practical_data/Xtest_mel.npy...
- [1 files][ 93.3 MiB/ 93.3 MiB]                                                
Operation completed over 1 objects/93.3 MiB.                                     
Copying gs://cs181_practical_data/ytest_mel.npy...
/ [1 files][ 17.3 KiB/ 17.3 KiB]                                                
Operation completed over 1 objects/17.3 KiB.                                     

Load raw amplitude data

In [3]:
# Load train data
X_amp_train = np.load("Xtrain_amp.npy")
y_amp_train = np.load("ytrain_amp.npy")
In [4]:
X_amp_train.shape
Out[4]:
(5553, 44100)
In [5]:
# Load test data
X_amp_test = np.load("Xtest_amp.npy")
y_amp_test = np.load("ytest_amp.npy")
In [6]:
X_amp_test.shape
Out[6]:
(2197, 44100)

Load Mel spectrogram data

In [7]:
# Load train data
X_mel_train = np.load("Xtrain_mel.npy")
y_mel_train = np.load("ytrain_mel.npy")
In [8]:
X_mel_train.shape
Out[8]:
(5553, 128, 87)
In [9]:
# Load test data
X_mel_test = np.load("Xtest_mel.npy")
y_mel_test = np.load("ytest_mel.npy")
In [10]:
X_mel_test.shape
Out[10]:
(2197, 128, 87)

Generate Spectrogram

In [11]:
def make_spec(amp_i):
    mel_i = librosa.feature.melspectrogram(y=amp_i, sr=22050)
    assert mel_i.shape == (128,87)
    mel_i = mel_i.reshape(1,mel_i.shape[0],mel_i.shape[1])
    return mel_i

def amps_to_specs(fold='train'):
    amps = np.load('X{}_amp.npy'.format(fold))
    ys = np.load('y{}_amp.npy'.format(fold))
    mels = [make_spec(amp) for amp in amps]
    del amps # save memory
    mels = np.concatenate(mels,axis=0)
    np.save('X{}_mel.npy'.format(fold),mels)
    np.save('y{}_mel.npy'.format(fold),ys)
    del mels # save memory

Part A: Feature Engineering (PCA)

In [12]:
# Flatten Mel spectrogram and normalize data
X_mel_train_flatten = X_mel_train.reshape((X_mel_train.shape[0], -1))
X_mel_test_flatten = X_mel_test.reshape((X_mel_test.shape[0], -1))

# normalize the data
amp_scaler = MinMaxScaler().fit(X_amp_train)
mel_scaler = MinMaxScaler().fit(X_mel_train_flatten)

# amplitude
X_amp_train_scaled = amp_scaler.transform(X_amp_train)
X_amp_test_scaled= amp_scaler.transform(X_amp_test)

# mel spectrogram
X_mel_train_scaled = mel_scaler.transform(X_mel_train_flatten)
X_mel_test_scaled= mel_scaler.transform(X_mel_test_flatten)
In [13]:
# Get PCA components

# amplitude
amp_PCA = PCA(n_components=500).fit(X_amp_train_scaled)
PCA_amp_train = amp_PCA.transform(X_amp_train_scaled)
PCA_amp_test = amp_PCA.transform(X_amp_test_scaled)

# mel spectrogram
mel_PCA = PCA(n_components=500).fit(X_mel_train_scaled)
PCA_mel_train = mel_PCA.transform(X_mel_train_scaled)
PCA_mel_test = mel_PCA.transform(X_mel_test_scaled)
In [14]:
# variance explained
print(f'The first 500 PCs account for {np.sum(amp_PCA.explained_variance_ratio_)*100:0.3}% of the variation in the Amplitude data')
print()
print(f'The first 500 PCs account for {np.sum(mel_PCA.explained_variance_ratio_)*100:0.3}% of the variation in training Mel spectrogram data')
The first 500 PCs account for 60.1% of the variation in the Amplitude data

The first 500 PCs account for 92.6% of the variation in training Mel spectrogram data
In [15]:
# function to print accuracy
def display_accuracies(data_type, y_train, y_test, train_preds, test_preds):

    # confusion matrix
    train_conf_mtx = confusion_matrix(y_train, train_preds)
    test_conf_mtx = confusion_matrix(y_test, test_preds)
    # compute accuracies from confusion matrix
    accs_train = [train_conf_mtx[i][i]/np.sum(train_conf_mtx[i,:]) for i in range(10)]
    accs_test = [test_conf_mtx[i][i]/np.sum(test_conf_mtx[i,:]) for i in range(10)]

    # display
    print(f'Class Accuracies {data_type}')
    print('----------------------------')
    print('Train')
    for i in range(10):
    print(f'Class {i} accuracy: {accs_train[i] * 100:0.3}%')
    print(f'Overall accuracy: {accuracy_score(y_train, train_preds)*100:0.3}%')

    print('\nTest:')
    for i in range(10):
    print(f'Class {i} accuracy: {accs_test[i] * 100:0.3}%')
    print(f'Overall accuracy: {accuracy_score(y_test, test_preds)*100:0.3}%')

    # return accuracies for plotting
    return accs_train, accs_test
In [16]:
# save accuracies for plotting
amp_train_accs_by_model = []
amp_test_accs_by_model = []

mel_train_accs_by_model = []
mel_test_accs_by_model = []

Naïve Model

In [17]:
# naive model: predict class based on frequency in data set
classes = range(10)
probs = np.array([12.6, 3.54, 12.53, 9.42, 10.93, 12.98, 1.49, 11.85, 12.03, 12.61])
probs = probs/sum(probs)
naive_train_preds = np.random.choice(classes, size=len(y_amp_train), p=probs)
naive_test_preds = np.random.choice(classes, size=len(y_amp_test), p=probs)
In [18]:
# Report accuracy
amp_train_accs_m0, amp_test_accs_m0 = display_accuracies('Amplitude', y_amp_train, y_amp_test, naive_train_preds, naive_test_preds)
print()
mel_train_accs_m0, mel_test_accs_m0 = display_accuracies('Mel spectrogram', y_mel_train, y_mel_test, naive_train_preds, naive_test_preds)

# save
amp_train_accs_by_model.append(amp_train_accs_m0)
amp_test_accs_by_model.append(amp_test_accs_m0)

mel_train_accs_by_model.append(mel_train_accs_m0)
mel_test_accs_by_model.append(mel_test_accs_m0)
Class Accuracies Amplitude
----------------------------
Train
Class 0 accuracy: 10.0%
Class 1 accuracy: 6.09%
Class 2 accuracy: 11.4%
Class 3 accuracy: 9.56%
Class 4 accuracy: 12.0%
Class 5 accuracy: 15.5%
Class 6 accuracy: 1.2%
Class 7 accuracy: 9.42%
Class 8 accuracy: 11.7%
Class 9 accuracy: 15.1%
Overall accuracy: 11.6%

Test:
Class 0 accuracy: 8.0%
Class 1 accuracy: 5.13%
Class 2 accuracy: 11.0%
Class 3 accuracy: 8.73%
Class 4 accuracy: 10.2%
Class 5 accuracy: 12.9%
Class 6 accuracy: 0.0%
Class 7 accuracy: 9.75%
Class 8 accuracy: 12.7%
Class 9 accuracy: 16.0%
Overall accuracy: 11.0%

Class Accuracies Mel spectrogram
----------------------------
Train
Class 0 accuracy: 10.0%
Class 1 accuracy: 6.09%
Class 2 accuracy: 11.4%
Class 3 accuracy: 9.56%
Class 4 accuracy: 12.0%
Class 5 accuracy: 15.5%
Class 6 accuracy: 1.2%
Class 7 accuracy: 9.42%
Class 8 accuracy: 11.7%
Class 9 accuracy: 15.1%
Overall accuracy: 11.6%

Test:
Class 0 accuracy: 8.0%
Class 1 accuracy: 5.13%
Class 2 accuracy: 11.0%
Class 3 accuracy: 8.73%
Class 4 accuracy: 10.2%
Class 5 accuracy: 12.9%
Class 6 accuracy: 0.0%
Class 7 accuracy: 9.75%
Class 8 accuracy: 12.7%
Class 9 accuracy: 16.0%
Overall accuracy: 11.0%

Logistic Regression

In [ ]:
# train logistic regression using PCA

# Amplitude 
log_model_amp_m1 = LogisticRegression(multi_class='multinomial', penalty='none', max_iter=2000).fit(PCA_amp_train, y_amp_train)
amp_train_preds_m1 = log_model_amp_m1.predict(PCA_amp_train)
amp_test_preds_m1 = log_model_amp_m1.predict(PCA_amp_test)

# Mel spectrogram 
log_model_mel_m1 = LogisticRegression(multi_class='multinomial', penalty='none', max_iter=2000).fit(PCA_mel_train, y_mel_train)
mel_train_preds_m1 = log_model_mel_m1.predict(PCA_mel_train)
mel_test_preds_m1 = log_model_mel_m1.predict(PCA_mel_test)
/usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_logistic.py:940: ConvergenceWarning: lbfgs failed to converge (status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.

Increase the number of iterations (max_iter) or scale the data as shown in:
    https://scikit-learn.org/stable/modules/preprocessing.html
Please also refer to the documentation for alternative solver options:
    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
  extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)
In [ ]:
# Report accuracy
amp_train_accs_m1, amp_test_accs_m1 = display_accuracies('Amplitude', y_amp_train, y_amp_test, amp_train_preds_m1, amp_test_preds_m1)
print()
mel_train_accs_m1, mel_test_accs_m1 = display_accuracies('Mel spectrogram', y_mel_train, y_mel_test, mel_train_preds_m1, mel_test_preds_m1)

# save
amp_train_accs_by_model.append(amp_train_accs_m1)
amp_test_accs_by_model.append(amp_test_accs_m1)

mel_train_accs_by_model.append(mel_train_accs_m1)
mel_test_accs_by_model.append(mel_test_accs_m1)
Class Accuracies Amplitude
----------------------------
Train
Class 0 accuracy: 41.9%
Class 1 accuracy: 38.6%
Class 2 accuracy: 50.4%
Class 3 accuracy: 30.8%
Class 4 accuracy: 36.4%
Class 5 accuracy: 45.2%
Class 6 accuracy: 63.9%
Class 7 accuracy: 40.9%
Class 8 accuracy: 41.8%
Class 9 accuracy: 40.0%
Overall accuracy: 41.6%

Test:
Class 0 accuracy: 20.7%
Class 1 accuracy: 7.69%
Class 2 accuracy: 51.8%
Class 3 accuracy: 6.55%
Class 4 accuracy: 9.47%
Class 5 accuracy: 14.0%
Class 6 accuracy: 6.67%
Class 7 accuracy: 10.6%
Class 8 accuracy: 12.3%
Class 9 accuracy: 15.7%
Overall accuracy: 18.2%

Class Accuracies Mel spectrogram
----------------------------
Train
Class 0 accuracy: 73.4%
Class 1 accuracy: 92.4%
Class 2 accuracy: 80.3%
Class 3 accuracy: 77.4%
Class 4 accuracy: 91.3%
Class 5 accuracy: 70.9%
Class 6 accuracy: 96.4%
Class 7 accuracy: 91.6%
Class 8 accuracy: 77.2%
Class 9 accuracy: 73.7%
Overall accuracy: 80.0%

Test:
Class 0 accuracy: 26.3%
Class 1 accuracy: 43.6%
Class 2 accuracy: 49.5%
Class 3 accuracy: 24.9%
Class 4 accuracy: 33.3%
Class 5 accuracy: 19.7%
Class 6 accuracy: 70.0%
Class 7 accuracy: 30.5%
Class 8 accuracy: 45.3%
Class 9 accuracy: 19.0%
Overall accuracy: 31.8%

Logistic Regression + L2 Regularization

In [ ]:
# train logistic regression using PCA

# Amplitude 
log_model_amp_m2 = LogisticRegression(multi_class='multinomial', max_iter=2000).fit(PCA_amp_train, y_amp_train)
amp_train_preds_m2 = log_model_amp_m2.predict(PCA_amp_train)
amp_test_preds_m2 = log_model_amp_m2.predict(PCA_amp_test)

# Mel spectrogram 
log_model_mel_m2 = LogisticRegression(multi_class='multinomial', max_iter=2000).fit(PCA_mel_train, y_mel_train)
mel_train_preds_m2 = log_model_mel_m2.predict(PCA_mel_train)
mel_test_preds_m2 = log_model_mel_m2.predict(PCA_mel_test)
In [ ]:
# Report accuracy
amp_train_accs_m2, amp_test_accs_m2 = display_accuracies('Amplitude', y_amp_train, y_amp_test, amp_train_preds_m2, amp_test_preds_m2)
print()
mel_train_accs_m2, mel_test_accs_m2 = display_accuracies('Mel spectrogram', y_mel_train, y_mel_test, mel_train_preds_m2, mel_test_preds_m2)

# save
amp_train_accs_by_model.append(amp_train_accs_m2)
amp_test_accs_by_model.append(amp_test_accs_m2)

mel_train_accs_by_model.append(mel_train_accs_m2)
mel_test_accs_by_model.append(mel_test_accs_m2)
Class Accuracies Amplitude
----------------------------
Train
Class 0 accuracy: 40.1%
Class 1 accuracy: 31.5%
Class 2 accuracy: 57.2%
Class 3 accuracy: 28.7%
Class 4 accuracy: 35.4%
Class 5 accuracy: 42.7%
Class 6 accuracy: 57.8%
Class 7 accuracy: 41.9%
Class 8 accuracy: 41.0%
Class 9 accuracy: 38.3%
Overall accuracy: 41.1%

Test:
Class 0 accuracy: 22.0%
Class 1 accuracy: 2.56%
Class 2 accuracy: 61.2%
Class 3 accuracy: 4.8%
Class 4 accuracy: 9.85%
Class 5 accuracy: 16.3%
Class 6 accuracy: 3.33%
Class 7 accuracy: 11.4%
Class 8 accuracy: 11.0%
Class 9 accuracy: 17.3%
Overall accuracy: 19.8%

Class Accuracies Mel spectrogram
----------------------------
Train
Class 0 accuracy: 47.0%
Class 1 accuracy: 44.7%
Class 2 accuracy: 86.5%
Class 3 accuracy: 40.2%
Class 4 accuracy: 63.1%
Class 5 accuracy: 45.1%
Class 6 accuracy: 63.9%
Class 7 accuracy: 56.2%
Class 8 accuracy: 47.2%
Class 9 accuracy: 42.1%
Overall accuracy: 53.5%

Test:
Class 0 accuracy: 26.3%
Class 1 accuracy: 56.4%
Class 2 accuracy: 85.6%
Class 3 accuracy: 19.2%
Class 4 accuracy: 40.2%
Class 5 accuracy: 22.7%
Class 6 accuracy: 30.0%
Class 7 accuracy: 37.3%
Class 8 accuracy: 41.9%
Class 9 accuracy: 27.0%
Overall accuracy: 38.4%

Logistic + L2 + Class Weighting

In [ ]:
# train logistic regression using PCA

# Amplitude 
log_model_amp_m3 = LogisticRegression(multi_class='multinomial', class_weight='balanced', max_iter=2000).fit(PCA_amp_train, y_amp_train)
amp_train_preds_m3 = log_model_amp_m3.predict(PCA_amp_train)
amp_test_preds_m3 = log_model_amp_m3.predict(PCA_amp_test)

# Mel spectrogram 
log_model_mel_m3 = LogisticRegression(multi_class='multinomial', class_weight='balanced', max_iter=2000).fit(PCA_mel_train, y_mel_train)
mel_train_preds_m3 = log_model_mel_m3.predict(PCA_mel_train)
mel_test_preds_m3 = log_model_mel_m3.predict(PCA_mel_test)
In [ ]:
# Report accuracy
amp_train_accs_m3, amp_test_accs_m3 = display_accuracies('Amplitude', y_amp_train, y_amp_test, amp_train_preds_m3, amp_test_preds_m3)
print()
mel_train_accs_m3, mel_test_accs_m3 = display_accuracies('Mel spectrogram', y_mel_train, y_mel_test, mel_train_preds_m3, mel_test_preds_m3)

# save
amp_train_accs_by_model.append(amp_train_accs_m3)
amp_test_accs_by_model.append(amp_test_accs_m3)

mel_train_accs_by_model.append(mel_train_accs_m3)
mel_test_accs_by_model.append(mel_test_accs_m3)
Class Accuracies Amplitude
----------------------------
Train
Class 0 accuracy: 36.7%
Class 1 accuracy: 56.3%
Class 2 accuracy: 48.7%
Class 3 accuracy: 36.9%
Class 4 accuracy: 37.1%
Class 5 accuracy: 38.7%
Class 6 accuracy: 75.9%
Class 7 accuracy: 38.8%
Class 8 accuracy: 38.9%
Class 9 accuracy: 34.1%
Overall accuracy: 40.0%

Test:
Class 0 accuracy: 22.0%
Class 1 accuracy: 15.4%
Class 2 accuracy: 57.2%
Class 3 accuracy: 8.73%
Class 4 accuracy: 11.7%
Class 5 accuracy: 14.8%
Class 6 accuracy: 10.0%
Class 7 accuracy: 9.32%
Class 8 accuracy: 11.9%
Class 9 accuracy: 14.7%
Overall accuracy: 19.6%

Class Accuracies Mel spectrogram
----------------------------
Train
Class 0 accuracy: 46.1%
Class 1 accuracy: 67.5%
Class 2 accuracy: 82.3%
Class 3 accuracy: 45.9%
Class 4 accuracy: 63.9%
Class 5 accuracy: 42.3%
Class 6 accuracy: 84.3%
Class 7 accuracy: 58.8%
Class 8 accuracy: 46.3%
Class 9 accuracy: 39.0%
Overall accuracy: 54.0%

Test:
Class 0 accuracy: 26.3%
Class 1 accuracy: 76.9%
Class 2 accuracy: 84.3%
Class 3 accuracy: 27.9%
Class 4 accuracy: 38.6%
Class 5 accuracy: 23.1%
Class 6 accuracy: 40.0%
Class 7 accuracy: 38.6%
Class 8 accuracy: 41.9%
Class 9 accuracy: 24.3%
Overall accuracy: 39.3%
In [ ]:
# plot via transforming to dataframe

# Amplitude Train
amp_train_data = pd.DataFrame(np.transpose(amp_train_accs_by_model), columns=['Naïve Model', 'Model 1', 'Model 2', 'Model 3'])
amp_train_plot = amp_train_data.plot.bar()
amp_train_plot.set_xticklabels(range(10), rotation=0)
amp_train_plot.set_ylim(0,1)
amp_train_plot.set_xlabel('Class')
amp_train_plot.set_ylabel('Accuracy')
amp_train_plot.legend(bbox_to_anchor=(1.4, 1))
amp_train_plot.set_title('Class Accuracies by Model – Amplitude (train)');
In [ ]:
# Amplitude Test
amp_test_data = pd.DataFrame(np.transpose(amp_test_accs_by_model), columns=['Naïve Model', 'Model 1', 'Model 2', 'Model 3'])
amp_test_plot = amp_test_data.plot.bar()
amp_test_plot.set_xticklabels(range(10), rotation=0)
amp_test_plot.set_ylim(0,1)
amp_test_plot.set_xlabel('Class')
amp_test_plot.set_ylabel('Accuracy')
amp_test_plot.legend(bbox_to_anchor=(1.4, 1))
amp_test_plot.set_title('Class Accuracies by Model – Amplitude (test)');
In [ ]:
# Mel spectrogram Train
mel_train_data = pd.DataFrame(np.transpose(mel_train_accs_by_model), columns=['Naïve Model', 'Model 1', 'Model 2', 'Model 3'])
mel_train_plot = mel_train_data.plot.bar()
mel_train_plot.set_xticklabels(range(10), rotation=0)
mel_train_plot.set_ylim(0,1)
mel_train_plot.set_xlabel('Class')
mel_train_plot.set_ylabel('Accuracy')
mel_train_plot.legend(bbox_to_anchor=(1.05, 1))
mel_train_plot.set_title('Class Accuracies by Model – Mel spectrogram (train)');
In [ ]:
# Mel spectrogram Test
mel_test_data = pd.DataFrame(np.transpose(mel_test_accs_by_model), columns=['Naïve Model', 'Model 1', 'Model 2', 'Model 3'])
mel_test_plot = mel_test_data.plot.bar()
mel_test_plot.set_xticklabels(range(10), rotation=0)
mel_test_plot.set_ylim(0,1)
mel_test_plot.set_xlabel('Class')
mel_test_plot.set_ylabel('Accuracy')
mel_test_plot.legend(bbox_to_anchor=(1.4, 1))
mel_test_plot.set_title('Class Accuracies by Model – Mel spectrogram (test)');

Part B: Model Classes

In [ ]:
from imblearn.over_sampling import SMOTE

# Balance the amplitude training data
sm = SMOTE()
X_amp_train_balanced, y_amp_train_balanced = sm.fit_resample(X_amp_train, y_amp_train)

# normalize the data
amp_scaler = MinMaxScaler().fit(X_amp_train_balanced)

# amplitude
X_amp_train_balanced_scaled = amp_scaler.transform(X_amp_train_balanced)
X_amp_test_scaled = amp_scaler.transform(X_amp_test)

# Get PCA components
# amplitude
amp_PCA_balanced = PCA(n_components=500).fit(X_amp_train_balanced_scaled)
PCA_amp_train_balanced = amp_PCA_balanced.transform(X_amp_train_balanced_scaled)
PCA_amp_test = amp_PCA_balanced.transform(X_amp_test_scaled)
In [ ]:
# Train kNN classifiers on the balanced amplitude PCA data

# Amplitude data
amp_knn = KNeighborsClassifier(n_neighbors=3)
amp_knn.fit(PCA_amp_train_balanced, y_amp_train_balanced)
amp_knn_train_pred = amp_knn.predict(PCA_amp_train_balanced)
amp_knn_test_pred = amp_knn.predict(PCA_amp_test)

knn_amp_train_accs, knn_amp_test_accs = display_accuracies('Amplitude', y_amp_train_balanced, y_amp_test, amp_knn_train_pred, amp_knn_test_pred)
Class Accuracies Amplitude
----------------------------
Train
Class 0 accuracy: 83.2%
Class 1 accuracy: 99.0%
Class 2 accuracy: 52.6%
Class 3 accuracy: 77.3%
Class 4 accuracy: 48.7%
Class 5 accuracy: 40.6%
Class 6 accuracy: 99.3%
Class 7 accuracy: 39.4%
Class 8 accuracy: 52.1%
Class 9 accuracy: 23.4%
Overall accuracy: 61.6%

Test:
Class 0 accuracy: 37.7%
Class 1 accuracy: 10.3%
Class 2 accuracy: 14.0%
Class 3 accuracy: 25.3%
Class 4 accuracy: 6.82%
Class 5 accuracy: 15.2%
Class 6 accuracy: 20.0%
Class 7 accuracy: 1.27%
Class 8 accuracy: 37.3%
Class 9 accuracy: 4.0%
Overall accuracy: 17.5%

kNN Classifier

In [19]:
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
In [20]:
# Train kNN classifiers on the PCA data

# Amplitude data
amp_knn = KNeighborsClassifier(n_neighbors=3)
amp_knn.fit(PCA_amp_train, y_amp_train)
amp_knn_train_pred = amp_knn.predict(PCA_amp_train)
amp_knn_test_pred = amp_knn.predict(PCA_amp_test)

# Spectrogram data
mel_knn = KNeighborsClassifier(n_neighbors=5)
mel_knn.fit(PCA_mel_train, y_mel_train)
mel_knn_train_pred = mel_knn.predict(PCA_mel_train)
mel_knn_test_pred = mel_knn.predict(PCA_mel_test)
In [21]:
# Report accuracy
knn_amp_train_accs, knn_amp_test_accs = display_accuracies('Amplitude', y_amp_train, y_amp_test, amp_knn_train_pred, amp_knn_test_pred)
print()
knn_mel_train_accs, knn_mel_test_accs = display_accuracies('Mel spectrogram', y_mel_train, y_mel_test, mel_knn_train_pred, mel_knn_test_pred)
Class Accuracies Amplitude
----------------------------
Train
Class 0 accuracy: 87.0%
Class 1 accuracy: 66.0%
Class 2 accuracy: 59.9%
Class 3 accuracy: 54.3%
Class 4 accuracy: 30.1%
Class 5 accuracy: 41.1%
Class 6 accuracy: 36.1%
Class 7 accuracy: 23.7%
Class 8 accuracy: 50.0%
Class 9 accuracy: 21.9%
Overall accuracy: 46.7%

Test:
Class 0 accuracy: 43.7%
Class 1 accuracy: 2.56%
Class 2 accuracy: 18.4%
Class 3 accuracy: 19.2%
Class 4 accuracy: 4.17%
Class 5 accuracy: 17.4%
Class 6 accuracy: 6.67%
Class 7 accuracy: 0.0%
Class 8 accuracy: 42.8%
Class 9 accuracy: 2.0%
Overall accuracy: 18.1%

Class Accuracies Mel spectrogram
----------------------------
Train
Class 0 accuracy: 93.6%
Class 1 accuracy: 68.5%
Class 2 accuracy: 61.5%
Class 3 accuracy: 52.4%
Class 4 accuracy: 86.0%
Class 5 accuracy: 92.1%
Class 6 accuracy: 75.9%
Class 7 accuracy: 91.8%
Class 8 accuracy: 77.1%
Class 9 accuracy: 37.9%
Overall accuracy: 74.3%

Test:
Class 0 accuracy: 35.0%
Class 1 accuracy: 46.2%
Class 2 accuracy: 38.5%
Class 3 accuracy: 19.7%
Class 4 accuracy: 56.4%
Class 5 accuracy: 31.8%
Class 6 accuracy: 43.3%
Class 7 accuracy: 37.3%
Class 8 accuracy: 60.2%
Class 9 accuracy: 14.3%
Overall accuracy: 36.5%

Random Forest Classifier

In [22]:
# Train random forest classifiers on the PCA data

# Amplitude data
amp_rfc = RandomForestClassifier(n_estimators=100, max_depth=20, min_samples_leaf=1)
amp_rfc.fit(PCA_amp_train, y_amp_train)
amp_rfc_train_pred = amp_knn.predict(PCA_amp_train)
amp_rfc_test_pred = amp_knn.predict(PCA_amp_test)

# Spectrogram data
mel_rfc = RandomForestClassifier(n_estimators=100, max_depth=20, min_samples_leaf=1)
mel_rfc.fit(PCA_mel_train, y_mel_train)
mel_rfc_train_pred = mel_knn.predict(PCA_mel_train)
mel_rfc_test_pred = mel_knn.predict(PCA_mel_test)
In [23]:
# Report accuracy
rfc_amp_train_accs, rfc_amp_test_accs = display_accuracies('Amplitude', y_amp_train, y_amp_test, amp_rfc_train_pred, amp_rfc_test_pred)
print()
rfc_mel_train_accs, rfc_mel_test_accs = display_accuracies('Mel spectrogram', y_mel_train, y_mel_test, mel_rfc_train_pred, mel_rfc_test_pred)
Class Accuracies Amplitude
----------------------------
Train
Class 0 accuracy: 87.0%
Class 1 accuracy: 66.0%
Class 2 accuracy: 59.9%
Class 3 accuracy: 54.3%
Class 4 accuracy: 30.1%
Class 5 accuracy: 41.1%
Class 6 accuracy: 36.1%
Class 7 accuracy: 23.7%
Class 8 accuracy: 50.0%
Class 9 accuracy: 21.9%
Overall accuracy: 46.7%

Test:
Class 0 accuracy: 43.7%
Class 1 accuracy: 2.56%
Class 2 accuracy: 18.4%
Class 3 accuracy: 19.2%
Class 4 accuracy: 4.17%
Class 5 accuracy: 17.4%
Class 6 accuracy: 6.67%
Class 7 accuracy: 0.0%
Class 8 accuracy: 42.8%
Class 9 accuracy: 2.0%
Overall accuracy: 18.1%

Class Accuracies Mel spectrogram
----------------------------
Train
Class 0 accuracy: 93.6%
Class 1 accuracy: 68.5%
Class 2 accuracy: 61.5%
Class 3 accuracy: 52.4%
Class 4 accuracy: 86.0%
Class 5 accuracy: 92.1%
Class 6 accuracy: 75.9%
Class 7 accuracy: 91.8%
Class 8 accuracy: 77.1%
Class 9 accuracy: 37.9%
Overall accuracy: 74.3%

Test:
Class 0 accuracy: 35.0%
Class 1 accuracy: 46.2%
Class 2 accuracy: 38.5%
Class 3 accuracy: 19.7%
Class 4 accuracy: 56.4%
Class 5 accuracy: 31.8%
Class 6 accuracy: 43.3%
Class 7 accuracy: 37.3%
Class 8 accuracy: 60.2%
Class 9 accuracy: 14.3%
Overall accuracy: 36.5%

Feed Forward Neural Network

In [24]:
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.utils import to_categorical

# Train FFNN on the amplitude data

# One-hot encode the class labels
y_amp_train_categorical = to_categorical(y_amp_train)
y_amp_test_categorical = to_categorical(y_amp_test)
y_mel_train_categorical = to_categorical(y_mel_train)
y_mel_test_categorical = to_categorical(y_mel_test)

# Define the FFNN
def ffnn():
    ffnn = Sequential()
    ffnn.add(Dense(500, input_dim=500, activation='relu'))
    ffnn.add(Dropout(0.5))
    ffnn.add(Dense(300, activation='relu'))
    ffnn.add(Dropout(0.5))
    ffnn.add(Dense(100, activation='relu'))
    ffnn.add(Dense(10, activation='softmax'))
    ffnn.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return ffnn

# Amplitude data
amp_ffnn = ffnn()
amp_ffnn.fit(PCA_amp_train, y_amp_train_categorical, verbose=0, epochs=100)
amp_ffnn_train_pred = amp_ffnn.predict(PCA_amp_train)
amp_ffnn_test_pred = amp_ffnn.predict(PCA_amp_test)
amp_ffnn_train_pred = [np.argmax(pred) for pred in amp_ffnn_train_pred]
amp_ffnn_test_pred = [np.argmax(pred) for pred in amp_ffnn_test_pred]

# Spectrogram data
mel_ffnn = ffnn()
mel_ffnn.fit(PCA_mel_train, y_mel_train_categorical, verbose=0, epochs=100)
mel_ffnn_train_pred = mel_ffnn.predict(PCA_mel_train)
mel_ffnn_test_pred = mel_ffnn.predict(PCA_mel_test)
mel_ffnn_train_pred = [np.argmax(pred) for pred in mel_ffnn_train_pred]
mel_ffnn_test_pred = [np.argmax(pred) for pred in mel_ffnn_test_pred]
In [25]:
# Report accuracy
ffnn_amp_train_accs, ffnn_amp_test_accs = display_accuracies('Amplitude', y_amp_train, y_amp_test, amp_ffnn_train_pred, amp_ffnn_test_pred)
print()
ffnn_mel_train_accs, ffnn_mel_test_accs = display_accuracies('Mel spectrogram', y_mel_train, y_mel_test, mel_ffnn_train_pred, mel_ffnn_test_pred)
Class Accuracies Amplitude
----------------------------
Train
Class 0 accuracy: 99.9%
Class 1 accuracy: 98.0%
Class 2 accuracy: 97.6%
Class 3 accuracy: 95.8%
Class 4 accuracy: 98.7%
Class 5 accuracy: 1e+02%
Class 6 accuracy: 96.4%
Class 7 accuracy: 99.5%
Class 8 accuracy: 1e+02%
Class 9 accuracy: 98.1%
Overall accuracy: 98.7%

Test:
Class 0 accuracy: 27.0%
Class 1 accuracy: 2.56%
Class 2 accuracy: 30.4%
Class 3 accuracy: 13.1%
Class 4 accuracy: 16.7%
Class 5 accuracy: 34.8%
Class 6 accuracy: 6.67%
Class 7 accuracy: 14.0%
Class 8 accuracy: 42.4%
Class 9 accuracy: 21.7%
Overall accuracy: 24.5%

Class Accuracies Mel spectrogram
----------------------------
Train
Class 0 accuracy: 80.9%
Class 1 accuracy: 94.9%
Class 2 accuracy: 85.5%
Class 3 accuracy: 81.1%
Class 4 accuracy: 97.2%
Class 5 accuracy: 84.5%
Class 6 accuracy: 94.0%
Class 7 accuracy: 97.7%
Class 8 accuracy: 96.7%
Class 9 accuracy: 83.1%
Overall accuracy: 88.6%

Test:
Class 0 accuracy: 23.3%
Class 1 accuracy: 66.7%
Class 2 accuracy: 43.8%
Class 3 accuracy: 40.6%
Class 4 accuracy: 56.4%
Class 5 accuracy: 56.1%
Class 6 accuracy: 50.0%
Class 7 accuracy: 30.9%
Class 8 accuracy: 83.1%
Class 9 accuracy: 40.3%
Overall accuracy: 46.5%

Class Accuracies

In [26]:
def plot_class_accuracies(accuracies, model_names, data_label, train_or_test, anchor):
    data = pd.DataFrame(np.transpose(accuracies), columns=model_names)
    accs_plot = data.plot.bar()
    accs_plot.set_xticklabels(range(len(accuracies[0])), rotation=0)
    accs_plot.set_ylim(0,1.1)
    accs_plot.set_xlabel('Class')
    accs_plot.set_ylabel('Accuracy')
    accs_plot.legend(bbox_to_anchor=anchor)
    accs_plot.set_title(f'Class Accuracy by Model – {data_label} ({train_or_test})')
    plt.show()
In [27]:
# Amplitude train
plot_class_accuracies([knn_amp_train_accs, rfc_amp_train_accs, ffnn_amp_train_accs],
                      ['KNN', 'Random Forest', 'FFNN'],
                      'Amplitude',
                      'train',
                      anchor=(1.05,1))
In [28]:
# Amplitude test
plot_class_accuracies([knn_amp_test_accs, rfc_amp_test_accs, ffnn_amp_test_accs],
                      ['KNN', 'Random Forest', 'FFNN'],
                      'Amplitude',
                      'test',
                      anchor=(1.4,1))
In [29]:
# Mel spectrogram train
plot_class_accuracies([knn_mel_train_accs, rfc_mel_train_accs, ffnn_mel_train_accs],
                      ['KNN', 'Random Forest', 'FFNN'],
                      'Mel spectrogram',
                      'train',
                      anchor=(1.4,1))
In [30]:
# Mel spectrogram test
plot_class_accuracies([knn_mel_test_accs, rfc_mel_test_accs, ffnn_mel_test_accs],
                      ['KNN', 'Random Forest', 'FFNN'],
                      'Mel spectrogram',
                      'test',
                      anchor=(1.4,1))

Part C: Hyperparameter Tuning and Validation

In [ ]:
from sklearn.model_selection import ParameterGrid

# Define a function to do hyper-parameter optimization via grid search
def grid_search(inputTrain, targetTrain, inputTest, 
                targetTest, max_depth, min_samples_leaf):
    '''
    INPUTS
    ======
    inputTrain: list or np.ndarray of training input
    targetTrain: list or np.ndarray of training target
    inputTest: list or np.ndarray of test input
    targestTest: list or np.ndarray of test target
    max_depth: list of np.ndarray of max_depth values to search
    min_samples_leaf: list of np.ndarray of min_samples_leaf values to search
    
    RETURNS
    =======
    accuracy_vals: 2d np.ndarray of test (overall) accuracy values
    '''
    # Set the parameter grid
    param_grid = {'max_depth': max_depth, 'min_samples_leaf': min_samples_leaf}

    # Set the grid to be searched
    grid = ParameterGrid(param_grid)

    # Initialize a 2d numpy array to hold accuracy values
    accuracy_vals = np.zeros((len(min_samples_leaf), len(max_depth)))

    # Loop through the grid parameters
    for params in grid:

        # Get spectral radius value and index
        max_depth_val = params['max_depth']
        max_depth_idx = np.where(max_depth==max_depth_val)[0][0]

        # Get sparsity value and index
        min_samples_leaf_val = params['min_samples_leaf']
        min_samples_leaf_idx = np.where(min_samples_leaf==min_samples_leaf_val)[0][0]

        # Initialize a random forest classifier
        rfc = RandomForestClassifier(
            n_estimators=100, 
            max_depth=max_depth_val, 
            min_samples_leaf=min_samples_leaf_val)

        # Fit to the training data
        rfc.fit(inputTrain, targetTrain)

        # Predict on the test data
        test_pred = rfc.predict(inputTest)

        # Calculate the accuracy of the predictions
        test_accuracy = accuracy_score(targetTest, test_pred)

        # Insert the accuracy in accuracy_vals
        accuracy_vals[min_samples_leaf_idx][max_depth_idx] = test_accuracy
        
    # Return the 2d array of accuracy values
    return accuracy_vals
In [ ]:
# Set values of max_depth and min_samples_leaf to search
max_depth_vals = np.array([20, 30, 40, 50, 60])
min_samples_leaf_vals = np.array([1, 2, 3, 4, 5])

# Perform grid search for amplitude data
amp_accuracy_vals = grid_search(inputTrain=PCA_amp_train, targetTrain=y_amp_train, inputTest=PCA_amp_test, 
                targetTest=y_amp_test, max_depth=max_depth_vals, min_samples_leaf=min_samples_leaf_vals)
In [ ]:
# Plot a heatmap to visualize the hyper-parameter search
plt.figure(figsize=(8,5))
plt.imshow(amp_accuracy_vals, cmap=plt.cm.hot)
plt.xlabel('max_depth', fontsize=15)
plt.ylabel('min_samples_leaf', fontsize=15)
plt.xticks([0,1,2,3,4], max_depth_vals)
plt.yticks([0,1,2,3,4], min_samples_leaf_vals)
plt.title('Amplitude heatmap for hyper-parameter search', fontsize=15)
cbar = plt.colorbar()
cbar.ax.set_ylabel('Test Accuracy', rotation=270, fontsize=12, labelpad=16)
plt.tight_layout()

# Print out the optimal test accuracy
print(f'The best test accuracy found optimizing hyper-parameters for the amplitude data is {np.max(amp_accuracy_vals):.4f}')

# Extract the optimal hyper-parameters
best_min_samples_idx, best_max_depth_idx = np.where(amp_accuracy_vals == np.max(amp_accuracy_vals))
best_min_samples = min_samples_leaf_vals[best_min_samples_idx][0]
best_max_depth = max_depth_vals[best_max_depth_idx][0]
print(f"The optimal max_depth value is: {best_max_depth}")
print(f"The optimal min_samples_leaf value is: {best_min_samples}")
The best test accuracy found optimizing hyper-parameters for the amplitude data is 0.2736
The optimal max_depth value is: 30
The optimal min_samples_leaf value is: 4
In [ ]:
# Perform grid search for mel spectrogram data
mel_accuracy_vals = grid_search(inputTrain=PCA_mel_train, targetTrain=y_mel_train, inputTest=PCA_mel_test, 
                targetTest=y_mel_test, max_depth=max_depth_vals, min_samples_leaf=min_samples_leaf_vals)
In [ ]:
# Plot a heatmap to visualize the hyper-parameter search
plt.figure(figsize=(8,5))
plt.imshow(mel_accuracy_vals, cmap=plt.cm.hot)
plt.xlabel('max_depth', fontsize=15)
plt.ylabel('min_samples_leaf', fontsize=15)
plt.xticks([0,1,2,3,4], max_depth_vals)
plt.yticks([0,1,2,3,4], min_samples_leaf_vals)
plt.title('Mel spectrogram heatmap for hyper-parameter search', fontsize=15)
cbar = plt.colorbar()
cbar.ax.set_ylabel('Test Accuracy', rotation=270, fontsize=12, labelpad=16)
plt.tight_layout()

# Print out the optimal test accuracy
print(f'The best test accuracy found optimizing hyper-parameters for the Mel spectrogram data is {np.max(mel_accuracy_vals):.4f}')

# Extract the optimal hyper-parameters
best_min_samples_idx, best_max_depth_idx = np.where(mel_accuracy_vals == np.max(mel_accuracy_vals))
best_min_samples = min_samples_leaf_vals[best_min_samples_idx][0]
best_max_depth = max_depth_vals[best_max_depth_idx][0]
print(f"The optimal max_depth value is: {best_max_depth}")
print(f"The optimal min_samples_leaf value is: {best_min_samples}")
The best test accuracy found optimizing hyper-parameters for the Mel spectrogram data is 0.4192
The optimal max_depth value is: 20
The optimal min_samples_leaf value is: 3
In [ ]:
# Perform hyperparameter tuning for kNN
n_neighbors_list = [1,2,3,4,5,6]

# Initialize empty arrays to hold test accuracies
knn_amp_test_accuracies = []
knn_mel_test_accuracies = []

# Loop throug the values of k
for k in n_neighbors_list:
    amp_knn = KNeighborsClassifier(n_neighbors=k)
    amp_knn.fit(PCA_amp_train, y_amp_train)
    amp_knn_test_pred = amp_knn.predict(PCA_amp_test)
    mel_knn = KNeighborsClassifier(n_neighbors=k)
    mel_knn.fit(PCA_mel_train, y_mel_train)
    mel_knn_test_pred = mel_knn.predict(PCA_mel_test)
    amp_test_accuracy = accuracy_score(y_amp_test, amp_knn_test_pred)
    mel_test_accuracy = accuracy_score(y_mel_test, mel_knn_test_pred)
    knn_amp_test_accuracies.append(amp_test_accuracy)
    knn_mel_test_accuracies.append(mel_test_accuracy)

# Print out the results
print(f"The best accuracy achieved for amplitude was: {max(knn_amp_test_accuracies):.4f} with k={np.argmax(knn_amp_test_accuracies)+1}")
print(f"The best accuracy achieved for Mel spectrogram was: {max(knn_mel_test_accuracies):.4f} with k={np.argmax(knn_mel_test_accuracies)+1}")
The best accuracy achieved for amplitude was: 0.2057 with k=1
The best accuracy achieved for Mel spectrogram was: 0.3687 with k=5

Bonus Part D: Build an LSTM

Mel spectrogram

In [31]:
# convert flattened and scaled mel data back to 3d
X_mel_train_input = X_mel_train_scaled.reshape(X_mel_train.shape)
X_mel_test_input = X_mel_test_scaled.reshape(X_mel_test.shape)

# split data into train and val
X_mel_train_input, X_mel_val_input, y_mel_train, y_mel_val = train_test_split(X_mel_train_input, y_mel_train, train_size=0.8)

# save into dataframe
mel_train_data = Dataset.from_tensor_slices((X_mel_train_input, tf.keras.utils.to_categorical(y_mel_train))).batch(32)
mel_val_data = Dataset.from_tensor_slices((X_mel_val_input, tf.keras.utils.to_categorical(y_mel_val))).batch(32)
mel_test_data = Dataset.from_tensor_slices((X_mel_test_input, tf.keras.utils.to_categorical(y_mel_test))).batch(32)
In [32]:
# Create LSTM
lstm = Sequential(name='Mel')
lstm.add(Bidirectional(LSTM(128), input_shape=X_mel_train_input.shape[1:]))
# lstm.add(Bidirectional(LSTM(64)))
lstm.add(Dense(500, activation='relu'))
lstm.add(Dropout(0.05))
lstm.add(Dense(256, activation='relu'))
lstm.add(Dropout(0.05))
lstm.add(Dense(128, activation='relu'))
lstm.add(Dense(10, activation='softmax'))

lstm.summary()
Model: "Mel"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
bidirectional (Bidirectional (None, 256)               221184    
_________________________________________________________________
dense_8 (Dense)              (None, 500)               128500    
_________________________________________________________________
dropout_4 (Dropout)          (None, 500)               0         
_________________________________________________________________
dense_9 (Dense)              (None, 256)               128256    
_________________________________________________________________
dropout_5 (Dropout)          (None, 256)               0         
_________________________________________________________________
dense_10 (Dense)             (None, 128)               32896     
_________________________________________________________________
dense_11 (Dense)             (None, 10)                1290      
=================================================================
Total params: 512,126
Trainable params: 512,126
Non-trainable params: 0
_________________________________________________________________
In [ ]:
lstm.compile(
    optimizer=tf.optimizers.Adam(1e-4), 
    loss='categorical_crossentropy', 
    metrics='accuracy'
)

es = EarlyStopping(monitor='val_loss', patience=30)

mel_history = lstm.fit(mel_train_data, 
                       validation_data=mel_val_data, 
                       epochs=175, 
                       verbose=2)
Epoch 1/175
139/139 - 5s - loss: 2.2215 - accuracy: 0.1738 - val_loss: 2.0836 - val_accuracy: 0.1899
Epoch 2/175
139/139 - 2s - loss: 2.0021 - accuracy: 0.2476 - val_loss: 1.9331 - val_accuracy: 0.3042
Epoch 3/175
139/139 - 2s - loss: 1.8466 - accuracy: 0.3057 - val_loss: 1.7951 - val_accuracy: 0.3519
Epoch 4/175
139/139 - 2s - loss: 1.7433 - accuracy: 0.3476 - val_loss: 1.7920 - val_accuracy: 0.3456
Epoch 5/175
139/139 - 2s - loss: 1.6974 - accuracy: 0.3584 - val_loss: 1.6982 - val_accuracy: 0.3771
Epoch 6/175
139/139 - 2s - loss: 1.6487 - accuracy: 0.3816 - val_loss: 1.6706 - val_accuracy: 0.3735
Epoch 7/175
139/139 - 2s - loss: 1.6263 - accuracy: 0.3827 - val_loss: 1.6510 - val_accuracy: 0.3789
Epoch 8/175
139/139 - 2s - loss: 1.5921 - accuracy: 0.3978 - val_loss: 1.6417 - val_accuracy: 0.3933
Epoch 9/175
139/139 - 2s - loss: 1.5709 - accuracy: 0.4045 - val_loss: 1.6151 - val_accuracy: 0.3960
Epoch 10/175
139/139 - 2s - loss: 1.5465 - accuracy: 0.4156 - val_loss: 1.6812 - val_accuracy: 0.3924
Epoch 11/175
139/139 - 2s - loss: 1.5382 - accuracy: 0.4291 - val_loss: 1.6040 - val_accuracy: 0.4221
Epoch 12/175
139/139 - 2s - loss: 1.5046 - accuracy: 0.4390 - val_loss: 1.5683 - val_accuracy: 0.4410
Epoch 13/175
139/139 - 2s - loss: 1.4848 - accuracy: 0.4444 - val_loss: 1.5282 - val_accuracy: 0.4446
Epoch 14/175
139/139 - 2s - loss: 1.4654 - accuracy: 0.4622 - val_loss: 1.5103 - val_accuracy: 0.4545
Epoch 15/175
139/139 - 2s - loss: 1.4548 - accuracy: 0.4575 - val_loss: 1.5076 - val_accuracy: 0.4491
Epoch 16/175
139/139 - 2s - loss: 1.4396 - accuracy: 0.4710 - val_loss: 1.4902 - val_accuracy: 0.4563
Epoch 17/175
139/139 - 2s - loss: 1.4206 - accuracy: 0.4761 - val_loss: 1.4845 - val_accuracy: 0.4563
Epoch 18/175
139/139 - 2s - loss: 1.4097 - accuracy: 0.4827 - val_loss: 1.4752 - val_accuracy: 0.4680
Epoch 19/175
139/139 - 2s - loss: 1.3920 - accuracy: 0.4869 - val_loss: 1.4620 - val_accuracy: 0.4806
Epoch 20/175
139/139 - 2s - loss: 1.3819 - accuracy: 0.4989 - val_loss: 1.4543 - val_accuracy: 0.4788
Epoch 21/175
139/139 - 2s - loss: 1.3675 - accuracy: 0.5090 - val_loss: 1.4639 - val_accuracy: 0.4662
Epoch 22/175
139/139 - 2s - loss: 1.3543 - accuracy: 0.5135 - val_loss: 1.4480 - val_accuracy: 0.4734
Epoch 23/175
139/139 - 2s - loss: 1.3433 - accuracy: 0.5155 - val_loss: 1.4387 - val_accuracy: 0.4761
Epoch 24/175
139/139 - 2s - loss: 1.3283 - accuracy: 0.5218 - val_loss: 1.4301 - val_accuracy: 0.4761
Epoch 25/175
139/139 - 2s - loss: 1.3168 - accuracy: 0.5270 - val_loss: 1.4274 - val_accuracy: 0.4806
Epoch 26/175
139/139 - 2s - loss: 1.3015 - accuracy: 0.5369 - val_loss: 1.4225 - val_accuracy: 0.4869
Epoch 27/175
139/139 - 2s - loss: 1.2926 - accuracy: 0.5344 - val_loss: 1.4048 - val_accuracy: 0.5005
Epoch 28/175
139/139 - 2s - loss: 1.2814 - accuracy: 0.5412 - val_loss: 1.4105 - val_accuracy: 0.5122
Epoch 29/175
139/139 - 2s - loss: 1.2629 - accuracy: 0.5531 - val_loss: 1.3878 - val_accuracy: 0.5095
Epoch 30/175
139/139 - 2s - loss: 1.2529 - accuracy: 0.5556 - val_loss: 1.4051 - val_accuracy: 0.5221
Epoch 31/175
139/139 - 2s - loss: 1.2425 - accuracy: 0.5563 - val_loss: 1.3787 - val_accuracy: 0.5284
Epoch 32/175
139/139 - 2s - loss: 1.2330 - accuracy: 0.5635 - val_loss: 1.3940 - val_accuracy: 0.5203
Epoch 33/175
139/139 - 2s - loss: 1.2194 - accuracy: 0.5682 - val_loss: 1.3674 - val_accuracy: 0.5266
Epoch 34/175
139/139 - 2s - loss: 1.2092 - accuracy: 0.5729 - val_loss: 1.3691 - val_accuracy: 0.5356
Epoch 35/175
139/139 - 2s - loss: 1.2034 - accuracy: 0.5696 - val_loss: 1.3726 - val_accuracy: 0.5347
Epoch 36/175
139/139 - 2s - loss: 1.1909 - accuracy: 0.5743 - val_loss: 1.3371 - val_accuracy: 0.5392
Epoch 37/175
139/139 - 2s - loss: 1.1780 - accuracy: 0.5808 - val_loss: 1.3315 - val_accuracy: 0.5437
Epoch 38/175
139/139 - 2s - loss: 1.1625 - accuracy: 0.5871 - val_loss: 1.3238 - val_accuracy: 0.5428
Epoch 39/175
139/139 - 2s - loss: 1.1493 - accuracy: 0.5862 - val_loss: 1.3182 - val_accuracy: 0.5473
Epoch 40/175
139/139 - 2s - loss: 1.1445 - accuracy: 0.5860 - val_loss: 1.3199 - val_accuracy: 0.5608
Epoch 41/175
139/139 - 2s - loss: 1.1351 - accuracy: 0.5925 - val_loss: 1.2844 - val_accuracy: 0.5797
Epoch 42/175
139/139 - 2s - loss: 1.1173 - accuracy: 0.5988 - val_loss: 1.2860 - val_accuracy: 0.5788
Epoch 43/175
139/139 - 2s - loss: 1.1106 - accuracy: 0.6051 - val_loss: 1.2683 - val_accuracy: 0.5698
Epoch 44/175
139/139 - 2s - loss: 1.0967 - accuracy: 0.6085 - val_loss: 1.2551 - val_accuracy: 0.5698
Epoch 45/175
139/139 - 2s - loss: 1.0850 - accuracy: 0.6094 - val_loss: 1.2451 - val_accuracy: 0.5725
Epoch 46/175
139/139 - 2s - loss: 1.0755 - accuracy: 0.6139 - val_loss: 1.2387 - val_accuracy: 0.5752
Epoch 47/175
139/139 - 2s - loss: 1.0719 - accuracy: 0.6150 - val_loss: 1.2282 - val_accuracy: 0.5842
Epoch 48/175
139/139 - 2s - loss: 1.0576 - accuracy: 0.6184 - val_loss: 1.2146 - val_accuracy: 0.5851
Epoch 49/175
139/139 - 2s - loss: 1.0474 - accuracy: 0.6209 - val_loss: 1.2123 - val_accuracy: 0.5869
Epoch 50/175
139/139 - 2s - loss: 1.0369 - accuracy: 0.6274 - val_loss: 1.1967 - val_accuracy: 0.5995
Epoch 51/175
139/139 - 2s - loss: 1.0305 - accuracy: 0.6299 - val_loss: 1.1804 - val_accuracy: 0.5905
Epoch 52/175
139/139 - 2s - loss: 1.0213 - accuracy: 0.6303 - val_loss: 1.1800 - val_accuracy: 0.6022
Epoch 53/175
139/139 - 2s - loss: 1.0081 - accuracy: 0.6369 - val_loss: 1.1801 - val_accuracy: 0.5968
Epoch 54/175
139/139 - 2s - loss: 0.9999 - accuracy: 0.6348 - val_loss: 1.1782 - val_accuracy: 0.5950
Epoch 55/175
139/139 - 2s - loss: 0.9845 - accuracy: 0.6466 - val_loss: 1.1674 - val_accuracy: 0.5977
Epoch 56/175
139/139 - 2s - loss: 0.9863 - accuracy: 0.6441 - val_loss: 1.1576 - val_accuracy: 0.6139
Epoch 57/175
139/139 - 2s - loss: 0.9670 - accuracy: 0.6533 - val_loss: 1.1433 - val_accuracy: 0.6148
Epoch 58/175
139/139 - 2s - loss: 0.9625 - accuracy: 0.6515 - val_loss: 1.1564 - val_accuracy: 0.6022
Epoch 59/175
139/139 - 2s - loss: 0.9601 - accuracy: 0.6513 - val_loss: 1.1265 - val_accuracy: 0.6319
Epoch 60/175
139/139 - 2s - loss: 0.9539 - accuracy: 0.6580 - val_loss: 1.1193 - val_accuracy: 0.6337
Epoch 61/175
139/139 - 2s - loss: 0.9362 - accuracy: 0.6628 - val_loss: 1.1341 - val_accuracy: 0.6103
Epoch 62/175
139/139 - 2s - loss: 0.9280 - accuracy: 0.6652 - val_loss: 1.1262 - val_accuracy: 0.6148
Epoch 63/175
139/139 - 2s - loss: 0.9233 - accuracy: 0.6724 - val_loss: 1.1013 - val_accuracy: 0.6247
Epoch 64/175
139/139 - 2s - loss: 0.9071 - accuracy: 0.6769 - val_loss: 1.1054 - val_accuracy: 0.6193
Epoch 65/175
139/139 - 2s - loss: 0.9030 - accuracy: 0.6769 - val_loss: 1.0991 - val_accuracy: 0.6175
Epoch 66/175
139/139 - 2s - loss: 0.8976 - accuracy: 0.6781 - val_loss: 1.0954 - val_accuracy: 0.6193
Epoch 67/175
139/139 - 2s - loss: 0.8860 - accuracy: 0.6824 - val_loss: 1.0967 - val_accuracy: 0.6157
Epoch 68/175
139/139 - 2s - loss: 0.8832 - accuracy: 0.6821 - val_loss: 1.0885 - val_accuracy: 0.6274
Epoch 69/175
139/139 - 2s - loss: 0.8676 - accuracy: 0.6943 - val_loss: 1.0865 - val_accuracy: 0.6229
Epoch 70/175
139/139 - 2s - loss: 0.8618 - accuracy: 0.6898 - val_loss: 1.0771 - val_accuracy: 0.6391
Epoch 71/175
139/139 - 2s - loss: 0.8486 - accuracy: 0.6925 - val_loss: 1.0774 - val_accuracy: 0.6427
Epoch 72/175
139/139 - 2s - loss: 0.8453 - accuracy: 0.6972 - val_loss: 1.0666 - val_accuracy: 0.6445
Epoch 73/175
139/139 - 2s - loss: 0.8410 - accuracy: 0.6947 - val_loss: 1.0740 - val_accuracy: 0.6292
Epoch 74/175
139/139 - 2s - loss: 0.8261 - accuracy: 0.7017 - val_loss: 1.0753 - val_accuracy: 0.6553
Epoch 75/175
139/139 - 2s - loss: 0.8227 - accuracy: 0.7035 - val_loss: 1.0702 - val_accuracy: 0.6499
Epoch 76/175
139/139 - 2s - loss: 0.8156 - accuracy: 0.7096 - val_loss: 1.0806 - val_accuracy: 0.6490
Epoch 77/175
139/139 - 2s - loss: 0.8075 - accuracy: 0.7100 - val_loss: 1.0824 - val_accuracy: 0.6382
Epoch 78/175
139/139 - 2s - loss: 0.8073 - accuracy: 0.7103 - val_loss: 1.0581 - val_accuracy: 0.6625
Epoch 79/175
139/139 - 2s - loss: 0.7874 - accuracy: 0.7195 - val_loss: 1.0697 - val_accuracy: 0.6562
Epoch 80/175
139/139 - 2s - loss: 0.7862 - accuracy: 0.7215 - val_loss: 1.0647 - val_accuracy: 0.6454
Epoch 81/175
139/139 - 2s - loss: 0.7759 - accuracy: 0.7258 - val_loss: 1.0675 - val_accuracy: 0.6679
Epoch 82/175
139/139 - 2s - loss: 0.7735 - accuracy: 0.7199 - val_loss: 1.0684 - val_accuracy: 0.6652
Epoch 83/175
139/139 - 2s - loss: 0.7682 - accuracy: 0.7235 - val_loss: 1.0529 - val_accuracy: 0.6733
Epoch 84/175
139/139 - 2s - loss: 0.7619 - accuracy: 0.7262 - val_loss: 1.0561 - val_accuracy: 0.6571
Epoch 85/175
139/139 - 2s - loss: 0.7541 - accuracy: 0.7346 - val_loss: 1.0962 - val_accuracy: 0.6508
Epoch 86/175
139/139 - 2s - loss: 0.7489 - accuracy: 0.7323 - val_loss: 1.0696 - val_accuracy: 0.6616
Epoch 87/175
139/139 - 2s - loss: 0.7513 - accuracy: 0.7285 - val_loss: 1.0653 - val_accuracy: 0.6670
Epoch 88/175
139/139 - 2s - loss: 0.7403 - accuracy: 0.7330 - val_loss: 1.0680 - val_accuracy: 0.6580
Epoch 89/175
139/139 - 2s - loss: 0.7290 - accuracy: 0.7364 - val_loss: 1.0703 - val_accuracy: 0.6661
Epoch 90/175
139/139 - 2s - loss: 0.7252 - accuracy: 0.7434 - val_loss: 1.0513 - val_accuracy: 0.6679
Epoch 91/175
139/139 - 2s - loss: 0.7191 - accuracy: 0.7436 - val_loss: 1.0750 - val_accuracy: 0.6724
Epoch 92/175
139/139 - 2s - loss: 0.7198 - accuracy: 0.7431 - val_loss: 1.0645 - val_accuracy: 0.6760
Epoch 93/175
139/139 - 2s - loss: 0.7101 - accuracy: 0.7461 - val_loss: 1.0700 - val_accuracy: 0.6760
Epoch 94/175
139/139 - 2s - loss: 0.6994 - accuracy: 0.7458 - val_loss: 1.0837 - val_accuracy: 0.6661
Epoch 95/175
139/139 - 2s - loss: 0.6892 - accuracy: 0.7535 - val_loss: 1.0781 - val_accuracy: 0.6796
Epoch 96/175
139/139 - 2s - loss: 0.6969 - accuracy: 0.7452 - val_loss: 1.0472 - val_accuracy: 0.6805
Epoch 97/175
139/139 - 2s - loss: 0.6835 - accuracy: 0.7555 - val_loss: 1.0549 - val_accuracy: 0.6868
Epoch 98/175
139/139 - 2s - loss: 0.6712 - accuracy: 0.7609 - val_loss: 1.1081 - val_accuracy: 0.6751
Epoch 99/175
139/139 - 2s - loss: 0.6729 - accuracy: 0.7625 - val_loss: 1.0715 - val_accuracy: 0.7012
Epoch 100/175
139/139 - 2s - loss: 0.6681 - accuracy: 0.7600 - val_loss: 1.0769 - val_accuracy: 0.6805
Epoch 101/175
139/139 - 2s - loss: 0.6633 - accuracy: 0.7587 - val_loss: 1.0714 - val_accuracy: 0.6922
Epoch 102/175
139/139 - 2s - loss: 0.6502 - accuracy: 0.7683 - val_loss: 1.0914 - val_accuracy: 0.6841
Epoch 103/175
139/139 - 2s - loss: 0.6537 - accuracy: 0.7627 - val_loss: 1.0816 - val_accuracy: 0.6841
Epoch 104/175
139/139 - 2s - loss: 0.6396 - accuracy: 0.7735 - val_loss: 1.1142 - val_accuracy: 0.6877
Epoch 105/175
139/139 - 2s - loss: 0.6375 - accuracy: 0.7715 - val_loss: 1.0802 - val_accuracy: 0.6886
Epoch 106/175
139/139 - 2s - loss: 0.6390 - accuracy: 0.7679 - val_loss: 1.0743 - val_accuracy: 0.7030
Epoch 107/175
139/139 - 2s - loss: 0.6369 - accuracy: 0.7681 - val_loss: 1.1197 - val_accuracy: 0.6886
Epoch 108/175
139/139 - 2s - loss: 0.6313 - accuracy: 0.7747 - val_loss: 1.1084 - val_accuracy: 0.6796
Epoch 109/175
139/139 - 2s - loss: 0.6192 - accuracy: 0.7762 - val_loss: 1.1076 - val_accuracy: 0.6895
Epoch 110/175
139/139 - 2s - loss: 0.6078 - accuracy: 0.7789 - val_loss: 1.1021 - val_accuracy: 0.7021
Epoch 111/175
139/139 - 2s - loss: 0.6161 - accuracy: 0.7769 - val_loss: 1.1387 - val_accuracy: 0.6751
Epoch 112/175
139/139 - 2s - loss: 0.6027 - accuracy: 0.7803 - val_loss: 1.1405 - val_accuracy: 0.6832
Epoch 113/175
139/139 - 2s - loss: 0.5992 - accuracy: 0.7857 - val_loss: 1.1221 - val_accuracy: 0.6949
Epoch 114/175
139/139 - 2s - loss: 0.6001 - accuracy: 0.7837 - val_loss: 1.1722 - val_accuracy: 0.6751
Epoch 115/175
139/139 - 2s - loss: 0.6101 - accuracy: 0.7760 - val_loss: 1.1622 - val_accuracy: 0.6715
Epoch 116/175
139/139 - 2s - loss: 0.5979 - accuracy: 0.7828 - val_loss: 1.1420 - val_accuracy: 0.6841
Epoch 117/175
139/139 - 2s - loss: 0.5908 - accuracy: 0.7870 - val_loss: 1.1484 - val_accuracy: 0.6850
Epoch 118/175
139/139 - 2s - loss: 0.5897 - accuracy: 0.7870 - val_loss: 1.1215 - val_accuracy: 0.7039
Epoch 119/175
139/139 - 2s - loss: 0.5872 - accuracy: 0.7850 - val_loss: 1.1695 - val_accuracy: 0.6787
Epoch 120/175
139/139 - 2s - loss: 0.5777 - accuracy: 0.7902 - val_loss: 1.1537 - val_accuracy: 0.6922
Epoch 121/175
139/139 - 2s - loss: 0.5770 - accuracy: 0.7929 - val_loss: 1.1735 - val_accuracy: 0.6886
Epoch 122/175
139/139 - 2s - loss: 0.5734 - accuracy: 0.7911 - val_loss: 1.1680 - val_accuracy: 0.6922
Epoch 123/175
139/139 - 2s - loss: 0.5692 - accuracy: 0.7891 - val_loss: 1.1292 - val_accuracy: 0.7048
Epoch 124/175
139/139 - 2s - loss: 0.5665 - accuracy: 0.7978 - val_loss: 1.1910 - val_accuracy: 0.6805
Epoch 125/175
139/139 - 2s - loss: 0.5630 - accuracy: 0.7985 - val_loss: 1.1372 - val_accuracy: 0.7156
Epoch 126/175
139/139 - 2s - loss: 0.5573 - accuracy: 0.7945 - val_loss: 1.1280 - val_accuracy: 0.7165
Epoch 127/175
139/139 - 2s - loss: 0.5574 - accuracy: 0.7949 - val_loss: 1.1259 - val_accuracy: 0.7075
Epoch 128/175
139/139 - 2s - loss: 0.5495 - accuracy: 0.7974 - val_loss: 1.1433 - val_accuracy: 0.7102
Epoch 129/175
139/139 - 2s - loss: 0.5656 - accuracy: 0.7951 - val_loss: 1.1324 - val_accuracy: 0.7039
Epoch 130/175
139/139 - 2s - loss: 0.5344 - accuracy: 0.8048 - val_loss: 1.1519 - val_accuracy: 0.7093
Epoch 131/175
139/139 - 2s - loss: 0.5474 - accuracy: 0.8032 - val_loss: 1.1621 - val_accuracy: 0.7030
Epoch 132/175
139/139 - 2s - loss: 0.5406 - accuracy: 0.8041 - val_loss: 1.1642 - val_accuracy: 0.7057
Epoch 133/175
139/139 - 2s - loss: 0.5421 - accuracy: 0.7983 - val_loss: 1.1666 - val_accuracy: 0.7165
Epoch 134/175
139/139 - 2s - loss: 0.5440 - accuracy: 0.8001 - val_loss: 1.1533 - val_accuracy: 0.7066
Epoch 135/175
139/139 - 2s - loss: 0.5239 - accuracy: 0.8161 - val_loss: 1.1505 - val_accuracy: 0.7093
Epoch 136/175
139/139 - 2s - loss: 0.5275 - accuracy: 0.8057 - val_loss: 1.1486 - val_accuracy: 0.7228
Epoch 137/175
139/139 - 2s - loss: 0.5242 - accuracy: 0.8077 - val_loss: 1.1450 - val_accuracy: 0.7048
Epoch 138/175
139/139 - 2s - loss: 0.5353 - accuracy: 0.8098 - val_loss: 1.1305 - val_accuracy: 0.7228
Epoch 139/175
139/139 - 2s - loss: 0.5149 - accuracy: 0.8190 - val_loss: 1.1852 - val_accuracy: 0.7084
Epoch 140/175
139/139 - 2s - loss: 0.5024 - accuracy: 0.8172 - val_loss: 1.1876 - val_accuracy: 0.7111
Epoch 141/175
139/139 - 2s - loss: 0.5120 - accuracy: 0.8154 - val_loss: 1.1676 - val_accuracy: 0.7183
Epoch 142/175
139/139 - 2s - loss: 0.5027 - accuracy: 0.8149 - val_loss: 1.1503 - val_accuracy: 0.7309
Epoch 143/175
139/139 - 2s - loss: 0.5007 - accuracy: 0.8154 - val_loss: 1.1657 - val_accuracy: 0.7174
Epoch 144/175
139/139 - 2s - loss: 0.4974 - accuracy: 0.8208 - val_loss: 1.1936 - val_accuracy: 0.7390
Epoch 145/175
139/139 - 2s - loss: 0.4969 - accuracy: 0.8197 - val_loss: 1.1596 - val_accuracy: 0.7381
Epoch 146/175
139/139 - 2s - loss: 0.4830 - accuracy: 0.8246 - val_loss: 1.1675 - val_accuracy: 0.7336
Epoch 147/175
139/139 - 2s - loss: 0.5163 - accuracy: 0.8107 - val_loss: 1.2145 - val_accuracy: 0.7012
Epoch 148/175
139/139 - 2s - loss: 0.5086 - accuracy: 0.8145 - val_loss: 1.1397 - val_accuracy: 0.7354
Epoch 149/175
139/139 - 2s - loss: 0.4770 - accuracy: 0.8237 - val_loss: 1.1790 - val_accuracy: 0.7282
Epoch 150/175
139/139 - 2s - loss: 0.4788 - accuracy: 0.8267 - val_loss: 1.2202 - val_accuracy: 0.7192
Epoch 151/175
139/139 - 2s - loss: 0.4732 - accuracy: 0.8289 - val_loss: 1.1956 - val_accuracy: 0.7354
Epoch 152/175
139/139 - 2s - loss: 0.4720 - accuracy: 0.8309 - val_loss: 1.1731 - val_accuracy: 0.7300
Epoch 153/175
139/139 - 2s - loss: 0.4687 - accuracy: 0.8264 - val_loss: 1.1789 - val_accuracy: 0.7318
Epoch 154/175
139/139 - 2s - loss: 0.4602 - accuracy: 0.8318 - val_loss: 1.2084 - val_accuracy: 0.7354
Epoch 155/175
139/139 - 2s - loss: 0.4637 - accuracy: 0.8384 - val_loss: 1.2007 - val_accuracy: 0.7291
Epoch 156/175
139/139 - 2s - loss: 0.4622 - accuracy: 0.8350 - val_loss: 1.2117 - val_accuracy: 0.7300
Epoch 157/175
139/139 - 2s - loss: 0.4613 - accuracy: 0.8323 - val_loss: 1.2027 - val_accuracy: 0.7309
Epoch 158/175
139/139 - 2s - loss: 0.4540 - accuracy: 0.8363 - val_loss: 1.2247 - val_accuracy: 0.7327
Epoch 159/175
139/139 - 2s - loss: 0.4511 - accuracy: 0.8350 - val_loss: 1.2366 - val_accuracy: 0.7237
Epoch 160/175
139/139 - 2s - loss: 0.4784 - accuracy: 0.8231 - val_loss: 1.1719 - val_accuracy: 0.7372
Epoch 161/175
139/139 - 2s - loss: 0.4422 - accuracy: 0.8375 - val_loss: 1.2105 - val_accuracy: 0.7480
Epoch 162/175
139/139 - 2s - loss: 0.4391 - accuracy: 0.8433 - val_loss: 1.2094 - val_accuracy: 0.7336
Epoch 163/175
139/139 - 2s - loss: 0.4501 - accuracy: 0.8370 - val_loss: 1.1964 - val_accuracy: 0.7408
Epoch 164/175
139/139 - 2s - loss: 0.4535 - accuracy: 0.8327 - val_loss: 1.2524 - val_accuracy: 0.7192
Epoch 165/175
139/139 - 2s - loss: 0.4449 - accuracy: 0.8424 - val_loss: 1.2096 - val_accuracy: 0.7300
Epoch 166/175
139/139 - 2s - loss: 0.4423 - accuracy: 0.8386 - val_loss: 1.2088 - val_accuracy: 0.7381
Epoch 167/175
139/139 - 2s - loss: 0.4409 - accuracy: 0.8375 - val_loss: 1.2207 - val_accuracy: 0.7381
Epoch 168/175
139/139 - 2s - loss: 0.4224 - accuracy: 0.8469 - val_loss: 1.2169 - val_accuracy: 0.7345
Epoch 169/175
139/139 - 2s - loss: 0.4311 - accuracy: 0.8449 - val_loss: 1.2384 - val_accuracy: 0.7318
Epoch 170/175
139/139 - 2s - loss: 0.4275 - accuracy: 0.8424 - val_loss: 1.2832 - val_accuracy: 0.7138
Epoch 171/175
139/139 - 2s - loss: 0.4437 - accuracy: 0.8399 - val_loss: 1.2629 - val_accuracy: 0.7273
Epoch 172/175
139/139 - 2s - loss: 0.4423 - accuracy: 0.8370 - val_loss: 1.2021 - val_accuracy: 0.7309
Epoch 173/175
139/139 - 2s - loss: 0.4611 - accuracy: 0.8334 - val_loss: 1.2560 - val_accuracy: 0.7246
Epoch 174/175
139/139 - 2s - loss: 0.4411 - accuracy: 0.8415 - val_loss: 1.2187 - val_accuracy: 0.7309
Epoch 175/175
139/139 - 2s - loss: 0.4233 - accuracy: 0.8469 - val_loss: 1.2156 - val_accuracy: 0.7327
In [ ]:
fig, axs = plt.subplots(1, 2, figsize=(12,5))
num_epochs = len(mel_history.history['accuracy'])
axs[0].plot(range(num_epochs), mel_history.history['accuracy'], label='train accuracy')
axs[0].plot(range(num_epochs), mel_history.history['val_accuracy'], label='validation accuracy')
axs[0].set_xlabel('Epochs')
axs[0].set_ylabel('Accuracy')
axs[0].set_title('LSTM – Train and Val Accuracy')
axs[0].legend()

axs[1].plot(range(num_epochs), mel_history.history['loss'], label='train loss')
axs[1].plot(range(num_epochs), mel_history.history['val_loss'], label='validation loss')
axs[1].set_xlabel('Epochs')
axs[1].set_ylabel('Loss')
axs[1].set_title('LSTM – Train and Val Loss')
axs[1].legend();
In [ ]:
test_accuracy = lstm.evaluate(mel_test_data)[1]
final_val_acc = mel_history.history['val_accuracy'][-1]
final_train_acc = mel_history.history['accuracy'][-1]
print(f'The LSTM\'s accuracy for the test set is {test_accuracy*100:0.4}%')
print(f'The final training and test accuracies are {final_train_acc*100:0.4}% and {final_val_acc*100:0.4}%, respectively')
69/69 [==============================] - 1s 9ms/step - loss: 3.7060 - accuracy: 0.4775
The LSTM's accuracy for the test set is 47.75%
The final training and test accuracies are 84.69% and 73.27%, respectively