I changed the batch size to 64 and the learning rate to 8e-5, and adjusted the dropout of some layers, obtaining an accuracy of 80.46% on the test set.
# assuming tf.keras (TensorFlow 2)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Activation, MaxPooling2D, Dropout, Flatten, Dense
from tensorflow.keras import regularizers
import matplotlib.pyplot as plt

in_shape = x_train.shape[1:]                 # input shape taken from the training images
n_clases = y_train.shape[1]                  # number of classes, inferred from the one-hot labels
f_size = [32, 32, 64, 64, 128, 128]          # filters in each Conv2D layer
k_size = [3, 3, 3, 3, 3, 3]                  # kernel size of each Conv2D layer
r_lr = 8e-5                                  # learning rate (also used below as the L2 regularization factor)
r_dropout = [0.0, 0.0, 0.2, 0.2, 0.2, 0.2]   # dropout rate after each block
def def_model(in_shape, n_clases, f_size, k_size, r_lr):
    model = Sequential()
    # conv1
    model.add(Conv2D(f_size[0], k_size[0], padding='same',
                     kernel_regularizer=regularizers.l2(r_lr), input_shape=in_shape))
    model.add(Activation('relu'))
    # conv2
    model.add(Conv2D(f_size[1], k_size[1], padding='same', kernel_regularizer=regularizers.l2(r_lr)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(r_dropout[1]))
    # conv3
    model.add(Conv2D(f_size[2], k_size[2], padding='same', kernel_regularizer=regularizers.l2(r_lr)))
    model.add(Activation('relu'))
    # model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Dropout(r_dropout[2]))
    # conv4
    model.add(Conv2D(f_size[3], k_size[3], padding='same', kernel_regularizer=regularizers.l2(r_lr)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(r_dropout[3]))
    # conv5
    model.add(Conv2D(f_size[4], k_size[4], padding='same', kernel_regularizer=regularizers.l2(r_lr)))
    model.add(Activation('relu'))
    # model.add(MaxPooling2D(pool_size=(2,2)))
    # model.add(Dropout(0.2))
    # conv6
    model.add(Conv2D(f_size[5], k_size[5], padding='same', kernel_regularizer=regularizers.l2(r_lr)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(r_dropout[5]))
    # classification head (Flatten + softmax)
    model.add(Flatten())
    model.add(Dense(n_clases, activation='softmax'))
    model.summary()
    return model
model = def_model(in_shape, n_clases, f_size, k_size, r_lr)
print(y_train[0])  # quick sanity check of the first (one-hot) label
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
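One note: the string 'rmsprop' compiles the model with Keras's default learning rate, while r_lr = 8e-5 is applied inside def_model as the L2 kernel-regularization factor. If the intent is also to train with an 8e-5 learning rate, the optimizer can be instantiated explicitly; a minimal sketch assuming tf.keras, not part of the run reported above:

from tensorflow.keras.optimizers import RMSprop

# alternative compile step with an explicit learning rate (sketch only)
model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(learning_rate=8e-5),
              metrics=['accuracy'])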
hist = model.fit(x_train, y_train, batch_size=64, epochs=60,
                 validation_data=(x_val, y_val),
                 verbose=2, shuffle=True)
plt.plot(hist.history['accuracy'], label='Train')
plt.plot(hist.history['val_accuracy'], label='Val')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
model.evaluate(x_test, y_test)
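To print the figure quoted at the top explicitly, the values returned by evaluate can be captured; a small sketch, with illustrative variable names:

test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print(f'Test accuracy: {test_acc:.4f}')  # ~0.8046 reported above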