import tensorflow as tf
import numpy as np
from tensorflow.keras.callbacks import CSVLogger
from tensorflow.keras.preprocessing.image import ImageDataGenerator

mnist = tf.keras.datasets.mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Add a channel dimension and scale pixel values to [0, 1]
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_train = x_train / 255.0
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
x_test = x_test / 255.0

# y_train stays integer-encoded: get_random_sample() below selects examples by
# integer class label, and the sub-sampled labels are one-hot encoded afterwards.
# y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test)

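# Quick shape sanity check (an illustrative sketch, not part of the original script):
# with the standard Keras MNIST split the arrays should now look like this --
#   x_train.shape == (60000, 28, 28, 1)   # floats in [0, 1]
#   x_test.shape  == (10000, 28, 28, 1)
#   y_train.shape == (60000,)             # integer labels 0-9
#   y_test.shape  == (10000, 10)          # one-hot encoded
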
def get_random_sample(a, b, number_of_samples=10):
    """Draw number_of_samples random examples of each of the 10 digit classes
    from images a and integer labels b (sampled with replacement).

    Returns the images as a (10*number_of_samples, 28, 28, 1) array and the
    integer labels as a (10*number_of_samples, 1) column vector.
    """
    x = []
    y = []
    for category_number in range(0, 10):
        # get all samples of a category
        train_data_category = a[b == category_number]
        # pick a number of random samples from the category
        train_data_category = train_data_category[np.random.randint(train_data_category.shape[0],
                                                                     size=number_of_samples), :]
        x.extend(train_data_category)
        y.append([category_number]*number_of_samples)

    return np.asarray(x).reshape(-1, 28, 28, 1), np.asarray(y).reshape(10*number_of_samples, 1)

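# Example usage (illustrative sketch): a balanced sub-sample of 10 images per digit.
#   xs, ys = get_random_sample(x_train, y_train, number_of_samples=10)
#   xs.shape -> (100, 28, 28, 1), ys.shape -> (100, 1); ys holds integer labels,
#   so it still needs tf.keras.utils.to_categorical() before training, as done below.
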
for j in [0.0]:  # dropout rates to sweep (only 0.0 here)
    for i in ['1','2','3','4','5','6','7','8','9','0']:  # run index, used only in the log file name

        model = tf.keras.models.Sequential()
        model.add(tf.keras.layers.Conv2D(24, kernel_size=5, padding='same', activation='relu',
                                         input_shape=(28, 28, 1)))
        model.add(tf.keras.layers.MaxPool2D())
        model.add(tf.keras.layers.Conv2D(64, kernel_size=5, padding='same', activation='relu'))
        model.add(tf.keras.layers.MaxPool2D(padding='same'))
        model.add(tf.keras.layers.Flatten())
        model.add(tf.keras.layers.Dense(256, activation='relu'))
        model.add(tf.keras.layers.Dropout(j))  # dropout rate j from the outer loop
        model.add(tf.keras.layers.Dense(10, activation='softmax'))
        model.compile(optimizer='adam', loss="categorical_crossentropy", metrics=["accuracy"])
        model.summary()  # summary() prints directly; wrapping it in print() only adds a trailing "None"
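        # Note (sketch, not in the original script): the same `model` instance is reused
        # for both n = 10 and n = 100 below, so if the commented-out fit calls are
        # re-enabled, the second run continues from already-trained weights. One way to
        # get independent runs would be to snapshot and restore the initial weights:
        #   initial_weights = model.get_weights()
        #   ...and inside the `for n` loop: model.set_weights(initial_weights)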
        for n in [10, 100]:
            x_train_, y_train_ = get_random_sample(x_train, y_train, number_of_samples=n)
            y_train_ = tf.keras.utils.to_categorical(y_train_)

            # data augmentation for the sub-sampled training set
            datagen = ImageDataGenerator(
                rotation_range=30,
                zoom_range=0.15,
                width_shift_range=2,
                height_shift_range=2,
                shear_range=1)

            #x_test_ = np.append(x_train[300:],x_test).reshape(x_train[300:].shape[0]+x_test.shape[0],28,28,1)
            #y_test_ = np.append(y_train[300:],y_test).reshape(y_train[300:].shape[0]+y_test.shape[0],10)

            # Training is left commented out: the first fit call trains on the augmented
            # generator, the second on the raw sub-sample; both log per-epoch metrics via CSVLogger.
            # csv_logger = CSVLogger('Sample/adam_dropout_'+str(j).replace('.',"")+'_'+str(n)+'_'+i+'.log')
            # history = model.fit(datagen.flow(x_train_, y_train_, batch_size=50), validation_data=(x_test, y_test), epochs=125, callbacks=[csv_logger], steps_per_epoch = x_train_.shape[0]//50)
            # history = model.fit(x_train_, y_train_, validation_data=(x_test, y_test), epochs=125, callbacks=[csv_logger])
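            # To actually run an experiment (hedged sketch, assuming the 'Sample/' log
            # directory exists), one of the fit calls above would be uncommented and the
            # result could then be checked on the held-out test set, e.g.:
            #   test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
            #   print(f'n={n}, dropout={j}, run={i}: test accuracy {test_acc:.4f}')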