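"""Transfer-learning training script: trains a small classification head on top of a
frozen, ImageNet-pretrained VGG19, with CSV logging, early stopping, and
best-model checkpointing."""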
import json

import tensorflow as tf

from .utiles.functions import print_data, load_dataset
from .const.train_const import *
from .const.general_const import *

def preprocess_data_layers():
    """Build the data-augmentation and rescaling layers applied ahead of the base model."""
    data_augmentation_layer = tf.keras.Sequential([
        tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal"),
        tf.keras.layers.experimental.preprocessing.RandomRotation(0.05),
        tf.keras.layers.experimental.preprocessing.RandomZoom(0.05),
    ])
    # Map pixel values from [0, 255] to [0, 1].
    rescale_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1. / 255)
    return data_augmentation_layer, rescale_layer
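
# Note: from TF 2.6 these preprocessing layers are also exposed directly as
# tf.keras.layers.RandomFlip / RandomRotation / RandomZoom / Rescaling, and the
# experimental aliases used above were later removed, so newer TF versions may
# require switching to the non-experimental names.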

def build_model(data_augmentation, rescale, img_shape=IMG_SHAPE,
                learning_rate=LEARNING_RATE, num_class=2):
    """Assemble a frozen VGG19 feature extractor with a small classification head."""
    base_model = tf.keras.applications.VGG19(include_top=False,
                                             weights='imagenet',
                                             input_shape=img_shape)
    # Freeze the convolutional base so only the new head is trained.
    base_model.trainable = False

    global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
    prediction_layer = tf.keras.layers.Dense(num_class, activation='softmax')

    inputs = tf.keras.Input(shape=img_shape)
    x = data_augmentation(inputs)
    x = rescale(x)
    # training=False keeps the base model in inference mode even during fit().
    x = base_model(x, training=False)
    x = global_average_layer(x)
    x = tf.keras.layers.Dropout(0.2)(x)
    outputs = prediction_layer(x)
    model = tf.keras.Model(inputs, outputs)

    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
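
# Optional fine-tuning sketch (an assumption beyond the original pipeline): after
# the head converges, unfreeze the top of the VGG19 base and recompile with a
# smaller learning rate, e.g.
#
#   base_model.trainable = True
#   for layer in base_model.layers[:-4]:
#       layer.trainable = False
#   model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE / 10),
#                 loss='categorical_crossentropy',
#                 metrics=['accuracy'])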

def get_callbacks(csv_logger_path, checkpoint_filepath):
    """Return CSV logging, early-stopping, and best-checkpoint callbacks."""
    csv_logger = tf.keras.callbacks.CSVLogger(csv_logger_path)
    # Stop training when validation accuracy has not improved for 4 epochs.
    early_stopping = tf.keras.callbacks.EarlyStopping(patience=4, monitor='val_accuracy')
    # Keep only the checkpoint with the best validation accuracy.
    model_checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_filepath,
                                                          monitor='val_accuracy',
                                                          mode='max',
                                                          save_best_only=True)
    return csv_logger, early_stopping, model_checkpoint

if __name__ == '__main__':
    train_dataset = load_dataset(RAW_TRAIN_PATH, BATCH_SIZE, IMG_SIZE, LABEL_MODE)
    validation_dataset = load_dataset(RAW_TEST_PATH, BATCH_SIZE, IMG_SIZE, LABEL_MODE)

    # Persist the class names so inference code can map predictions back to labels.
    class_names = train_dataset.class_names
    with open(CLASS_NAME_PATH, "w") as textfile:
        textfile.write(",".join(class_names))

    if NOTEBOOK:
        print_data(validation_dataset, class_names, notebook=NOTEBOOK,
                   process=False, save=False, predict=False)

    data_augmentation_layer, rescale_layer = preprocess_data_layers()
    print_data(validation_dataset, class_names, notebook=NOTEBOOK,
               process=True, save=True, predict=False,
               save_path='eval/processed_data', rescale_layer=rescale_layer,
               data_augmentation_layer=data_augmentation_layer)

    model = build_model(data_augmentation_layer, rescale_layer, IMG_SHAPE, LEARNING_RATE)
    csv_logger, early_stopping, model_checkpoint = get_callbacks(CSV_LOG_PATH, CHECKPOINT_PATH)

    history = model.fit(train_dataset,
                        epochs=INIT_EPOCHS,
                        validation_data=validation_dataset,
                        callbacks=[csv_logger, early_stopping, model_checkpoint])

    # Save the training parameters (epochs, steps, etc.) alongside the checkpoint.
    with open("model/TF-Model-Checkpoint/history_params.json", 'w') as f:
        json.dump(history.params, f)
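
    # A minimal follow-up sketch (an assumption, not part of the original script):
    # reload the best checkpoint and evaluate it on the held-out set.
    # best_model = tf.keras.models.load_model(CHECKPOINT_PATH)
    # loss, accuracy = best_model.evaluate(validation_dataset)
    # print(f"best val accuracy: {accuracy:.4f}")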