model.py 6.2 KB

import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.utils.class_weight import compute_class_weight

# SGD lived under optimizers.experimental only in some TF 2.x releases;
# fall back to the stable path on other versions.
try:
    from tensorflow.keras.optimizers.experimental import SGD
except ImportError:
    from tensorflow.keras.optimizers import SGD
def build_model(num_classes):
    # Load the pre-trained VGG19 convolutional base (no classifier head),
    # sized for the 48x48 RGB inputs used below
    base_model = tf.keras.applications.VGG19(weights='imagenet', include_top=False,
                                             input_shape=(48, 48, 3))
    # Take the output of the penultimate layer and pool it to a feature vector
    x = base_model.layers[-2].output
    x = GlobalAveragePooling2D()(x)
    # Add the final classification layer
    output_layer = Dense(num_classes, activation='softmax')(x)
    # Create the model
    model = Model(inputs=base_model.input, outputs=output_layer)
    # Compile with the tuned hyperparameters
    opt = SGD(learning_rate=0.0092, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model
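
# Sketch, not part of the original script: the original source carried a comment
# about preserving the pre-trained weights and making layers non-trainable, but no
# layer is ever frozen, so the whole VGG19 base gets fine-tuned. If freezing were
# desired, a minimal helper (hypothetical name) could look like this; the model
# must be recompiled afterwards for the change to take effect.
def freeze_base_layers(model, trainable_tail=1):
    """Freeze all but the last `trainable_tail` layers (the new classifier head)."""
    for layer in model.layers[:len(model.layers) - trainable_tail]:
        layer.trainable = False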
def train(model, X_train, y_train, X_valid, y_valid):
    # Callbacks: drop the learning rate when validation accuracy plateaus,
    # and stop early (restoring the best weights) if it stalls
    lr_scheduler = ReduceLROnPlateau(monitor='val_accuracy',
                                     factor=0.4,
                                     patience=7,
                                     min_lr=1e-6,
                                     verbose=1)
    early_stopping = EarlyStopping(monitor='val_accuracy',
                                   min_delta=0.00005,
                                   patience=11,
                                   verbose=1,
                                   restore_best_weights=True)
    callbacks = [early_stopping, lr_scheduler]
    # Define the image data generator for on-the-fly augmentation
    train_datagen = ImageDataGenerator(rotation_range=20,
                                       width_shift_range=0.20,
                                       height_shift_range=0.20,
                                       shear_range=0.15,
                                       zoom_range=0.15,
                                       horizontal_flip=True,
                                       fill_mode='nearest')
    train_datagen.fit(X_train)
    batch_size = 96
    epochs = 24
    steps_per_epoch = len(X_train) // batch_size
    optim = SGD(learning_rate=0.0092, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optim,
                  metrics=['accuracy'])
    # compute_class_weight expects integer class labels, so convert the
    # one-hot y_train back to class indices first
    y_labels = np.argmax(y_train, axis=1)
    class_weights = compute_class_weight(class_weight='balanced',
                                         classes=np.unique(y_labels),
                                         y=y_labels)
    class_weights_dict = dict(enumerate(class_weights))
    history = model.fit(train_datagen.flow(X_train, y_train, batch_size=batch_size),
                        validation_data=(X_valid, y_valid),
                        steps_per_epoch=steps_per_epoch,
                        epochs=epochs,
                        callbacks=callbacks,
                        use_multiprocessing=False,
                        class_weight=class_weights_dict)
    model.save('data/model.h5')
    return history
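
# Sketch, not in the original script: a quick sanity check on the augmentation
# pipeline. Pulling a single batch from the fitted generator confirms the shapes
# fed to model.fit before committing to a full training run (helper name is
# hypothetical).
def preview_batch(datagen, X, y, batch_size=96):
    batch_x, batch_y = next(datagen.flow(X, y, batch_size=batch_size))
    print(batch_x.shape, batch_y.shape)  # e.g. (96, 48, 48, 3) and (96, num_classes)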
def log_metrics(history):
    # NOTE: defined but never called in __main__ below; the MLflow block there
    # logs the same values instead. Relies on the module-level X_train global.
    from dagshub import dagshub_logger
    # Option 1 - as a context manager:
    with dagshub_logger(metrics_path="logs/metrics.csv", hparams_path="logs/params.yml") as logger:
        # Metric logging
        logger.log_metrics(train_accuracy=history.history['accuracy'][-1], step_num=1)
        logger.log_metrics(val_accuracy=history.history['val_accuracy'][-1], step_num=1)
        logger.log_metrics(train_loss=history.history['loss'][-1], step_num=1)
        logger.log_metrics(val_loss=history.history['val_loss'][-1], step_num=1)
        # Hyperparameter logging
        logger.log_hyperparams(optimizer={'type': 'sgd', 'learning_rate': 0.0092,
                                          'momentum': 0.90, 'nesterov': True})
        logger.log_hyperparams(loss='categorical_crossentropy')
        logger.log_hyperparams(batch_size=96)
        logger.log_hyperparams(steps_per_epoch=len(X_train) // 96)
        logger.log_hyperparams(epochs=24)
        logger.log_hyperparams(callbacks=['EarlyStopping', 'ReduceLROnPlateau'])
        logger.log_hyperparams(data_augmentation='ImageDataGenerator')
if __name__ == '__main__':
    # Load the preprocessed train/validation splits from data/processed
    X_train = np.load('data/processed/X_train.npy')
    y_train = np.load('data/processed/y_train.npy')
    X_valid = np.load('data/processed/X_valid.npy')
    y_valid = np.load('data/processed/y_valid.npy')
    img_width = X_train.shape[1]
    img_height = X_train.shape[2]
    img_depth = X_train.shape[3]
    num_classes = y_train.shape[1]

    import os
    from dotenv import load_dotenv
    # Load DagsHub credentials from the .env file; MLflow picks up
    # MLFLOW_TRACKING_USERNAME / MLFLOW_TRACKING_PASSWORD from the environment
    load_dotenv()
    USERNAME = os.getenv('MLFLOW_TRACKING_USERNAME')
    PASSWORD = os.getenv('MLFLOW_TRACKING_PASSWORD')

    import mlflow
    mlflow.set_tracking_uri("https://dagshub.com/GauravMohan1/Emotion-Classification.mlflow")
    with mlflow.start_run():
        model = build_model(num_classes)
        history = train(model, X_train, y_train, X_valid, y_valid)
        metrics = {"train_accuracy": history.history['accuracy'][-1],
                   "val_accuracy": history.history['val_accuracy'][-1],
                   "train_loss": history.history['loss'][-1],
                   "val_loss": history.history['val_loss'][-1]}
        params = {"optimizer": {'type': 'sgd', 'learning_rate': 0.0092,
                                'momentum': 0.90, 'nesterov': True},
                  "loss": 'categorical_crossentropy',
                  'batch_size': 96,
                  'epochs': 24,
                  'callbacks': ['EarlyStopping', 'ReduceLROnPlateau'],
                  'data_augmentation': 'ImageDataGenerator'}
        mlflow.log_metrics(metrics)
        mlflow.log_params(params)
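
# Sketch, not in the original script: loading the checkpoint that train() saves,
# for inference. Assumes data/model.h5 exists and that inputs are 48x48x3 arrays
# preprocessed the same way as the training data.
#
#   loaded = tf.keras.models.load_model('data/model.h5')
#   probs = loaded.predict(X_valid[:1])
#   print(np.argmax(probs, axis=1))  # predicted class index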