1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
|
"""Train a small MNIST classifier and log hyperparameters/metrics to MLflow.

Builds a single-hidden-layer dense network in Keras, trains it with SGD
(Nesterov momentum), and records the run's parameters and final test
metrics under an MLflow run named ``run_example``.
"""
import tensorflow as tf
import mlflow

# Hyperparameters for this run; logged verbatim to MLflow below.
PARAMS = {
    'epoch': 5,
    'b_size': 256,
    'learning_rate': 0.1,
    'momentum': 0.9,
    'use_nesterov': True,
    'number_of_neurons': 512,
    'dropout': 0.25,
}

with mlflow.start_run(run_name='run_example'):
    # Log all hyperparameters in one batched call instead of a per-item loop.
    mlflow.log_params(PARAMS)

    # Load MNIST and scale pixel values from [0, 255] to [0, 1].
    mnist = tf.keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0

    # 28x28 image -> flatten -> dense ReLU -> dropout -> 10-way softmax.
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(PARAMS['number_of_neurons'],
                              activation=tf.nn.relu),
        tf.keras.layers.Dropout(PARAMS['dropout']),
        tf.keras.layers.Dense(10, activation=tf.nn.softmax),
    ])

    # NOTE: the keyword is `learning_rate`; the old `lr` alias is
    # deprecated and removed in current Keras releases.
    optimizer = tf.keras.optimizers.SGD(
        learning_rate=PARAMS['learning_rate'],
        momentum=PARAMS['momentum'],
        nesterov=PARAMS['use_nesterov'],
    )

    # Integer class labels -> sparse categorical cross-entropy.
    model.compile(optimizer=optimizer,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(x_train, y_train,
              epochs=PARAMS['epoch'],
              batch_size=PARAMS['b_size'])

    # Evaluate on the held-out test set and record final metrics.
    test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
    mlflow.log_metric("test_loss", test_loss)
    mlflow.log_metric("test_accuracy", test_acc)
|