from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
import sklearn.datasets
import datetime
import matplotlib.pyplot as plt
import numpy as np

train_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-train.txt"
test_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-test.txt"

# Load the MNIST digits (libsvm format) as dense 784-feature arrays
X_train, y_train = sklearn.datasets.load_svmlight_file(train_libsvm, n_features=784)
X_train = X_train.toarray()

X_test, y_test = sklearn.datasets.load_svmlight_file(test_libsvm, n_features=784)
X_test = X_test.toarray()

# A small fully-connected network: 784 -> 20 -> 15 -> 10
model = Sequential()
model.add(Dense(20, input_dim=784, kernel_initializer='normal', activation='relu'))
model.add(Dense(15, kernel_initializer='normal', activation='relu'))
model.add(Dense(10, kernel_initializer='normal', activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])

categorical_labels = to_categorical(y_train, num_classes=10)

start = datetime.datetime.today()

history = model.fit(X_train, categorical_labels, epochs=40, batch_size=100, validation_split=0.1, verbose=2)

scores = model.evaluate(X_test, to_categorical(y_test, num_classes=10))

print()
for i in range(len(model.metrics_names)):
    print("%s: %f" % (model.metrics_names[i], scores[i]))

print("Start: " + str(start))
end = datetime.datetime.today()
print("End: " + str(end))
print("Elapse: " + str(end - start))
Using TensorFlow backend.
Train on 54000 samples, validate on 6000 samples
Epoch 1/40
- 1s - loss: 0.5369 - categorical_accuracy: 0.8288 - val_loss: 0.2106 - val_categorical_accuracy: 0.9407
Epoch 2/40
- 1s - loss: 0.2335 - categorical_accuracy: 0.9312 - val_loss: 0.1685 - val_categorical_accuracy: 0.9528
Epoch 3/40
- 1s - loss: 0.1895 - categorical_accuracy: 0.9433 - val_loss: 0.1552 - val_categorical_accuracy: 0.9570
Epoch 4/40
- 1s - loss: 0.1637 - categorical_accuracy: 0.9516 - val_loss: 0.1589 - val_categorical_accuracy: 0.9540
Epoch 5/40
- 1s - loss: 0.1516 - categorical_accuracy: 0.9549 - val_loss: 0.1519 - val_categorical_accuracy: 0.9560
Epoch 6/40
- 1s - loss: 0.1381 - categorical_accuracy: 0.9594 - val_loss: 0.1347 - val_categorical_accuracy: 0.9615
Epoch 7/40
- 1s - loss: 0.1300 - categorical_accuracy: 0.9616 - val_loss: 0.1472 - val_categorical_accuracy: 0.9612
Epoch 8/40
- 1s - loss: 0.1271 - categorical_accuracy: 0.9616 - val_loss: 0.1351 - val_categorical_accuracy: 0.9640
Epoch 9/40
- 1s - loss: 0.1203 - categorical_accuracy: 0.9637 - val_loss: 0.1340 - val_categorical_accuracy: 0.9635
Epoch 10/40
- 1s - loss: 0.1120 - categorical_accuracy: 0.9667 - val_loss: 0.1468 - val_categorical_accuracy: 0.9635
Epoch 11/40
- 1s - loss: 0.1059 - categorical_accuracy: 0.9689 - val_loss: 0.1480 - val_categorical_accuracy: 0.9618
Epoch 12/40
- 1s - loss: 0.1056 - categorical_accuracy: 0.9680 - val_loss: 0.1410 - val_categorical_accuracy: 0.9617
Epoch 13/40
- 1s - loss: 0.1028 - categorical_accuracy: 0.9687 - val_loss: 0.1451 - val_categorical_accuracy: 0.9585
Epoch 14/40
- 1s - loss: 0.0981 - categorical_accuracy: 0.9699 - val_loss: 0.1362 - val_categorical_accuracy: 0.9663
Epoch 15/40
- 1s - loss: 0.0930 - categorical_accuracy: 0.9721 - val_loss: 0.1333 - val_categorical_accuracy: 0.9642
Epoch 16/40
- 1s - loss: 0.0926 - categorical_accuracy: 0.9722 - val_loss: 0.1502 - val_categorical_accuracy: 0.9628
Epoch 17/40
- 1s - loss: 0.0896 - categorical_accuracy: 0.9734 - val_loss: 0.1454 - val_categorical_accuracy: 0.9653
Epoch 18/40
- 1s - loss: 0.0871 - categorical_accuracy: 0.9730 - val_loss: 0.1296 - val_categorical_accuracy: 0.9677
Epoch 19/40
- 1s - loss: 0.0856 - categorical_accuracy: 0.9742 - val_loss: 0.1416 - val_categorical_accuracy: 0.9638
Epoch 20/40
- 1s - loss: 0.0859 - categorical_accuracy: 0.9735 - val_loss: 0.1438 - val_categorical_accuracy: 0.9655
Epoch 21/40
- 1s - loss: 0.0825 - categorical_accuracy: 0.9750 - val_loss: 0.1443 - val_categorical_accuracy: 0.9643
Epoch 22/40
- 1s - loss: 0.0795 - categorical_accuracy: 0.9756 - val_loss: 0.1569 - val_categorical_accuracy: 0.9635
Epoch 23/40
- 1s - loss: 0.0770 - categorical_accuracy: 0.9761 - val_loss: 0.1673 - val_categorical_accuracy: 0.9593
Epoch 24/40
- 1s - loss: 0.0781 - categorical_accuracy: 0.9761 - val_loss: 0.1710 - val_categorical_accuracy: 0.9598
Epoch 25/40
- 1s - loss: 0.0743 - categorical_accuracy: 0.9774 - val_loss: 0.1568 - val_categorical_accuracy: 0.9623
Epoch 26/40
- 1s - loss: 0.0732 - categorical_accuracy: 0.9776 - val_loss: 0.1535 - val_categorical_accuracy: 0.9635
Epoch 27/40
- 1s - loss: 0.0730 - categorical_accuracy: 0.9772 - val_loss: 0.1519 - val_categorical_accuracy: 0.9642
Epoch 28/40
- 1s - loss: 0.0684 - categorical_accuracy: 0.9790 - val_loss: 0.1465 - val_categorical_accuracy: 0.9627
Epoch 29/40
- 1s - loss: 0.0701 - categorical_accuracy: 0.9791 - val_loss: 0.1636 - val_categorical_accuracy: 0.9618
Epoch 30/40
- 1s - loss: 0.0678 - categorical_accuracy: 0.9792 - val_loss: 0.1657 - val_categorical_accuracy: 0.9632
Epoch 31/40
- 1s - loss: 0.0718 - categorical_accuracy: 0.9776 - val_loss: 0.1729 - val_categorical_accuracy: 0.9613
Epoch 32/40
- 1s - loss: 0.0627 - categorical_accuracy: 0.9800 - val_loss: 0.1766 - val_categorical_accuracy: 0.9630
Epoch 33/40
- 1s - loss: 0.0660 - categorical_accuracy: 0.9799 - val_loss: 0.1687 - val_categorical_accuracy: 0.9637
Epoch 34/40
- 1s - loss: 0.0658 - categorical_accuracy: 0.9794 - val_loss: 0.1725 - val_categorical_accuracy: 0.9625
Epoch 35/40
- 1s - loss: 0.0645 - categorical_accuracy: 0.9801 - val_loss: 0.1877 - val_categorical_accuracy: 0.9598
Epoch 36/40
- 1s - loss: 0.0597 - categorical_accuracy: 0.9811 - val_loss: 0.1848 - val_categorical_accuracy: 0.9615
Epoch 37/40
- 1s - loss: 0.0636 - categorical_accuracy: 0.9796 - val_loss: 0.1783 - val_categorical_accuracy: 0.9623
Epoch 38/40
- 1s - loss: 0.0613 - categorical_accuracy: 0.9799 - val_loss: 0.1982 - val_categorical_accuracy: 0.9620
Epoch 39/40
- 1s - loss: 0.0606 - categorical_accuracy: 0.9806 - val_loss: 0.1946 - val_categorical_accuracy: 0.9603
Epoch 40/40
- 1s - loss: 0.0607 - categorical_accuracy: 0.9809 - val_loss: 0.1808 - val_categorical_accuracy: 0.9625
10000/10000 [==============================] - 0s 18us/step
loss: 0.238445
categorical_accuracy: 0.954700
Start: 2019-06-17 12:55:54.756491
End: 2019-06-17 12:56:34.303440
Elapse: 0:00:39.546949
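The validation loss above stops improving after roughly 18 epochs and drifts upward while training loss keeps falling. Since matplotlib is already imported and the History object was kept, a minimal sketch (not part of the original run) for plotting the two curves:

# Sketch: plot training vs. validation loss from the History returned by model.fit.
# With validation_split set, Keras records 'loss' and 'val_loss' per epoch.
fig, ax = plt.subplots()
ax.plot(history.history['loss'], label='train loss')
ax.plot(history.history['val_loss'], label='validation loss')
ax.set_xlabel('epoch')
ax.set_ylabel('categorical cross-entropy')
ax.legend()
plt.show()  # in a Databricks notebook, display(fig) may be needed instead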
train_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-train.txt"
test_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-test.txt"

X_train, y_train = sklearn.datasets.load_svmlight_file(train_libsvm, n_features=784)
X_train = X_train.toarray()

X_test, y_test = sklearn.datasets.load_svmlight_file(test_libsvm, n_features=784)
X_test = X_test.toarray()

X_train = X_train.reshape( (X_train.shape[0], 28, 28, 1) )
X_train = X_train.astype('float32')
X_train /= 255
y_train = to_categorical(y_train, num_classes=10)

X_test = X_test.reshape( (X_test.shape[0], 28, 28, 1) )
X_test = X_test.astype('float32')
X_test /= 255
y_test = to_categorical(y_test, num_classes=10)
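It can help to sanity-check the reshape before building a convnet. A minimal sketch, assuming the cell above has run so that X_train is (n, 28, 28, 1) in [0, 1] and y_train is one-hot:

# Sketch: display the first training image and its label to confirm the reshape is sane
sample = X_train[0].reshape(28, 28)    # drop the trailing channel dimension
label = int(np.argmax(y_train[0]))     # y_train is one-hot after to_categorical
plt.imshow(sample, cmap='gray')
plt.title("label: %d" % label)
plt.show()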
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D

model = Sequential()

model.add(Conv2D(8,               # number of kernels
                 (4, 4),          # kernel size
                 padding='valid', # no padding; output will be smaller than input
                 input_shape=(28, 28, 1)))
model.add(Activation('relu'))

model.add(MaxPooling2D(pool_size=(2,2)))

model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))     # alternative syntax for applying activation

model.add(Dense(10))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
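Before training, it is worth confirming the layer shapes: a 4x4 'valid' convolution over a 28x28 input gives 25x25x8 feature maps, 2x2 max pooling reduces that to 12x12x8, and flattening yields 1152 features feeding the dense layer. A one-line check:

# Print per-layer output shapes and parameter counts for the model defined above
model.summary()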
start = datetime.datetime.today()

history = model.fit(X_train, y_train, batch_size=128, epochs=8, verbose=2, validation_split=0.1)

scores = model.evaluate(X_test, y_test, verbose=1)

print()
for i in range(len(model.metrics_names)):
    print("%s: %f" % (model.metrics_names[i], scores[i]))
Train on 54000 samples, validate on 6000 samples
Epoch 1/8
- 11s - loss: 0.2796 - acc: 0.9216 - val_loss: 0.1007 - val_acc: 0.9740
Epoch 2/8
- 10s - loss: 0.0862 - acc: 0.9743 - val_loss: 0.0852 - val_acc: 0.9758
Epoch 3/8
- 10s - loss: 0.0611 - acc: 0.9815 - val_loss: 0.0562 - val_acc: 0.9843
Epoch 4/8
- 10s - loss: 0.0472 - acc: 0.9852 - val_loss: 0.0491 - val_acc: 0.9873
Epoch 5/8
- 10s - loss: 0.0378 - acc: 0.9889 - val_loss: 0.0533 - val_acc: 0.9842
Epoch 6/8
- 10s - loss: 0.0319 - acc: 0.9899 - val_loss: 0.0490 - val_acc: 0.9867
Epoch 7/8
- 10s - loss: 0.0259 - acc: 0.9921 - val_loss: 0.0477 - val_acc: 0.9882
Epoch 8/8
- 10s - loss: 0.0213 - acc: 0.9935 - val_loss: 0.0436 - val_acc: 0.9888
10000/10000 [==============================] - 1s 131us/step
loss: 0.040816
acc: 0.985200
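Beyond aggregate accuracy, a per-class view shows which digits the convnet still confuses. A minimal sketch using scikit-learn (not part of the original notebook):

from sklearn.metrics import confusion_matrix

# Sketch: per-digit confusion matrix on the test set.
# model.predict returns class probabilities; argmax recovers the predicted digit,
# and y_test is one-hot, so argmax recovers the true digit as well.
predicted = np.argmax(model.predict(X_test), axis=1)
actual = np.argmax(y_test, axis=1)
print(confusion_matrix(actual, predicted))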
model = Sequential()

model.add(Conv2D(8,               # number of kernels
                 (4, 4),          # kernel size
                 padding='valid',
                 input_shape=(28, 28, 1)))
model.add(Activation('relu'))

model.add(Conv2D(8, (4, 4)))
model.add(Activation('relu'))

model.add(MaxPooling2D(pool_size=(2,2)))

model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))

model.add(Dense(10))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

history = model.fit(X_train, y_train, batch_size=128, epochs=15, verbose=2, validation_split=0.1)

scores = model.evaluate(X_test, y_test, verbose=1)

print()
for i in range(len(model.metrics_names)):
    print("%s: %f" % (model.metrics_names[i], scores[i]))
Train on 54000 samples, validate on 6000 samples
Epoch 1/15
- 20s - loss: 0.2972 - acc: 0.9140 - val_loss: 0.0893 - val_acc: 0.9762
Epoch 2/15
- 20s - loss: 0.0786 - acc: 0.9763 - val_loss: 0.0598 - val_acc: 0.9835
Epoch 3/15
- 20s - loss: 0.0533 - acc: 0.9840 - val_loss: 0.0445 - val_acc: 0.9873
Epoch 4/15
- 20s - loss: 0.0411 - acc: 0.9871 - val_loss: 0.0481 - val_acc: 0.9872
Epoch 5/15
- 20s - loss: 0.0311 - acc: 0.9899 - val_loss: 0.0476 - val_acc: 0.9878
Epoch 6/15
- 20s - loss: 0.0242 - acc: 0.9922 - val_loss: 0.0427 - val_acc: 0.9877
Epoch 7/15
- 20s - loss: 0.0206 - acc: 0.9934 - val_loss: 0.0459 - val_acc: 0.9880
Epoch 8/15
- 20s - loss: 0.0165 - acc: 0.9944 - val_loss: 0.0409 - val_acc: 0.9900
Epoch 9/15
- 20s - loss: 0.0140 - acc: 0.9954 - val_loss: 0.0467 - val_acc: 0.9880
Epoch 10/15
- 20s - loss: 0.0124 - acc: 0.9960 - val_loss: 0.0479 - val_acc: 0.9885
Epoch 11/15
- 20s - loss: 0.0092 - acc: 0.9968 - val_loss: 0.0484 - val_acc: 0.9868
Epoch 12/15
- 20s - loss: 0.0094 - acc: 0.9968 - val_loss: 0.0414 - val_acc: 0.9903
Epoch 13/15
- 20s - loss: 0.0080 - acc: 0.9972 - val_loss: 0.0544 - val_acc: 0.9892
Epoch 14/15
- 20s - loss: 0.0065 - acc: 0.9978 - val_loss: 0.0546 - val_acc: 0.9882
Epoch 15/15
- 20s - loss: 0.0065 - acc: 0.9976 - val_loss: 0.0505 - val_acc: 0.9870
10000/10000 [==============================] - 2s 173us/step
loss: 0.054492
acc: 0.986500
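Note the overfitting signature in the run above: training accuracy climbs to about 99.8% while validation loss is lowest around epoch 8 and then drifts upward. One mitigation, besides the dropout used in the next model, is to stop training when validation loss stops improving. A minimal sketch, assuming a Keras version (2.2.3+) that supports restore_best_weights:

from keras.callbacks import EarlyStopping

# Sketch: stop once val_loss has not improved for 3 epochs and keep the best weights seen
early_stop = EarlyStopping(monitor='val_loss', patience=3, restore_best_weights=True)
history = model.fit(X_train, y_train,
                    batch_size=128, epochs=15, verbose=2,
                    validation_split=0.1,
                    callbacks=[early_stop])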
model = Sequential()

model.add(Conv2D(32,              # number of kernels
                 (3, 3),          # kernel size
                 padding='valid',
                 input_shape=(28, 28, 1)))
model.add(Activation('relu'))

model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))

model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))          # <- regularize

model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))           # <- regularize

model.add(Dense(10))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

history = model.fit(X_train, y_train, batch_size=128, epochs=15, verbose=2)

scores = model.evaluate(X_test, y_test, verbose=2)

print()
for i in range(len(model.metrics_names)):
    print("%s: %f" % (model.metrics_names[i], scores[i]))
Epoch 1/15
- 68s - loss: 0.2545 - acc: 0.9228
Epoch 2/15
- 66s - loss: 0.0919 - acc: 0.9724
Epoch 3/15
- 66s - loss: 0.0688 - acc: 0.9792
Epoch 4/15
- 66s - loss: 0.0575 - acc: 0.9823
Epoch 5/15
- 66s - loss: 0.0512 - acc: 0.9840
Epoch 6/15
- 65s - loss: 0.0445 - acc: 0.9861
Epoch 7/15
- 65s - loss: 0.0376 - acc: 0.9882
Epoch 8/15
- 65s - loss: 0.0376 - acc: 0.9883
Epoch 9/15
- 65s - loss: 0.0340 - acc: 0.9888
Epoch 10/15
- 65s - loss: 0.0291 - acc: 0.9905
Epoch 11/15
- 65s - loss: 0.0290 - acc: 0.9908
Epoch 12/15
- 66s - loss: 0.0271 - acc: 0.9911
Epoch 13/15
- 65s - loss: 0.0248 - acc: 0.9919
Epoch 14/15
- 65s - loss: 0.0241 - acc: 0.9922
Epoch 15/15
- 65s - loss: 0.0207 - acc: 0.9931
loss: 0.030544
acc: 0.991500
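Once a model is worth keeping, it can be persisted and reloaded without retraining. A minimal sketch; the DBFS path below is just an example location, not one from the original notebook:

from keras.models import load_model

# Sketch: save architecture + weights + optimizer state to one HDF5 file, then reload it
model.save("/dbfs/tmp/mnist_cnn_dropout.h5")            # example path, adjust to your workspace
restored = load_model("/dbfs/tmp/mnist_cnn_dropout.h5")
print(restored.evaluate(X_test, y_test, verbose=0))     # [loss, accuracy] on the test set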
SDS-2.x, Scalable Data Engineering Science
This is a 2019 augmentation and update of Adam Breindel's initial notebooks.