Assignment 6.2b.pdf

D

data science

Model: "sequential_7"

_________________________________________________________________

Layer (type) Output Shape Param # 

=================================================================

conv2d_13 (Conv2D) (None, 32, 32, 32) 896 

_________________________________________________________________

conv2d_14 (Conv2D) (None, 30, 30, 32) 9248 

_________________________________________________________________

max_pooling2d_12 (MaxPooling (None, 15, 15, 32) 0 

_________________________________________________________________

dropout_15 (Dropout) (None, 15, 15, 32) 0 

_________________________________________________________________

conv2d_15 (Conv2D) (None, 13, 13, 64) 18496 

_________________________________________________________________

conv2d_16 (Conv2D) (None, 11, 11, 64) 36928 

_________________________________________________________________

max_pooling2d_13 (MaxPooling (None, 5, 5, 64) 0 

_________________________________________________________________

dropout_16 (Dropout) (None, 5, 5, 64) 0 

_________________________________________________________________

conv2d_17 (Conv2D) (None, 3, 3, 128) 73856 

_________________________________________________________________

conv2d_18 (Conv2D) (None, 1, 1, 128) 147584 

_________________________________________________________________

In [29]: from keras.datasets import cifar10

from keras.utils import to_categorical



(x_train, y_train), (x_test, y_test) = cifar10.load_data()



# preprocessing

x_train = x_train.astype('float32') # for division

x_test = x_test.astype('float32')

x_train /= 255 # normalise

x_test /= 255





num_classes = 10

y_train = to_categorical(y_train, num_classes)

y_test = to_categorical(y_test, num_classes)

In [30]: from keras import models

from keras import layers

model = models.Sequential()

model.add(layers.Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:]))

model.add(layers.Conv2D(32, (3, 3), activation='relu'))

model.add(layers.MaxPooling2D(pool_size=(2, 2)))

model.add(layers.Dropout(0.25))



model.add(layers.Conv2D(64, (3, 3), activation='relu'))

model.add(layers.Conv2D(64, (3, 3), activation='relu'))

model.add(layers.MaxPooling2D(pool_size=(2, 2)))

model.add(layers.Dropout(0.25))



model.add(layers.Conv2D(128, (3, 3), activation='relu'))

model.add(layers.Conv2D(128, (3, 3), activation='relu'))

model.add(layers.MaxPooling2D(pool_size=(2, 2), padding='same'))

model.add(layers.Dropout(0.25))



model.add(layers.Flatten())

model.add(layers.Dense(256, activation='relu'))

model.add(layers.Dropout(0.5))

model.add(layers.Dense(num_classes, activation='softmax'))



model.summary()
max_pooling2d_14 (MaxPooling (None, 1, 1, 128) 0 

_________________________________________________________________

dropout_17 (Dropout) (None, 1, 1, 128) 0 

_________________________________________________________________

flatten_4 (Flatten) (None, 128) 0 

_________________________________________________________________

dense_7 (Dense) (None, 256) 33024 

_________________________________________________________________

dropout_18 (Dropout) (None, 256) 0 

_________________________________________________________________

dense_8 (Dense) (None, 10) 2570 

=================================================================

Total params: 322,602

Trainable params: 322,602

Non-trainable params: 0

_________________________________________________________________

Epoch 1/10

1563/1563 [==============================] - 43s 27ms/step - loss: 1.9481 - accurac
y: 0.2608 - val_loss: 1.2860 - val_accuracy: 0.5321

Epoch 2/10

1563/1563 [==============================] - 40s 26ms/step - loss: 1.3612 - accurac
y: 0.5119 - val_loss: 1.1348 - val_accuracy: 0.5983

Epoch 3/10

1563/1563 [==============================] - 40s 26ms/step - loss: 1.1932 - accurac
y: 0.5855 - val_loss: 1.0013 - val_accuracy: 0.6464

Epoch 4/10

1563/1563 [==============================] - 41s 26ms/step - loss: 1.0883 - accurac
y: 0.6203 - val_loss: 0.9499 - val_accuracy: 0.6663

Epoch 5/10

1563/1563 [==============================] - 39s 25ms/step - loss: 1.0002 - accurac
y: 0.6536 - val_loss: 0.9631 - val_accuracy: 0.6649

Epoch 6/10

1563/1563 [==============================] - 40s 25ms/step - loss: 0.9573 - accurac
y: 0.6677 - val_loss: 0.8771 - val_accuracy: 0.6908

Epoch 7/10

1563/1563 [==============================] - 40s 26ms/step - loss: 0.8971 - accurac
y: 0.6904 - val_loss: 0.9248 - val_accuracy: 0.6825

Epoch 8/10

1563/1563 [==============================] - 39s 25ms/step - loss: 0.8803 - accurac
y: 0.6988 - val_loss: 0.8633 - val_accuracy: 0.6991

Epoch 9/10

1563/1563 [==============================] - 39s 25ms/step - loss: 0.8586 - accurac
y: 0.7057 - val_loss: 0.8159 - val_accuracy: 0.7234

Epoch 10/10

1563/1563 [==============================] - 39s 25ms/step - loss: 0.8308 - accurac
y: 0.7143 - val_loss: 0.8188 - val_accuracy: 0.7245

In [31]: model.compile(loss='categorical_crossentropy',

optimizer='adam',

metrics=['accuracy'])

In [32]: history=model.fit(x_train, y_train, batch_size=32,

validation_data=(x_test, y_test),

epochs=10)

In [33]: train_loss = history.history['loss']

val_loss = history.history['val_loss']



epochs = range(1, len(history.history['loss']) + 1)

In [35]: import matplotlib.pyplot as plt

plt.plot(epochs, train_loss, 'bo', label='Loss_Training')

plt.plot(epochs, val_loss, 'b', label='Loss_Validation')

plt.title('Training and Validation Losses')

plt.xlabel('Epochs')

plt.ylabel('Loss')
<Figure size 432x288 with 0 Axes>
<Figure size 432x288 with 0 Axes>
plt.legend()



plt.show()

plt.savefig('Results/6_2b_lossplot.png')

In [36]: train_loss = history.history['accuracy']

val_loss = history.history['val_accuracy']



epochs = range(1, len(history.history['accuracy']) + 1)

In [37]: plt.plot(epochs, train_loss, 'bo', label='Training accuracy')

plt.plot(epochs, val_loss, 'b', label='Validation accuracy')

plt.title('Training and Validation Accuracy')

plt.xlabel('Epochs')

plt.ylabel('Accuracy')

plt.legend()



plt.show()

plt.savefig('Results/6_2b_accplot.png')

In [38]: model.save('Results/6_2b_model.h5')

In [39]: score = model.evaluate(x_test, y_test)



print('Test accuracy: ', score[1])
313/313 [==============================] - 2s 7ms/step - loss: 0.8188 - accuracy: 0.
7245

Test accuracy: 0.7245000004768372
Actual Predictions

0 [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, ... 3

1 [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, ... 8

2 [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, ... 8

3 [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ... 0

4 [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, ... 6

... ... ...

9995 [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, ... 3

9996 [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, ... 6

9997 [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, ... 5

9998 [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ... 1

9999 [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, ... 7

[10000 rows x 2 columns]

In [41]: import pandas as pd

import numpy as np

predictions = np.argmax(model.predict(x_test), axis=1)


predictions = list(predictions)

actuals = list(y_test)



pred_res = pd.DataFrame({'Actual': actuals, 'Predictions': predictions})

pred_res.to_csv('Results/6_2b_predictions.csv', index=False)

print (pred_res)

In [42]: #Metrics output

with open('Results/6_2b_metrics.txt', 'w') as f:

f.write('Training Loss: {}'.format(str(history.history['loss'])))

f.write('nTraining Accuracy: {}'.format(str(history.history['accuracy'])))

f.write('nTest Loss: {}'.format(score[0]))

f.write('nTest Accuracy: {}'.format(score[1]))

In [ ]:

Más contenido relacionado

Similar a Assignment 6.2b.pdf(20)

Caret Package for RCaret Package for R
Caret Package for R
kmettler13.8K views
Caret max kuhnCaret max kuhn
Caret max kuhn
kmettler1.7K views
DSP LAB COMPLETE CODES.docxDSP LAB COMPLETE CODES.docx
DSP LAB COMPLETE CODES.docx
MUMAR5711 views
Time series data mining techniquesTime series data mining techniques
Time series data mining techniques
Shanmukha S. Potti3.6K views
Arna Friend Controls II FinalArna Friend Controls II Final
Arna Friend Controls II Final
Arna Friend187 views
PerformancePerformance
Performance
Cary Millsap205 views
0013 chapter vi0013 chapter vi
0013 chapter vi
aleli ariola294 views
When RV Meets CEP (RV 2016 Tutorial)When RV Meets CEP (RV 2016 Tutorial)
When RV Meets CEP (RV 2016 Tutorial)
Sylvain Hallé910 views
ADAPTIVE SIMULATED ANNEALING (ASAADAPTIVE SIMULATED ANNEALING (ASA
ADAPTIVE SIMULATED ANNEALING (ASA
Darian Pruitt4 views
Css grid-layoutCss grid-layout
Css grid-layout
Wendy Huang218 views
Module nco rtlModule nco rtl
Module nco rtl
Venkat Malai Avichi360 views

Más de dash41

Assignment7.pdfAssignment7.pdf
Assignment7.pdfdash41
37 views6 Folien
Assignment 6.3.pdfAssignment 6.3.pdf
Assignment 6.3.pdfdash41
16 views2 Folien
Assignment 6.1.pdfAssignment 6.1.pdf
Assignment 6.1.pdfdash41
14 views4 Folien
Assignment 5.3.pdfAssignment 5.3.pdf
Assignment 5.3.pdfdash41
19 views4 Folien
Assignment 4.pdfAssignment 4.pdf
Assignment 4.pdfdash41
29 views10 Folien
Assignment 3.pdfAssignment 3.pdf
Assignment 3.pdfdash41
32 views17 Folien

Más de dash41(8)

Assignment7.pdfAssignment7.pdf
Assignment7.pdf
dash4137 views
Assignment 6.3.pdfAssignment 6.3.pdf
Assignment 6.3.pdf
dash4116 views
Assignment 6.1.pdfAssignment 6.1.pdf
Assignment 6.1.pdf
dash4114 views
Assignment 5.3.pdfAssignment 5.3.pdf
Assignment 5.3.pdf
dash4119 views
Assignment 4.pdfAssignment 4.pdf
Assignment 4.pdf
dash4129 views
Assignment 3.pdfAssignment 3.pdf
Assignment 3.pdf
dash4132 views
rdbms.pdfrdbms.pdf
rdbms.pdf
dash412 views
documentsdb.pdfdocumentsdb.pdf
documentsdb.pdf
dash412 views

Último(20)

MOSORE_BRESCIAMOSORE_BRESCIA
MOSORE_BRESCIA
Federico Karagulian5 views
RuleBookForTheFairDataEconomy.pptxRuleBookForTheFairDataEconomy.pptx
RuleBookForTheFairDataEconomy.pptx
noraelstela166 views
Microsoft Fabric.pptxMicrosoft Fabric.pptx
Microsoft Fabric.pptx
Shruti Chaurasia19 views
Building Real-Time Travel AlertsBuilding Real-Time Travel Alerts
Building Real-Time Travel Alerts
Timothy Spann102 views
Data structure and algorithm. Data structure and algorithm.
Data structure and algorithm.
Abdul salam 12 views
Introduction to Microsoft Fabric.pdfIntroduction to Microsoft Fabric.pdf
Introduction to Microsoft Fabric.pdf
ishaniuudeshika21 views
PTicketInput.pdfPTicketInput.pdf
PTicketInput.pdf
stuartmcphersonflipm314 views
3196 The Case of The East River3196 The Case of The East River
3196 The Case of The East River
ErickANDRADE9011 views
Survey on Factuality in LLM's.pptxSurvey on Factuality in LLM's.pptx
Survey on Factuality in LLM's.pptx
NeethaSherra15 views
RIO GRANDE SUPPLY COMPANY INC, JAYSON.docxRIO GRANDE SUPPLY COMPANY INC, JAYSON.docx
RIO GRANDE SUPPLY COMPANY INC, JAYSON.docx
JaysonGarabilesEspej6 views
PROGRAMME.pdfPROGRAMME.pdf
PROGRAMME.pdf
HiNedHaJar14 views

Assignment 6.2b.pdf

  • 1. Model: "sequential_7" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_13 (Conv2D) (None, 32, 32, 32) 896 _________________________________________________________________ conv2d_14 (Conv2D) (None, 30, 30, 32) 9248 _________________________________________________________________ max_pooling2d_12 (MaxPooling (None, 15, 15, 32) 0 _________________________________________________________________ dropout_15 (Dropout) (None, 15, 15, 32) 0 _________________________________________________________________ conv2d_15 (Conv2D) (None, 13, 13, 64) 18496 _________________________________________________________________ conv2d_16 (Conv2D) (None, 11, 11, 64) 36928 _________________________________________________________________ max_pooling2d_13 (MaxPooling (None, 5, 5, 64) 0 _________________________________________________________________ dropout_16 (Dropout) (None, 5, 5, 64) 0 _________________________________________________________________ conv2d_17 (Conv2D) (None, 3, 3, 128) 73856 _________________________________________________________________ conv2d_18 (Conv2D) (None, 1, 1, 128) 147584 _________________________________________________________________ In [29]: from keras.datasets import cifar10 from keras.utils import to_categorical (x_train, y_train), (x_test, y_test) = cifar10.load_data() # preprocessing x_train = x_train.astype('float32') # for division x_test = x_test.astype('float32') x_train /= 255 # normalise x_test /= 255 num_classes = 10 y_train = to_categorical(y_train, num_classes) y_test = to_categorical(y_test, num_classes) In [30]: from keras import models from keras import layers model = models.Sequential() model.add(layers.Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:])) model.add(layers.Conv2D(32, (3, 3), activation='relu')) model.add(layers.MaxPooling2D(pool_size=(2, 2))) model.add(layers.Dropout(0.25)) model.add(layers.Conv2D(64, (3, 3), activation='relu')) model.add(layers.Conv2D(64, (3, 3), activation='relu')) model.add(layers.MaxPooling2D(pool_size=(2, 2))) model.add(layers.Dropout(0.25)) model.add(layers.Conv2D(128, (3, 3), activation='relu')) model.add(layers.Conv2D(128, (3, 3), activation='relu')) model.add(layers.MaxPooling2D(pool_size=(2, 2), padding='same')) model.add(layers.Dropout(0.25)) model.add(layers.Flatten()) model.add(layers.Dense(256, activation='relu')) model.add(layers.Dropout(0.5)) model.add(layers.Dense(num_classes, activation='softmax')) model.summary()
  • 2. max_pooling2d_14 (MaxPooling (None, 1, 1, 128) 0 _________________________________________________________________ dropout_17 (Dropout) (None, 1, 1, 128) 0 _________________________________________________________________ flatten_4 (Flatten) (None, 128) 0 _________________________________________________________________ dense_7 (Dense) (None, 256) 33024 _________________________________________________________________ dropout_18 (Dropout) (None, 256) 0 _________________________________________________________________ dense_8 (Dense) (None, 10) 2570 ================================================================= Total params: 322,602 Trainable params: 322,602 Non-trainable params: 0 _________________________________________________________________ Epoch 1/10 1563/1563 [==============================] - 43s 27ms/step - loss: 1.9481 - accurac y: 0.2608 - val_loss: 1.2860 - val_accuracy: 0.5321 Epoch 2/10 1563/1563 [==============================] - 40s 26ms/step - loss: 1.3612 - accurac y: 0.5119 - val_loss: 1.1348 - val_accuracy: 0.5983 Epoch 3/10 1563/1563 [==============================] - 40s 26ms/step - loss: 1.1932 - accurac y: 0.5855 - val_loss: 1.0013 - val_accuracy: 0.6464 Epoch 4/10 1563/1563 [==============================] - 41s 26ms/step - loss: 1.0883 - accurac y: 0.6203 - val_loss: 0.9499 - val_accuracy: 0.6663 Epoch 5/10 1563/1563 [==============================] - 39s 25ms/step - loss: 1.0002 - accurac y: 0.6536 - val_loss: 0.9631 - val_accuracy: 0.6649 Epoch 6/10 1563/1563 [==============================] - 40s 25ms/step - loss: 0.9573 - accurac y: 0.6677 - val_loss: 0.8771 - val_accuracy: 0.6908 Epoch 7/10 1563/1563 [==============================] - 40s 26ms/step - loss: 0.8971 - accurac y: 0.6904 - val_loss: 0.9248 - val_accuracy: 0.6825 Epoch 8/10 1563/1563 [==============================] - 39s 25ms/step - loss: 0.8803 - accurac y: 0.6988 - val_loss: 0.8633 - val_accuracy: 0.6991 Epoch 9/10 1563/1563 [==============================] - 39s 25ms/step - loss: 0.8586 - accurac y: 0.7057 - val_loss: 0.8159 - val_accuracy: 0.7234 Epoch 10/10 1563/1563 [==============================] - 39s 25ms/step - loss: 0.8308 - accurac y: 0.7143 - val_loss: 0.8188 - val_accuracy: 0.7245 In [31]: model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) In [32]: history=model.fit(x_train, y_train, batch_size=32, validation_data=(x_test, y_test), epochs=10) In [33]: train_loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(history.history['loss']) + 1) In [35]: import matplotlib.pyplot as plt plt.plot(epochs, train_loss, 'bo', label='Loss_Training') plt.plot(epochs, val_loss, 'b', label='Loss_Validation') plt.title('Training and Validation Losses') plt.xlabel('Epochs') plt.ylabel('Loss')
  • 3. <Figure size 432x288 with 0 Axes> <Figure size 432x288 with 0 Axes> plt.legend() plt.show() plt.savefig('Results/6_2b_lossplot.png') In [36]: train_loss = history.history['accuracy'] val_loss = history.history['val_accuracy'] epochs = range(1, len(history.history['accuracy']) + 1) In [37]: plt.plot(epochs, train_loss, 'bo', label='Training accuracy') plt.plot(epochs, val_loss, 'b', label='Validation accuracy') plt.title('Training and Validation Accuracy') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.legend() plt.show() plt.savefig('Results/6_2b_accplot.png') In [38]: model.save('Results/6_2b_model.h5') In [39]: score = model.evaluate(x_test, y_test) print('Test accuracy: ', score[1])
  • 4. 313/313 [==============================] - 2s 7ms/step - loss: 0.8188 - accuracy: 0. 7245 Test accuracy: 0.7245000004768372 Actual Predictions 0 [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, ... 3 1 [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, ... 8 2 [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, ... 8 3 [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ... 0 4 [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, ... 6 ... ... ... 9995 [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, ... 3 9996 [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, ... 6 9997 [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, ... 5 9998 [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ... 1 9999 [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, ... 7 [10000 rows x 2 columns] In [41]: import pandas as pd import numpy as np predictions = np.argmax(model.predict(x_test), axis=1) predictions = list(predictions) actuals = list(y_test) pred_res = pd.DataFrame({'Actual': actuals, 'Predictions': predictions}) pred_res.to_csv('Results/6_2b_predictions.csv', index=False) print (pred_res) In [42]: #Metrics output with open('Results/6_2b_metrics.txt', 'w') as f: f.write('Training Loss: {}'.format(str(history.history['loss']))) f.write('nTraining Accuracy: {}'.format(str(history.history['accuracy']))) f.write('nTest Loss: {}'.format(score[0])) f.write('nTest Accuracy: {}'.format(score[1])) In [ ]: