Need help filling out the missing sections of this code- the sections.docx


Need help filling out the missing sections of this code. The missing sections are Steps 6, 7, and 9.
Step 1: Load the Tox21 Dataset.
import numpy as np
np.random.seed(456)  # fix NumPy's RNG for reproducibility
import tensorflow as tf
tf.set_random_seed(456)  # fix TensorFlow's graph-level seed (TF 1.x API)
import matplotlib.pyplot as plt
import deepchem as dc
from sklearn.metrics import accuracy_score
_, (train, valid, test), _ = dc.molnet.load_tox21()
train_X, train_y, train_w = train.X, train.y, train.w
valid_X, valid_y, valid_w = valid.X, valid.y, valid.w
test_X, test_y, test_w = test.X, test.y, test.w
Step 2: Remove extra tasks.
# Remove extra tasks
train_y = train_y[:, 0]
valid_y = valid_y[:, 0]
test_y = test_y[:, 0]
train_w = train_w[:, 0]
valid_w = valid_w[:, 0]
test_w = test_w[:, 0]
Step 3: Define placeholders that accept minibatches of different sizes.
# Generate tensorflow graph
d = 1024
n_hidden = 50
learning_rate = .001
n_epochs = 10
batch_size = 100
with tf.name_scope("placeholders"):
x = tf.placeholder(tf.float32, (None, d))
y = tf.placeholder(tf.float32, (None,))
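The None in the first dimension is what lets these placeholders accept minibatches of different sizes: once the session in Step 8 exists, the same x can be fed either a single batch or the whole validation set. A small illustration (not part of the assignment):
# Both feeds are valid against the same (None, d) placeholder:
sess.run(y_pred, feed_dict={x: train_X[:batch_size]})  # one 100-row minibatch
sess.run(y_pred, feed_dict={x: valid_X})               # the full validation set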
Step 4: Implement a hidden layer.
with tf.name_scope("hidden-layer"):
W = tf.Variable(tf.random_normal((d, n_hidden)))
b = tf.Variable(tf.random_normal((n_hidden,)))
x_hidden = tf.nn.relu(tf.matmul(x, W) + b)
Step 5: Complete the fully connected architecture.
with tf.name_scope("output"):
W = tf.Variable(tf.random_normal((n_hidden, 1)))
b = tf.Variable(tf.random_normal((1,)))
y_logit = tf.matmul(x_hidden, W) + b
# the sigmoid gives the class probability of 1
y_one_prob = tf.sigmoid(y_logit)
# Rounding P(y=1) will give the correct prediction.
y_pred = tf.round(y_one_prob)
with tf.name_scope("loss"):
# Compute the cross-entropy term for each datapoint
y_expand = tf.expand_dims(y, 1)
entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=y_logit, labels=y_expand)
# Sum all contributions
l = tf.reduce_sum(entropy)
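For reference, for logits x and labels z, tf.nn.sigmoid_cross_entropy_with_logits computes the numerically stable form max(x, 0) - x*z + log(1 + exp(-|x|)), which is algebraically equal to the binary cross-entropy -[z*log(sigmoid(x)) + (1 - z)*log(1 - sigmoid(x))] but does not overflow for large |x|.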
with tf.name_scope("optim"):
train_op = tf.train.AdamOptimizer(learning_rate).minimize(l)
with tf.name_scope("summaries"):
tf.summary.scalar("loss", l)
merged = tf.summary.merge_all()
Step 6: Add dropout to a hidden layer.
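A minimal sketch for Step 6, assuming the usual TF 1.x pattern of feeding the dropout rate through a placeholder so it can differ between training and evaluation (the name keep_prob and the values suggested in the comment are my choices, not given in the assignment):
# Probability of keeping a unit active; fed as e.g. 0.5 during
# training and 1.0 at evaluation/prediction time.
keep_prob = tf.placeholder(tf.float32)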
Step 7: Define a hidden layer with dropout.
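One way to complete Step 7 is to redefine the Step 4 hidden layer so dropout is applied to its activations; this sketch assumes the keep_prob placeholder from Step 6:
with tf.name_scope("hidden-layer"):
  W = tf.Variable(tf.random_normal((d, n_hidden)))
  b = tf.Variable(tf.random_normal((n_hidden,)))
  x_hidden = tf.nn.relu(tf.matmul(x, W) + b)
  # Randomly drop a fraction (1 - keep_prob) of the activations;
  # tf.nn.dropout rescales the survivors by 1/keep_prob.
  x_hidden = tf.nn.dropout(x_hidden, keep_prob)
Note that once dropout is in the graph, every feed_dict in Step 8 must also supply keep_prob: a conventional choice is 0.5 for the training batches and 1.0 when making predictions.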
Step 8: Implement mini-batch training.
train_writer = tf.summary.FileWriter('/tmp/fcnet-tox21',
                                     tf.get_default_graph())
N = train_X.shape[0]
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  step = 0
  for epoch in range(n_epochs):
    pos = 0
    while pos < N:
      batch_X = train_X[pos:pos+batch_size]
      batch_y = train_y[pos:pos+batch_size]
      feed_dict = {x: batch_X, y: batch_y}
      _, summary, loss = sess.run([train_op, merged, l], feed_dict=feed_dict)
      print("epoch %d, step %d, loss: %f" % (epoch, step, loss))
      train_writer.add_summary(summary, step)
      step += 1
      pos += batch_size
  # Make Predictions
  valid_y_pred = sess.run(y_pred, feed_dict={x: valid_X})
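Since accuracy_score is imported in Step 1 but never used, a natural follow-up (my addition, not required by the assignment) is to score the validation predictions. valid_y_pred comes back from sess.run as an (n_valid, 1) NumPy array, so it is flattened before comparing with valid_y:
# Weighted validation accuracy; valid_w carries DeepChem's per-example weights.
score = accuracy_score(valid_y, valid_y_pred.ravel(), sample_weight=valid_w)
print("Weighted valid accuracy: %f" % score)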
Step 9: Use TensorBoard to track model convergence.
Include screenshots of the following:
1) a TensorBoard graph for the model, and
2) the loss curve.
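For Step 9: the FileWriter in Step 8 already logs the graph and the "loss" scalar to /tmp/fcnet-tox21, so tracking convergence only requires pointing TensorBoard at that directory and taking the two screenshots from the browser UI:
tensorboard --logdir=/tmp/fcnet-tox21
With TensorBoard running (by default at http://localhost:6006), the GRAPHS tab shows the model graph built from the name_scope blocks above, and the SCALARS tab shows the "loss" summary, i.e. the loss curve.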