
python - How to use TFRecord in TensorFlow?

  1. How do I replace the MNIST dataset in the following code with TFRecord data?

  2. Assume the TFRecord dataset has already been prepared: train.tfrecords and test.tfrecords are both in the current .py script's directory.

  3. The TFRecord reading code already exists:

def read_and_decode(filename):
    # Queue of input files; the reader dequeues serialized examples from it.
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    # Each record holds an int64 label and the raw image bytes.
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'label': tf.FixedLenFeature([], tf.int64),
                                           'img_raw': tf.FixedLenFeature([], tf.string),
                                       })
    # Decode the bytes back into a uint8 image, then scale to [-0.5, 0.5].
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [512, 288, 3])
    img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
    label = tf.cast(features['label'], tf.int32)
    return img, label
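
For reference, a file like train.tfrecords would typically have been produced by a writer whose features mirror what read_and_decode parses. This is only a hypothetical sketch (the write_examples helper and the uint8 [512, 288, 3] image layout are assumptions, not part of the question):

import numpy as np
import tensorflow as tf

def write_examples(filename, images, labels):
    # Hypothetical writer: images are uint8 numpy arrays of shape [512, 288, 3].
    writer = tf.python_io.TFRecordWriter(filename)
    for img, label in zip(images, labels):
        example = tf.train.Example(features=tf.train.Features(feature={
            'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(label)])),
            # Raw pixel bytes; read_and_decode recovers them with tf.decode_raw.
            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img.tobytes()])),
        }))
        writer.write(example.SerializeToString())
    writer.close()

The training script that currently reads MNIST: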
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

mnist = input_data.read_data_sets("/tmp/tensorflow/mnist/input_data", one_hot=True)

# Parameters
learning_rate = 0.001
training_iters = 200000
batch_size = 64
display_step = 20

# Network Parameters
n_input = 784  # MNIST data input (img shape: 28*28)
n_classes = 10  # MNIST total classes (0-9 digits)
dropout = 0.75  # Dropout, probability to keep units

# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)


def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))


# Create custom model
def conv2d(name, l_input, w, b):
    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, 1, 1, 1], padding='SAME'), b), name=name)


def max_pool(name, l_input, k):
    return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME', name=name)


def norm(name, l_input, lsize=4):
    return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=name)


def dnn(_x, _weights, _biases, _dropout):
    _x = tf.nn.dropout(_x, _dropout)
    d1 = tf.nn.relu(tf.nn.bias_add(tf.matmul(_x, _weights['wd1']), _biases['bd1']), name="d1")

    d2x = tf.nn.dropout(d1, _dropout)
    d2 = tf.nn.relu(tf.nn.bias_add(tf.matmul(d2x, _weights['wd2']), _biases['bd2']), name="d2")

    dout = tf.nn.dropout(d2, _dropout)
    out = tf.matmul(dout, _weights['out']) + _biases['out']
    return out


# Store layers weight & bias
weights = {
    'wd1': tf.Variable(tf.random_normal([784, 600], stddev=0.01)),
    'wd2': tf.Variable(tf.random_normal([600, 480], stddev=0.01)),
    'out': tf.Variable(tf.random_normal([480, 10]))
}

biases = {
    'bd1': tf.Variable(tf.random_normal([600])),
    'bd2': tf.Variable(tf.random_normal([480])),
    'out': tf.Variable(tf.random_normal([10]))
}

# Construct model
pred = dnn(x, weights, biases, keep_prob)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred))  # TF >= 1.0 requires named arguments
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.global_variables_initializer()

# Summaries for TensorBoard
tf.summary.scalar("loss", cost)
tf.summary.scalar("accuracy", accuracy)
# Merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    summary_writer = tf.summary.FileWriter('/tmp/logs/ex12_dnn', graph=sess.graph)
    step = 1
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        # Fit training using batch data
        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            print("Iter " + str(step * batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc))
            summary_str = sess.run(merged_summary_op, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            summary_writer.add_summary(summary_str, step)
        step += 1
    print("Optimization Finished!")
    # Calculate accuracy for 256 mnist test images
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.}))
    # 98%

I don't know how to use it specifically; after modifying the code several times, I still get an error.

The error is similar to:

ValueError: Only call `softmax_cross_entropy_with_logits` with named arguments (labels=..., logits=..., ...)
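
(The message itself points at the fix for this particular error: since TensorFlow 1.0, softmax_cross_entropy_with_logits only accepts named arguments, so the cost line has to be written with labels= and logits=:

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred))

But even with that fixed, I still don't know how to swap in the TFRecord input.)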
大家讲道理 · asked a few days ago

All replies (1)

  • 黄舟 · 2017-05-18 10:49:09

    I'm not sure I've understood what you mean. The line mnist = input_data.read_data_sets("/tmp/tensorflow/mnist/input_data", one_hot=True) is what reads the MNIST data. Replace it: use the TFRecord reading code to load your TFRecord data, substitute that for mnist in the training code below, and make sure the parameters of the ops you use (convolution shapes, input sizes and so on) match the TFRecord data. A sketch of that wiring follows.
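
    A minimal sketch of that replacement, assuming the queue-based TF 1.x pipeline above. The tf.train.shuffle_batch capacity/min_after_dequeue values are illustrative choices, and the flattening implies n_input (and the first weight matrix) must change from 784 to 512*288*3 to match the decoded images:

    import numpy as np

    img, label = read_and_decode("train.tfrecords")
    # Group single decoded examples into shuffled minibatches.
    img_batch, label_batch = tf.train.shuffle_batch(
        [img, label], batch_size=batch_size,
        capacity=2000, min_after_dequeue=1000)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # string_input_producer and shuffle_batch are queue-backed;
        # start the queue runners or sess.run() will block forever.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        step = 1
        while step * batch_size < training_iters:
            batch_xs, batch_ys = sess.run([img_batch, label_batch])
            batch_xs = batch_xs.reshape(batch_size, -1)  # flatten images for the dense net
            batch_ys = np.eye(n_classes)[batch_ys]       # int labels -> one-hot rows
            sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
            step += 1
        coord.request_stop()
        coord.join(threads)

    Feeding the batched tensors through feed_dict keeps the rest of the original script unchanged; alternatively, the graph could consume img_batch directly in place of the x placeholder.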
