TensorFlow 2.0 & Keras

Keras is compatible with TensorFlow and also with Theano (it can run on either backend).

https://www.tensorflow.org/tutorials/

Basic

import tensorflow as tf
import numpy as np
import pandas as pd
from tensorflow import keras
from tensorflow.keras import layers
tf.keras.backend.clear_session()


COLUMN_NAMES = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth', 'Species']
SPECIES = ['Setosa', 'Versicolor', 'Virginica']

# Get the train and test sets directly from TensorFlow, so we won't do a train/test split in this case
# Download the datasets from the TensorFlow site to the local machine
train_path = tf.keras.utils.get_file(
    "iris_training.csv", "https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv")
test_path = tf.keras.utils.get_file(
    "iris_test.csv", "https://storage.googleapis.com/download.tensorflow.org/data/iris_test.csv")

train = pd.read_csv(train_path, names=COLUMN_NAMES, header=0)
test = pd.read_csv(test_path, names=COLUMN_NAMES, header=0)

print(train.head())

# Separate features and labels by removing Species from the train and test sets and storing it in train_y and test_y
train_y = train.pop('Species')
test_y = test.pop('Species')

#----------------------
#Create Model
#----------------------
inputs = keras.Input(shape=(4,), name='flowers')
# inputs is a Tensor: Tensor("flowers:0", shape=(None, 4), dtype=float32)

x = layers.Dense(units=64, activation='relu', name='layer_1')(inputs)
x = layers.Dense(units=64, activation='relu', name='layer_2')(x)
outputs = layers.Dense(units=3, activation='softmax', name='predictions')(x)

model = keras.Model(inputs=inputs, outputs=outputs)
tensorboard_callback = keras.callbacks.TensorBoard(log_dir="logs")

# Optimizer that minimizes the loss function (a required argument)
model.compile(optimizer=keras.optimizers.RMSprop(),
              # Loss function to minimize (also a required argument)
              loss=keras.losses.SparseCategoricalCrossentropy(),
              # List of metrics to monitor
              metrics=[keras.metrics.SparseCategoricalAccuracy()])
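
# The same configuration can also be written with Keras string shortcuts;
# an equivalent (commented-out) sketch using the built-in string identifiers:
# model.compile(optimizer='rmsprop',
#               loss='sparse_categorical_crossentropy',
#               metrics=['sparse_categorical_accuracy'])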


#----------------------
#Train
#----------------------
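# The Iris training CSV has 120 rows: take the first 100 for training and hold out the rest for validation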
X = train.values[:100]
y = train_y.values[:100]
x_val = train.values[100:]
y_val = train_y.values[100:]
history = model.fit(X, y,
                    batch_size=64,
                    epochs=100,
                    callbacks=[tensorboard_callback],
                    # We pass some validation for
                    # monitoring validation loss and metrics
                    # at the end of each epoch
                    validation_data=(x_val, y_val))
print(history.history)
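
# history.history is a dict keyed by metric name (e.g. 'loss', 'val_loss').
# A minimal sketch of plotting the training curves (assumes matplotlib is installed):
import matplotlib.pyplot as plt

plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()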


#----------------------
#Analyse
#----------------------
print(model.summary())
keras.utils.plot_model(model, show_shapes=True)
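# Note: plot_model writes the graph to an image file (model.png by default) and requires the pydot and graphviz packages to be installed.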

#----------------------
#Predict
#----------------------
x_test = test.values
y_test = test_y.values
results = model.evaluate(x_test, y_test, batch_size=64)
print('test loss, test acc:', results)
res = model.evaluate(train.values, train_y.values, batch_size=64)
print('train loss, train acc:', res)
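
# evaluate() only reports loss and accuracy; a minimal sketch of reading the
# per-class softmax probabilities with model.predict:
probs = model.predict(x_test[:3])
for p in probs:
    print(SPECIES[np.argmax(p)], p)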

#----------------------
#Visualize
#----------------------
# %tensorboard --logdir logs

https://colab.research.google.com/drive/1c7YkLvrXxwYU7yDZCEtEYGeVkILkaHt_

Examples for beginners

import tensorflow as tf

tf.keras.backend.clear_session()

mnist = tf.keras.datasets.mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

model = tf.keras.models.Sequential([
  tf.keras.layers.Flatten(input_shape=(28, 28)),
  tf.keras.layers.Dense(128, activation='relu'),
  tf.keras.layers.Dropout(0.2),
  tf.keras.layers.Dense(10, activation='softmax')
])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=5)

model.evaluate(x_test,  y_test, verbose=2)

This image classifier already reaches 98% accuracy.
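
Since the final layer is already a softmax, a prediction can be read off directly. A minimal sketch classifying the first test image (numpy is an extra import here):

import numpy as np

probs = model.predict(x_test[:1])
print('predicted digit:', np.argmax(probs[0]), 'true label:', y_test[0])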

TensorFlow Estimator

https://colab.research.google.com/drive/1wgk2mYRZLVlpVyipuJA6eXi06ZTBTxF1#scrollTo=y1jxA2nmrpbR

import tensorflow as tf
import numpy as np
import pandas as pd

COLUMN_NAMES = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth', 'Species']
SPECIES = ['Setosa', 'Versicolor', 'Virginica']

train_path = tf.keras.utils.get_file(
    "iris_training.csv", "https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv")
test_path = tf.keras.utils.get_file(
    "iris_test.csv", "https://storage.googleapis.com/download.tensorflow.org/data/iris_test.csv")

train = pd.read_csv(train_path, names=COLUMN_NAMES, header=0)
test = pd.read_csv(test_path, names=COLUMN_NAMES, header=0)

train_y = train.pop('Species')
test_y = test.pop('Species')


# Create Model
# The input_fn function converts the pandas DataFrame into a tf.data.Dataset, the only input format a TensorFlow Estimator accepts for training, evaluation, and prediction.
# In training mode the function returns randomly shuffled samples from the dataset in batches of 256.
def input_fn(features, labels, training=True, batch_size=256):
    """An input function for training or evaluating"""
    # Convert the inputs to a TensorFlow Dataset.
    # print(dict(features))
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))

    # Shuffle and repeat if you are in training mode.
    if training:
        dataset = dataset.shuffle(1000).repeat()

    return dataset.batch(batch_size)
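
# To sanity-check what input_fn produces, pull one batch from the returned dataset
# (eager execution, the TF 2.0 default, lets us iterate it directly);
# features come back as a dict mapping column name -> tensor:
for features_batch, labels_batch in input_fn(train, train_y, training=False).take(1):
    print(list(features_batch.keys()))
    print(labels_batch[:5])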

feature_columns = []
for key in train.keys():
    feature_columns.append(tf.feature_column.numeric_column(key=key))

print(feature_columns)


logistic_regression_classifier = tf.estimator.LinearClassifier(
    feature_columns= feature_columns,
    # The model's location in your computer
    model_dir="./model/logs/LR",
    # The model must choose between 3 classes.
    n_classes=3
)

# This exercise also uses a DNNClassifier, a pre-made estimator in TensorFlow 2.0 (essentially a multilayer perceptron) designed for multi-class classification, which makes it a natural fit for the Iris dataset and its 3 class labels.
# The DNNClassifier below has 2 hidden layers of 60 and 30 neurons: an input layer takes the 4 features, and the output layer produces a probability estimate for each of the 3 classes. For example, one output could be {'Setosa': 90%, 'Versicolor': 2%, 'Virginica': 8%}.

# Build a DNN with 2 hidden layers of 60 and 30 hidden nodes respectively.
DNNClassifier = tf.estimator.DNNClassifier(
    feature_columns= feature_columns,
    hidden_units=[60, 30],
    # The model's location in your computer
    model_dir="./model/logs/DNN",
    # The model must choose between 3 classes.
    n_classes=3)

# Training
# We train both models for 7000 steps to compare their performance. A step is one training iteration on a single batch of data (one forward pass plus one gradient update).
# Note that logistic regression is not a neural network (it uses a single sigmoid/softmax activation and can be thought of as a one-neuron network),
# whereas the DNNClassifier is a neural-network model, a multilayer perceptron.
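# Back-of-the-envelope check on the amount of training (batch_size=256 as in input_fn above):
# 7000 steps x 256 samples/step = ~1.79 million samples drawn from the 120-row training set,
# i.e. the shuffled-and-repeated dataset is cycled through roughly 15,000 times.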

logistic_regression_classifier.train(
    input_fn=lambda: input_fn(train, train_y, training=True), steps=7000
)

#Train the Model
DNNClassifier.train(
    input_fn=lambda: input_fn(train, train_y, training=True), steps=7000
)

# Evaluating
# Accuracies using Logistic Regression
eval_result = logistic_regression_classifier.evaluate(
    input_fn=lambda: input_fn(test, test_y, training=False), name='LR test')

print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

eval_result_train = logistic_regression_classifier.evaluate(
    input_fn=lambda: input_fn(train, train_y, training=False), name='LR train')

print('\nTrain set accuracy: {accuracy:0.3f}\n'.format(**eval_result_train))

# Accuracies using the multilayer perceptron (DNNClassifier)
# The name argument of evaluate() labels the run so the two models' accuracies can be distinguished in TensorBoard
eval_result = DNNClassifier.evaluate(
    input_fn=lambda: input_fn(test, test_y, training=False), name='DNN test')

print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

eval_result_train = DNNClassifier.evaluate(
    input_fn=lambda: input_fn(train, train_y, training=False), name='DNN train')

print('\nTrain set accuracy: {accuracy:0.3f}\n'.format(**eval_result_train))


# Predicting
# A sample set of 3 examples (4 features each) and their expected labels to get predictions for
expected = ['Setosa', 'Versicolor', 'Virginica']
predict_x = {
    'SepalLength': [5.1, 5.9, 6.9],
    'SepalWidth': [3.3, 3.0, 3.1],
    'PetalLength': [1.7, 4.2, 5.4],
    'PetalWidth': [0.5, 1.5, 2.1],
}

def input_fn_test(features, batch_size=256):
    # Convert the inputs to a TensorFlow Dataset without labels, for use when predicting class labels
    return tf.data.Dataset.from_tensor_slices(dict(features)).batch(batch_size)

DNNPredictions = DNNClassifier.predict(
    input_fn=lambda: input_fn_test(predict_x))

LRPredictions = logistic_regression_classifier.predict(
    input_fn=lambda: input_fn_test(predict_x))
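
# Note: Estimator.predict returns a lazy generator of per-example dicts
# (with keys such as 'class_ids' and 'probabilities'); nothing is computed
# until the generator is iterated, as in the loops below.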

# Predictions using the Logistic Regression Model along with the probabilities
for pred_dict, expec in zip(LRPredictions, expected):
    class_id = pred_dict['class_ids'][0]
    probability = pred_dict['probabilities'][class_id]

    print('Prediction is "{}" ({:.1f}%), expected "{}"'.format(
        SPECIES[class_id], 100 * probability, expec))

# Predictions using the DNN Classifier Model along with the probabilities
for pred_dict, expec in zip(DNNPredictions, expected):
    class_id = pred_dict['class_ids'][0]
    probability = pred_dict['probabilities'][class_id]

    print('Prediction is "{}" ({:.1f}%), expected "{}"'.format(
        SPECIES[class_id], 100 * probability, expec))



CNN

RNN

Reposted from blog.csdn.net/hxxjxw/article/details/113408293