基于OpenCV的神经网络简介

人工神经网络

隐藏层神经元数目的经验法则:

 1:如果输入与输出层的大小相差很大,则隐藏层的神经元数目最好与输出层更接近

2:对于相对较小的输入层,隐藏神经元最好是输入层和输出层大小之和的三分之二,或者小于输入层大小的两倍

简单例子1:

    这只是一个简单的例子,这种分类其实没有意义,但可以测试网络是否可以正常运行,在这段代码中,只提供了一个训练记录,它的类标签为5,这个网络会用来判断输入数据的类标签是否为5

import cv2
import numpy as np

ann = cv2.ml.ANN_MLP_create() #创建感知器
ann.setLayerSizes(np.array([9, 5, 9], dtype=np.uint8)) #设置拓扑结构 输入层 隐藏层 输出层的个数
ann.setTrainMethod(cv2.ml.ANN_MLP_BACKPROP) #训练模式,采用反向传播方式 另外一种RPROP也是反向传播算法,这两种类型 只能在有监督学习中才可以设置
"""
 train函数包含三个参数:
     samples,layout和reponses
         只有samples是必须设置的参数,另外两个为可选参数
         只用了samples参数训练的统计模块,hi采用无监督学习算法,如果提供了layout和responses参数就是有监督学习    
"""
ann.train(np.array([[1.2, 1.3, 1.9, 2.2, 2.3, 2.9, 3.0, 3.2, 3.3]], dtype=np.float32),
          cv2.ml.ROW_SAMPLE,
          np.array([[0,0,0,0,1,0,0,0,0]],  
          dtype=np.float32)
          )
print (ann.predict(np.array([[1.4, 1.5, 1.2, 2., 2.5, 2.8, 3., 3.1, 3.8]], dtype=np.float32)))

输出:

(5.0, array([[-0.06419383, -0.13360272, -0.1681568 , -0.18708915,  0.0970564 ,
         0.89237726,  0.05093023,  0.17537238,  0.13388439]], dtype=float32))

基于ANN的动物分类

    假设输入统计量:体重,长度,牙齿,输入的都是无实用价值的数据,并且只考虑了训练数据集的大小/训练迭代系数。通过这些结果可以看到ANN对那些类产生了过拟合,因此,提高训练过程中输入的数据质量很重要。

    每次运行的结果都不相同

import cv2
import numpy as np
from random import randint

animals_net = cv2.ml.ANN_MLP_create()
# NOTE(review): ANN_MLP_UPDATE_WEIGHTS is a train() flag, not a training
# method; here RPROP | UPDATE_WEIGHTS happens to evaluate to ANN_MLP_RPROP
# (1 | 1 == 1), so the net still trains with RPROP — confirm the intent
# against the cv2.ml docs.
animals_net.setTrainMethod(cv2.ml.ANN_MLP_RPROP | cv2.ml.ANN_MLP_UPDATE_WEIGHTS)
animals_net.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM)
# Topology: 3 inputs (weight, length, teeth), 6 hidden nodes, 4 outputs
# (one per animal class).
animals_net.setLayerSizes(np.array([3, 6, 4]))
# Stop training after 10 iterations or when the error change drops below 1.
animals_net.setTermCriteria(( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 ))

"""Input arrays
weight, length, teeth
"""

# NOTE(review): the "eagle" class below is called condor in the sampler
# functions further down — same class index (1), different name.
"""Output arrays
dog, eagle, dolphin and dragon
"""

def dog_sample():
  """Return one synthetic dog record: [weight, length, teeth]."""
  weight = randint(10, 20)
  teeth = randint(38, 42)
  return [weight, 1, teeth]

def dog_class():
  """One-hot label for class index 0 (dog)."""
  label = [0] * 4
  label[0] = 1
  return label

def condor_sample():
  """Return one synthetic condor record: [weight, length, teeth] (no teeth)."""
  sample = [randint(3, 10), randint(3, 5)]
  sample.append(0)
  return sample

def condor_class():
  """One-hot label for class index 1 (condor)."""
  return [int(i == 1) for i in range(4)]

def dolphin_sample():
  """Return one synthetic dolphin record: [weight, length, teeth]."""
  bounds = ((30, 190), (5, 15), (80, 100))
  return [randint(lo, hi) for lo, hi in bounds]

def dolphin_class():
  """One-hot label for class index 2 (dolphin)."""
  label = [0, 0, 0, 0]
  label[2] = 1
  return label

def dragon_sample():
  """Return one synthetic dragon record: [weight, length, teeth]."""
  weight = randint(1200, 1800)
  length = randint(30, 40)
  teeth = randint(160, 180)
  return [weight, length, teeth]

def dragon_class():
  """One-hot label for class index 3 (dragon)."""
  return [0] * 3 + [1]

def record(sample, classification):
  """Pack one (features, one-hot label) pair as float32 row arrays for cv2.ml."""
  features = np.array([sample], dtype=np.float32)
  label = np.array([classification], dtype=np.float32)
  return (features, label)

records = []

# Dead example kept from the book: per-sample training with no stored
# records (note the Python 2 print statement inside the string).
"""
SAMPLES = 5000
for x in range(0, SAMPLES):
  print "Samples %d/%d" % (x, SAMPLES)
  animals_net.train(np.array([dog_sample()], dtype=np.float32), cv2.ml.ROW_SAMPLE, np.array([dog_class()], dtype=np.float32))
  animals_net.train(np.array([condor_sample()], dtype=np.float32), cv2.ml.ROW_SAMPLE, np.array([condor_class()], dtype=np.float32))
  animals_net.train(np.array([dolphin_sample()], dtype=np.float32), cv2.ml.ROW_SAMPLE, np.array([dolphin_class()], dtype=np.float32))
  animals_net.train(np.array([dragon_sample()], dtype=np.float32), cv2.ml.ROW_SAMPLE, np.array([dragon_class()], dtype=np.float32))
"""

# Build a balanced training set: RECORDS random samples per class, interleaved.
RECORDS = 5000
for x in range(0, RECORDS):
  records.append(record(dog_sample(), dog_class()))
  records.append(record(condor_sample(), condor_class()))
  records.append(record(dolphin_sample(), dolphin_class()))
  records.append(record(dragon_sample(), dragon_class()))

# Train record-by-record for EPOCHS full passes over the stored data.
EPOCHS = 2
for e in range(0, EPOCHS):
  print ("Epoch %d:" % e)
  for t, c in records:
    animals_net.train(t, cv2.ml.ROW_SAMPLE, c)


def _evaluate(label, sampler, class_index, tests):
  """Classify `tests` fresh random samples from `sampler`; return the hit count.

  Prints the predicted class index for every sample (mirroring the original
  per-class test loops). `class_index` is the expected one-hot position.
  """
  hits = 0
  for _ in range(tests):
    # predict() returns (best_class_index, raw_outputs); keep only the index.
    clas = int(animals_net.predict(np.array([sampler()], dtype=np.float32))[0])
    print("class %s: %d" % (label, clas))
    if clas == class_index:
      hits += 1
  return hits

TESTS = 100
dog_results = _evaluate("dog", dog_sample, 0, TESTS)
condor_results = _evaluate("condor", condor_sample, 1, TESTS)
dolphin_results = _evaluate("dolphin", dolphin_sample, 2, TESTS)
dragon_results = _evaluate("dragon", dragon_sample, 3, TESTS)

# Report hits as a percentage of TESTS. The original printed the raw hit
# count with a '%' sign, which is only a valid percentage when TESTS == 100.
print("Dog accuracy: %f%%" % (dog_results * 100.0 / TESTS))
print("condor accuracy: %f%%" % (condor_results * 100.0 / TESTS))
print("dolphin accuracy: %f%%" % (dolphin_results * 100.0 / TESTS))
print("dragon accuracy: %f%%" % (dragon_results * 100.0 / TESTS))

输出:

Epoch 0:
Epoch 1:
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class dog: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class condor: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dolphin: 0
class dragon: 0
class dragon: 3
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 3
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 3
class dragon: 3
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 3
class dragon: 3
class dragon: 0
class dragon: 3
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 3
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 2
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 3
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 3
class dragon: 0
class dragon: 0
class dragon: 3
class dragon: 3
class dragon: 0
class dragon: 3
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 3
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 3
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 3
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 3
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 3
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 3
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 0
class dragon: 3
class dragon: 0
class dragon: 3
Dog accuracy: 100.000000%
condor accuracy: 0.000000%
dolphin accuracy: 0.000000%
dragon accuracy: 21.000000%

没下载到源码中的字符数据集MNIST ,暂时在这边先做个程序记录

案例1:

digits_ann.py

import cv2
import cPickle
import numpy as np
import gzip

"""OpenCV ANN Handwritten digit recognition example

Wraps OpenCV's own ANN by automating the loading of data and supplying default parameters,
such as a single hidden layer of 20 nodes, 10000 samples and 1 training epoch.

The load data code is taken from http://neuralnetworksanddeeplearning.com/chap1.html
by Michael Nielsen
"""

def load_data():
  """Load the pickled MNIST dataset from ./data/mnist.pkl.gz.

  Returns the (training_data, validation_data, test_data) tuple exactly as
  stored in the pickle (Nielsen's neuralnetworksanddeeplearning.com format).
  """
  # Context manager guarantees the gzip handle is closed even when
  # unpickling raises; the original leaked the handle on error.
  with gzip.open('./data/mnist.pkl.gz', 'rb') as mnist:
    training_data, classification_data, test_data = cPickle.load(mnist)
  return (training_data, classification_data, test_data)

def wrap_data():
  """Reshape the raw MNIST arrays into (input, label) pairs.

  Training inputs become 784x1 column vectors paired with 10x1 one-hot
  labels; validation/test inputs are paired with their integer labels.
  """
  tr_d, va_d, te_d = load_data()
  training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
  training_results = [vectorized_result(y) for y in tr_d[1]]
  # list(...) keeps the pairs indexable and re-iterable: under Python 3 a
  # bare zip() is a one-shot iterator, which breaks test_data[0] in test().
  # On Python 2 list(zip(...)) is identical to zip(...).
  training_data = list(zip(training_inputs, training_results))
  validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
  validation_data = list(zip(validation_inputs, va_d[1]))
  test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
  test_data = list(zip(test_inputs, te_d[1]))
  return (training_data, validation_data, test_data)

def vectorized_result(j):
  """Return a 10x1 one-hot column vector with a 1.0 at position j."""
  return np.eye(10)[j].reshape(10, 1)

def create_ANN(hidden = 20):
  """Build an MLP: 784 inputs (28x28 pixels), `hidden` hidden nodes, 10 outputs.

  Trains with RPROP and a symmetric sigmoid activation; the term criteria
  stop training after 100 iterations or when the error change drops below 1.
  """
  ann = cv2.ml.ANN_MLP_create()
  ann.setLayerSizes(np.array([784, hidden, 10]))
  ann.setTrainMethod(cv2.ml.ANN_MLP_RPROP)
  ann.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM)
  ann.setTermCriteria(( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 100, 1 ))
  return ann

def train(ann, samples = 10000, epochs = 1):
  """Train `ann` on up to `samples` MNIST records for `epochs` passes.

  Records are fed one at a time with ROW_SAMPLE layout. Returns
  (ann, test_data) so the caller can evaluate the trained net.
  """
  tr, val, test = wrap_data()

  # range() instead of Python 2-only xrange, and print() instead of the
  # print statement: both forms run unchanged on Python 2 and 3, matching
  # the print() style used elsewhere in this post.
  for x in range(epochs):
    counter = 0
    for img in tr:
      # NOTE(review): `>` lets samples+1 records through before breaking;
      # kept as-is to preserve the original behavior.
      if (counter > samples):
        break
      if (counter % 1000 == 0):
        print("Epoch %d: Trained %d/%d" % (x, counter, samples))
      counter += 1
      data, digit = img
      ann.train(np.array([data.ravel()], dtype=np.float32), cv2.ml.ROW_SAMPLE, np.array([digit.ravel()], dtype=np.float32))
    print("Epoch %d complete" % x)
  return ann, test
  
def test(ann, test_data):
  """Display the first test image and print the net's prediction for it.

  Blocks on cv2.waitKey() until a key is pressed.
  """
  sample = np.array(test_data[0][0].ravel(), dtype=np.float32).reshape(28, 28)
  cv2.imshow("sample", sample)
  cv2.waitKey()
  # print() call syntax is valid on both Python 2 and 3 (the original
  # print statement is a syntax error under Python 3).
  print(ann.predict(np.array([test_data[0][0].ravel()], dtype=np.float32)))

def predict(ann, sample):
  """Classify a single digit image, resizing it to 28x28 first if needed."""
  img = sample.copy()
  rows, cols = img.shape
  needs_resize = (rows != 28 or cols != 28) and rows * cols > 0
  if needs_resize:
    img = cv2.resize(img, (28, 28), interpolation = cv2.INTER_LINEAR)
  features = np.array([img.ravel()], dtype=np.float32)
  return ann.predict(features)

"""
usage:
ann, test_data = train(create_ANN())
test(ann, test_data)
"""

digits_image_process.py

import cv2
import numpy as np
import digits_ann as ANN

def inside(r1, r2):
  """Return True when rectangle r1 = (x, y, w, h) lies strictly inside r2."""
  x1, y1, w1, h1 = r1
  x2, y2, w2, h2 = r2
  return x1 > x2 and y1 > y2 and x1 + w1 < x2 + w2 and y1 + h1 < y2 + h2

def wrap_digit(rect):
  x, y, w, h = rect
  padding = 5
  hcenter = x + w/2
  vcenter = y + h/2
  roi = None
  if (h > w):
    w = h
    x = hcenter - (w/2)
  else:
    h = w
    y = vcenter - (h/2)
  return (x-padding, y-padding, w+padding, h+padding)

# Train the digit ANN: 58 hidden nodes, 50000 samples, 5 epochs.
# ann, test_data = ANN.train(ANN.create_ANN(56), 50000, 5)
ann, test_data = ANN.train(ANN.create_ANN(58), 50000, 5)
font = cv2.FONT_HERSHEY_SIMPLEX

# Source image containing the digits to detect and classify.
# path = "./images/MNISTsamples.png"
path = "./images/numbers.jpg"
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
bw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
bw = cv2.GaussianBlur(bw, (7,7), 0)
# Inverted binary threshold: digits become white blobs on black, which is
# what findContours expects.
ret, thbw = cv2.threshold(bw, 127, 255, cv2.THRESH_BINARY_INV)
thbw = cv2.erode(thbw, np.ones((2,2), np.uint8), iterations = 2)
# NOTE(review): 3-value unpacking matches OpenCV 3.x only; OpenCV 2.x/4.x
# findContours returns (contours, hierarchy) — confirm the installed version.
image, cntrs, hier = cv2.findContours(thbw.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

rectangles = []

# Keep only outermost candidate boxes: skip any box nested inside an already
# accepted one, and skip a contour whose area equals the near-full-image area.
for c in cntrs:
  r = x,y,w,h = cv2.boundingRect(c)
  a = cv2.contourArea(c)
  b = (img.shape[0]-3) * (img.shape[1] - 3)
  
  is_inside = False
  for q in rectangles:
    if inside(r, q):
      is_inside = True
      break
  if not is_inside:
    if not a == b:
      rectangles.append(r)

# Classify each padded, squared ROI and draw the predicted digit on the image.
for r in rectangles:
  x,y,w,h = wrap_digit(r) 
  cv2.rectangle(img, (x,y), (x+w, y+h), (0, 255, 0), 2)
  roi = thbw[y:y+h, x:x+w]
  
  try:
    # ANN.predict returns (class_index, raw_outputs); keep only the index.
    digit_class = int(ANN.predict(ann, roi.copy())[0])
  except:
    # NOTE(review): bare except silently drops ROIs that fail to classify
    # (e.g. empty or float-indexed slices); consider narrowing the type.
    continue
  cv2.putText(img, "%d" % digit_class, (x, y-1), font, 1, (0, 255, 0))

cv2.imshow("thbw", thbw)
cv2.imshow("contours", img)
cv2.imwrite("sample.jpg", img)
cv2.waitKey()

案例2:

digit_recongnition.py

# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: Simplified BSD

# Standard scientific Python imports
import pylab as pl

# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics

# The digits dataset
# The digits dataset: 8x8 grayscale images with integer targets.
digits = datasets.load_digits()

# Show the first 4 training images with their labels. zip() is wrapped in
# list() so it can be sliced (Python 3 returns a lazy iterator; on Python 2
# list(zip(...)) is identical).
for index, (image, label) in enumerate(list(zip(digits.images, digits.target))[:4]):
    pl.subplot(2, 4, index + 1)
    pl.axis('off')
    pl.imshow(image, cmap=pl.cm.gray_r, interpolation='nearest')
    pl.title('Training: %i' % label)

# To apply a classifier on this data, flatten each 8x8 image into a
# 64-feature row, giving a (samples, features) matrix.
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))

# Create a classifier: a support vector classifier.
classifier = svm.SVC(gamma=0.001)

# Learn the digits on the first half of the dataset. Floor division (//)
# keeps the split index an int under Python 3 (where / yields a float,
# which is invalid as an index); on Python 2 int/int already floors.
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])

# Predict the value of the digit on the second half.
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])

# print() call syntax runs on both Python 2 and 3 (the original print
# statements are syntax errors under Python 3).
print("Classification report for classifier %s:\n%s\n" % (
    classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))

# Show the first 4 test images with the classifier's predictions.
for index, (image, prediction) in enumerate(
    list(zip(digits.images[n_samples // 2:], predicted))[:4]):
    pl.subplot(2, 4, index + 5)
    pl.axis('off')
    pl.imshow(image, cmap=pl.cm.gray_r, interpolation='nearest')
    pl.title('Prediction: %i' % prediction)

pl.show()


猜你喜欢

转载自blog.csdn.net/zhuisaozhang1292/article/details/80932188
今日推荐