Learning XOR

This implements f(x; W, c, w, b) = w · max{0, W·x + c} + b: a two-unit rectified-linear hidden layer followed by a linear output unit, with parameters chosen so that the network computes XOR.
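
With the parameters hard-coded below (W = [[1, 1], [1, 1]], c = [0, -1], w = [1, -2], b = 0), the forward pass can be checked by hand for all four inputs, stacked as the rows of X:

    X*W               = [[0, 0], [1, 1], [1, 1], [2, 2]]
    X*W + c           = [[0, -1], [1, 0], [1, 0], [2, 1]]
    h = max{0, X*W+c} = [[0, 0], [1, 0], [1, 0], [2, 1]]
    h*w + b           = [0, 1, 1, 0]

which is exactly XOR of the two input bits. The program below carries out the same steps with plain integer arrays.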

#include <iostream>
#include <vector>
#include <algorithm>
#include <cmath>

// Activation functions, defined here for reference; main() below only uses
// the rectified linear unit, applied directly with std::max.
template <class T>
double tanh(T z) {
  return (std::exp(z) - std::exp(-z)) / (std::exp(z) + std::exp(-z));
}

template <class T>
double sigmoid(T z) {
  return 1.0 / (1.0 + std::exp(-z));
}

// Dot product of a data vector with a weight vector, thresholded to +1/-1
// (a perceptron-style unit). It is not called by main() below.
template <class DataType, class WeightType>
double getMatResult(const std::vector<DataType> &data,
                    const std::vector<WeightType> &weights) {
  double result = 0.0;

  for (size_t i = 0; i < data.size(); ++i) {
    result += data[i] * weights[i];
  }

  return result > 0.0 ? 1.0 : -1.0;  // sign threshold (assuming the original -0.1 was meant to be -1.0)
}

int main() {
  int w[][2] = {{1, 1}, {1, 1}};                  // W: hidden-layer weights
  int bias[] = {0, -1};                           // c: hidden-layer bias
  int weights[] = {1, -2};                        // w: output weights (the output bias b is 0)
  int x[][2] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};  // the four XOR inputs
  int c[][2] = {{0, 0}, {0, 0}, {0, 0}, {0, 0}};  // workspace: hidden activations, then the result

  /* x[4][2] * w[2][2] = c[4][2] */
  for(size_t i=0;i<4;++i) {
    for(size_t j=0;j<2;++j) {
      int sum = 0;
      for(size_t k=0;k<2;++k) {
        sum += x[i][k] * w[k][j];
      }
      c[i][j] = sum;
    }
  }

  for(size_t i=0;i<4;++i) {
    for(size_t j=0;j<2;++j) {
      std::cout<<c[i][j]<<" ";
    }
    std::cout<<std::endl;
  }

  std::cout<<"add bias, rectified linear unit:\n";

  for(size_t i=0;i<4;++i) {
    for(size_t j=0;j<2;++j) {
      c[i][j] = c[i][j] + bias[j];
      c[i][j] = std::max(c[i][j], 0);
      std::cout<<c[i][j]<<" ";
    }
    std::cout<<std::endl;
  }

  /* multiply the rectified hidden activations (rows of c) by the output
     weights w; the scalar output for each input is stored in c[i][0] */
  for(size_t i=0;i<4;++i) {
    int sum = 0;
    for(size_t k=0;k<2;++k) {
      sum += c[i][k] * weights[k];
    }
    c[i][0] = sum;
  }

  std::cout<<"the XOR result:\n";
  for(size_t i=0; i<4; ++i) {
    for(size_t j=0;j<2;++j) {
      std::cout<<x[i][j]<<" ";
    }
    std::cout<<c[i][0]<<"\n";
  }

  return 0;
}
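
Compiled with any C++11 compiler and run, the program prints the intermediate product x·W, then the rectified hidden activations, and finally each input pair with its XOR output, along these lines:

    0 0
    1 1
    1 1
    2 2
    add bias, rectified linear unit:
    0 0
    1 0
    1 0
    2 1
    the XOR result:
    0 0 0
    0 1 1
    1 0 1
    1 1 0

The same forward pass can also be written around std::vector for a single input, which makes the correspondence to f(x; W, c, w, b) more explicit. The sketch below is only an illustration using the same hard-coded parameters; the name xor_net and the per-sample structure are not from the original post:

#include <algorithm>
#include <iostream>
#include <vector>

// f(x; W, c, w, b) = w . max{0, W.x + c} + b for one two-bit input x.
int xor_net(const std::vector<int>& x) {
  const std::vector<std::vector<int>> W = {{1, 1}, {1, 1}};
  const std::vector<int> c = {0, -1};
  const std::vector<int> w = {1, -2};
  const int b = 0;

  std::vector<int> h(2, 0);
  for (size_t j = 0; j < 2; ++j) {
    for (size_t k = 0; k < 2; ++k) {
      h[j] += x[k] * W[k][j];         // pre-activation: x . W
    }
    h[j] = std::max(h[j] + c[j], 0);  // add bias c, apply ReLU
  }

  int y = b;
  for (size_t j = 0; j < 2; ++j) {
    y += h[j] * w[j];                 // output layer: h . w + b
  }
  return y;
}

int main() {
  for (int a = 0; a <= 1; ++a)
    for (int bit = 0; bit <= 1; ++bit)
      std::cout << a << " XOR " << bit << " = " << xor_net({a, bit}) << "\n";
  return 0;
}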

Reposted from www.cnblogs.com/donggongdechen/p/9217023.html