C++/Python Machine Learning—Linear Regression

1. Python

 

import numpy as np
import random
import matplotlib.pyplot as plt

def predict(x, w, b):
    """Return the linear-model output ``w * x + b`` for input ``x``."""
    return b + x * w

def train(x, y, w, b, learning_rate, epochs):
    """Fit w and b to (x, y) with full-batch gradient descent.

    Runs ``epochs`` passes over the data; each pass accumulates the
    squared-error gradient over every sample, averages it, then steps
    the parameters. Returns the updated ``(w, b)``.
    """
    n = len(x)
    for _ in range(epochs):
        # Residuals for the current parameters (w, b are fixed within a pass).
        residuals = [predict(xi, w, b) - yi for xi, yi in zip(x, y)]
        grad_w = sum(r * xi for r, xi in zip(residuals, x))
        grad_b = sum(residuals)
        # Average the accumulated gradients and take one descent step.
        w -= (learning_rate * grad_w) / n
        b -= (learning_rate * grad_b) / n
    return w, b

def generate_data(w, b, num_examples):
    """Create a synthetic 1-D regression set: y = w*x + b + N(0, 0.1) noise.

    Features are drawn from a standard normal distribution. Returns the
    features and targets as two parallel lists ``(x, y)``.
    """
    xs = []
    ys = []
    for _ in range(num_examples):
        # Draw the feature first, then the noise, one pair per example.
        sample = np.random.normal(0.0, 1.0)
        xs.append(sample)
        ys.append(w * sample + b + np.random.normal(0.0, 0.1))
    return xs, ys

# Define the ground-truth linear model and generate noisy training data
true_w = 2
true_b = 4.2
num_examples = 1000
x, y = generate_data(true_w, true_b, num_examples)

w = 0
b = 0
learning_rate = 0.01
epochs = 1000
# Train the model
w, b = train(x, y, w, b, learning_rate, epochs)
# Print the learned parameters (they should be close to true_w / true_b)
print("w: ", w, ", b: ", b)
# Inputs at which to evaluate the fitted line
x_pred = np.linspace(-3, 3, 100)
# Predictions from the fitted model
y_pred = [predict(x_val, w, b) for x_val in x_pred]
# Plot the training data together with the fitted line
plt.scatter(x, y, s=5, label="Training Data")
plt.plot(x_pred, y_pred, color='red', label="Predicted Line")
plt.legend()
plt.show()

2. C++

#include <iostream>
#include <vector>
#include "math.h"
#include <random>

using namespace std;
// Evaluate the linear model: returns w * x + b for input x.
double predict(double x, double w, double b) {
    const double result = b + w * x;
    return result;
}

// Fit w and b to the data set (x, y) with full-batch gradient descent.
// Performs `epochs` passes; each pass accumulates the squared-error
// gradient over all samples, averages it, then updates w and b in place.
void train(vector<double>& x, vector<double>& y, double& w, double& b, double learning_rate, int epochs) {
    const int sample_count = x.size();
    for (int epoch = 0; epoch < epochs; ++epoch) {
        double grad_w = 0.0;
        double grad_b = 0.0;
        for (int idx = 0; idx < sample_count; ++idx) {
            // Residual for the current parameters (fixed within one pass).
            const double residual = predict(x[idx], w, b) - y[idx];
            grad_w += residual * x[idx];
            grad_b += residual;
        }
        // Average the accumulated gradients and take one descent step.
        w -= (learning_rate * grad_w) / sample_count;
        b -= (learning_rate * grad_b) / sample_count;
    }
}


// Generate a synthetic 1-D regression data set: y = w * x + b + noise.
// Features are drawn from N(0, 1); the additive noise is drawn from
// N(0, 0.1) so it stays small relative to the signal, matching the
// Python implementation. Generated samples are appended to x and y.
// Note: the engine is default-seeded, so the data set is reproducible
// across runs.
void generate_data(double w, double b, int num_examples, vector<double>& x, vector<double>& y) {
    default_random_engine generator;
    normal_distribution<double> feature_dist(0.0, 1.0); // feature distribution
    // BUG FIX: the original reused the N(0, 1) feature distribution as the
    // noise source, making the noise as large as the signal itself; use a
    // separate N(0, 0.1) distribution like the Python version.
    normal_distribution<double> noise_dist(0.0, 0.1);
    for (int i = 0; i < num_examples; i++) {
        double feature = feature_dist(generator);
        x.push_back(feature);
        y.push_back(w * feature + b + noise_dist(generator));
    }
}

int main() {
    // Ground-truth parameters used to synthesize the training set.
    const double true_w = 2;
    const double true_b = 4.2;
    const int num_examples = 1000;
    vector<double> x;
    vector<double> y;
    generate_data(true_w, true_b, num_examples, x, y);

    // Start from zero and fit the parameters by gradient descent.
    double w = 0;
    double b = 0;
    const double learning_rate = 0.01;
    const int epochs = 1000;
    train(x, y, w, b, learning_rate, epochs);

    // Report the learned parameters (should approach true_w / true_b).
    cout << "w: " << w << ", b: " << b << std::endl;

    // Evaluate the fitted line at a few unseen inputs.
    const vector<double> x_pred = {6, 7, 8, 9, 10};
    vector<double> y_pred;
    y_pred.reserve(x_pred.size());
    for (size_t i = 0; i < x_pred.size(); ++i) {
        y_pred.push_back(predict(x_pred[i], w, b));
    }

    cout << "Input values: ";
    for (size_t i = 0; i < x_pred.size(); ++i) {
        cout << x_pred[i] << " ";
    }
    cout << std::endl;

    cout << "Predicted values: ";
    for (size_t i = 0; i < y_pred.size(); ++i) {
        cout << y_pred[i] << " ";
    }
    cout << endl;

    return 0;
}


Source: blog.csdn.net/seek97/article/details/129800444