PySpark Machine Learning (1) - Random Forest

This post walks through implementing the random forest algorithm in the PySpark environment:

% pyspark

from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import StringIndexer
from pyspark.ml.classification import RandomForestClassifier
from pyspark.sql import Row

#1. Read the source data (the table name is elided; nulls are filled with '0' in the next step)
data = spark.sql("""select * from XXX""")

#2. Construct the training dataset: fill nulls with '0', then convert each row to a plain list
dataSet = data.na.fill('0').rdd.map(list)

(trainData, testData) = dataSet.randomSplit([0.7, 0.3])
#print(trainData.take(1))
# The last column is the label; all preceding columns become the feature vector
trainingSet = trainData.map(lambda x: Row(label=x[-1], features=Vectors.dense(x[:-1]))).toDF()

train_num = trainingSet.count()
print("Number of training samples:{}".format(train_num))
print(trainingSet.show())
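
As an aside, the RDD-plus-Row construction above works, but the DataFrame-native way to build the feature vector is VectorAssembler. A minimal sketch, assuming the label column is literally named "label" and the remaining columns are numeric (both assumptions, since the source table is elided):

from pyspark.ml.feature import VectorAssembler

# Assumption: "label" is the target column and every other column is numeric
feature_cols = [c for c in data.columns if c != "label"]
assembler = VectorAssembler(inputCols=feature_cols, outputCol="features")
assembled = assembler.transform(data.na.fill(0))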

#3. Train the random forest
# StringIndexer maps the raw label values to numeric indices, as the classifier requires
stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
si_model = stringIndexer.fit(trainingSet)
tf = si_model.transform(trainingSet)
tf.show()

rf = RandomForestClassifier(numTrees=10, maxDepth=8, labelCol="indexed", seed=42)
rfcModel = rf.fit(tf)

#Output the model's feature importances, feature count, and subtree weights
print("Model feature importances: {}".format(rfcModel.featureImportances))
print("Number of model features: {}".format(rfcModel.numFeatures))
print("Subtree weights: {}".format(rfcModel.treeWeights))

#4. Test
testSet = testData.map(lambda x: Row(label=x[-1], features=Vectors.dense(x[:-1]))).toDF()

print("Number of test samples:{}".format(testSet.count()))
print(testSet.show())

# Reuse the StringIndexer model fitted on the training set, so the
# label-to-index mapping stays consistent between train and test
test_tf = si_model.transform(testSet)

result = rfcModel.transform(test_tf)
result.show()

total_amount = result.count()
correct_amount = result.filter(result.indexed == result.prediction).count()
accuracy = correct_amount / total_amount
print("The prediction accuracy is: {}".format(accuracy))

 
