Key code in Spark's KMeans implementation for updating the cluster centers

  /**
   * Returns the index of the cluster center closest to the given sample, together with the
   * squared distance to that center. For reference, VectorWithNorm is defined as
   * `class VectorWithNorm(val vector: Vector, val norm: Double)`, where `vector` is the
   * sample in vector form and `norm` is its precomputed norm (KMeans uses the L2 norm).
   */
  private[mllib] def findClosest(
      centers: TraversableOnce[VectorWithNorm],
      point: VectorWithNorm): (Int, Double) = {
    var bestDistance = Double.PositiveInfinity
    var bestIndex = 0
    var i = 0
    centers.foreach { center =>
      // Since ||a - b|| >= | ||a|| - ||b|| |, squaring this gives a cheap lower bound on the
      // squared distance, which we can use to avoid unnecessary distance computation.
      var lowerBoundOfSqDist = center.norm - point.norm
      lowerBoundOfSqDist = lowerBoundOfSqDist * lowerBoundOfSqDist
      if (lowerBoundOfSqDist < bestDistance) {
        val distance: Double = fastSquaredDistance(center, point)
        if (distance < bestDistance) {
          bestDistance = distance
          bestIndex = i
        }
      }
      i += 1
    }
    (bestIndex, bestDistance)
  }
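
To make the norm-based pruning concrete, here is a small self-contained sketch of the same logic outside Spark (it runs as-is in a Scala REPL). NormedPoint and squaredDistance are hypothetical stand-ins for VectorWithNorm and fastSquaredDistance; this is an illustration under those assumptions, not MLlib's actual implementation:

  // Hypothetical stand-in for VectorWithNorm: a point with its precomputed L2 norm.
  case class NormedPoint(values: Array[Double]) {
    val norm: Double = math.sqrt(values.map(v => v * v).sum)
  }

  // Plain squared Euclidean distance, standing in for fastSquaredDistance.
  def squaredDistance(a: NormedPoint, b: NormedPoint): Double =
    a.values.zip(b.values).map { case (x, y) => (x - y) * (x - y) }.sum

  def findClosestSimple(centers: Seq[NormedPoint], point: NormedPoint): (Int, Double) = {
    var bestDistance = Double.PositiveInfinity
    var bestIndex = 0
    var i = 0
    centers.foreach { center =>
      // (||a|| - ||b||)^2 <= ||a - b||^2, so when this cheap bound already exceeds the
      // best squared distance found so far, the full computation can be skipped.
      val diff = center.norm - point.norm
      if (diff * diff < bestDistance) {
        val d = squaredDistance(center, point)
        if (d < bestDistance) {
          bestDistance = d
          bestIndex = i
        }
      }
      i += 1
    }
    (bestIndex, bestDistance)
  }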
  /**
   * Below is the implementation of the K-means algorithm itself.
   * A word on the `instr` parameter: mllib passes None by default; the parameter exists
   * because the ML library calls into this code, so accepting it accommodates ML's pipeline
   * design. Instrumentation (`class Instrumentation[E <: Estimator[_]]`) is a wrapper that
   * records useful information during a training session. Note also that the default choice
   * of initial centers is not random but k-means|| (a parallel variant of k-means++).
   * Recall: VectorWithNorm(val vector: Vector, val norm: Double).
   */
  private def runAlgorithm(
      data: RDD[VectorWithNorm],
      instr: Option[Instrumentation[NewKMeans]]): KMeansModel = {

    val sc = data.sparkContext

    val initStartTime = System.nanoTime()
    // Initialize the cluster centers, reusing an initial model if one was provided.
    val centers = initialModel match {
      case Some(kMeansCenters) =>
        kMeansCenters.clusterCenters.map(new VectorWithNorm(_))
      case None =>
        if (initializationMode == KMeans.RANDOM) {
          initRandom(data)
        } else {
          initKMeansParallel(data)
        }
    }
    val initTimeInSeconds = (System.nanoTime() - initStartTime) / 1e9
    logInfo(f"Initialization with $initializationMode took $initTimeInSeconds%.3f seconds.")

    var converged = false
    var cost = 0.0
    var iteration = 0

    val iterationStartTime = System.nanoTime()

    instr.foreach(_.logNumFeatures(centers.head.vector.size))

    // Run iterations of Lloyd's algorithm until convergence, using an accumulator for the
    // cost and a broadcast variable for the cluster centers.
    while (iteration < maxIterations && !converged) {
      val costAccum = sc.doubleAccumulator
      val bcCenters = sc.broadcast(centers)

      // Compute, for each cluster center, the number of samples it owns and their sum.
      // `dims` is the feature dimension of the samples. mapPartitions is used here: it
      // applies the same operation to each partition (each partition holds many samples,
      // the `points` iterator below), unlike map, which operates on every RDD element
      // individually.
      val totalContribs = data.mapPartitions { points =>
        // bcCenters.value returns the broadcast value, an Array[VectorWithNorm].
        val thisCenters = bcCenters.value
        // thisCenters is an Array[VectorWithNorm]; recall
        // VectorWithNorm(val vector: Vector, val norm: Double).
        val dims = thisCenters.head.vector.size

        val sums = Array.fill(thisCenters.length)(Vectors.zeros(dims))
        // An array holding, for each cluster center, the number of samples assigned to it.
        val counts = Array.fill(thisCenters.length)(0L)
        // For each sample in the partition: find its closest center (bestCenter) and the
        // squared distance (cost), accumulate the cost, add the sample's vector into that
        // center's running sum, and increment that center's count by one.
        points.foreach { point =>
          val (bestCenter, cost) = KMeans.findClosest(thisCenters, point)
          costAccum.add(cost)
          val sum = sums(bestCenter)
          axpy(1.0, point.vector, sum)
          counts(bestCenter) += 1
        }

        counts.indices.filter(counts(_) > 0).map(j => (j, (sums(j), counts(j)))).iterator
      }.reduceByKey { case ((sum1, count1), (sum2, count2)) =>
        axpy(1.0, sum2, sum1)   //sum1 += 1.0 * sum2
        (sum1, count1 + count2)
      }.collectAsMap()
      // After collectAsMap(), totalContribs is a map of the form [key, (vector, long)]:
      // the key is a cluster-center index, the vector is the element-wise sum of all samples
      // assigned to that center, and the long is the number of those samples. reduceByKey
      // merges the contributions sharing a key across the cluster (sum1 and sum2 are both
      // vectors), producing one (sum, count) record per center.
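      // For example (illustrative values), merging ([1.0, 2.0], 3) from one partition with
      // ([4.0, 5.0], 7) from another yields ([5.0, 7.0], 10) for that center.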

      bcCenters.destroy(blocking = false)

      // Update the cluster centers and costs. Each entry carries the element-wise sum of a
      // center's samples and their count, which is exactly what is needed to recompute it.
      converged = true
      totalContribs.foreach { case (j, (sum, count)) =>
        // scal(1.0 / count, sum) rescales sum in place: each dimension of the sum vector is
        // divided by the sample count, giving the per-dimension mean, i.e. the new center.
        scal(1.0 / count, sum)
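        // E.g. with sum = [4.0, 6.0] and count = 2, scal(0.5, sum) leaves sum = [2.0, 3.0].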
        val newCenter = new VectorWithNorm(sum)
        if (converged && KMeans.fastSquaredDistance(newCenter, centers(j)) > epsilon * epsilon) {
          converged = false
        }
        centers(j) = newCenter
      }

      cost = costAccum.value
      iteration += 1
    }

    val iterationTimeInSeconds = (System.nanoTime() - iterationStartTime) / 1e9
    logInfo(f"Iterations took $iterationTimeInSeconds%.3f seconds.")

    if (iteration == maxIterations) {
      logInfo(s"KMeans reached the max number of iterations: $maxIterations.")
    } else {
      logInfo(s"KMeans converged in $iteration iterations.")
    }

    logInfo(s"The cost is $cost.")

    new KMeansModel(centers.map(_.vector))
  }
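
The loop body above is the heart of the update: each partition emits partial (sum, count) pairs per center, reduceByKey merges pairs that share a center index, and dividing each sum by its count yields the new means. Below is a minimal sketch of that same pattern on a local SparkContext; the toy data and object name are illustrative, and plain array arithmetic stands in for MLlib's private axpy/scal BLAS helpers:

import org.apache.spark.{SparkConf, SparkContext}

object CenterUpdateSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(
      new SparkConf().setAppName("CenterUpdateSketch").setMaster("local[*]"))

    // Two current centers and a few toy 2-D points (illustrative values).
    val bcCenters = sc.broadcast(Array(Array(0.0, 0.0), Array(10.0, 10.0)))
    val points = sc.parallelize(Seq(
      Array(1.0, 1.0), Array(2.0, 0.0), Array(9.0, 9.0), Array(11.0, 10.0)))

    // Same shape as runAlgorithm: each partition emits per-center (sum, count)
    // contributions, reduceByKey merges them, dividing by count gives the new means.
    val newCenters = points.mapPartitions { iter =>
      val cs = bcCenters.value
      val dims = cs.head.length
      def sqDist(a: Array[Double], b: Array[Double]): Double =
        a.zip(b).map { case (x, y) => (x - y) * (x - y) }.sum
      val sums = Array.fill(cs.length)(new Array[Double](dims))
      val counts = Array.fill(cs.length)(0L)
      iter.foreach { p =>
        val best = cs.indices.minBy(j => sqDist(cs(j), p))
        var d = 0
        while (d < dims) { sums(best)(d) += p(d); d += 1 }  // axpy(1.0, p, sums(best))
        counts(best) += 1
      }
      counts.indices.filter(counts(_) > 0).map(j => (j, (sums(j), counts(j)))).iterator
    }.reduceByKey { case ((s1, c1), (s2, c2)) =>
      for (d <- s1.indices) s1(d) += s2(d)                  // axpy(1.0, s2, s1)
      (s1, c1 + c2)
    }.mapValues { case (sum, count) =>
      sum.map(_ / count)                                    // scal(1.0 / count, sum)
    }.collectAsMap()

    newCenters.toSeq.sortBy(_._1).foreach { case (j, c) =>
      println(s"center $j -> ${c.mkString("[", ", ", "]")}")
    }
    sc.stop()
  }
}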

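Finally, runAlgorithm is private and is reached through the public mllib API. As a usage sketch (dataset and parameter values are illustrative), training a model might look like this:

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.clustering.KMeans
import org.apache.spark.mllib.linalg.Vectors

object KMeansTrainExample {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(
      new SparkConf().setAppName("KMeansTrainExample").setMaster("local[*]"))
    // Two obvious clusters around (0, 0) and (9, 9).
    val data = sc.parallelize(Seq(
      Vectors.dense(0.0, 0.0), Vectors.dense(0.1, 0.1),
      Vectors.dense(9.0, 9.0), Vectors.dense(9.1, 9.1)
    )).cache()
    // train(data, k, maxIterations); initialization defaults to k-means||.
    val model = KMeans.train(data, 2, 20)
    model.clusterCenters.foreach(println)
    sc.stop()
  }
}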