Spark (8) - How an Executor Runs a Task

  1. Executor launchTask()
    def launchTask(context: ExecutorBackend, taskDescription: TaskDescription): Unit = {
      // Wrap the task in a TaskRunner, register it in runningTasks, and submit it
      // to the executor's task-launch thread pool.
      val tr = new TaskRunner(context, taskDescription)
      runningTasks.put(taskDescription.taskId, tr)
      threadPool.execute(tr)
    }
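
Each task therefore runs as its own TaskRunner on the executor's task-launch thread pool; registering it in runningTasks is what later allows the executor to look the task up and interrupt it if the driver asks for it to be killed.
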
  2. TaskRunner.run()
    override def run(): Unit = {
      threadId = Thread.currentThread.getId
      Thread.currentThread.setName(threadName)
      val threadMXBean = ManagementFactory.getThreadMXBean
      val taskMemoryManager = new TaskMemoryManager(env.memoryManager, taskId)
      val deserializeStartTime = System.currentTimeMillis()
      val deserializeStartCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
        threadMXBean.getCurrentThreadCpuTime
      } else 0L
      Thread.currentThread.setContextClassLoader(replClassLoader)
      // Closure deserializer
      val ser = env.closureSerializer.newInstance()
      logInfo(s"Running $taskName (TID $taskId)")
      // Tell the ExecutorBackend that this task is now RUNNING
      execBackend.statusUpdate(taskId, TaskState.RUNNING, EMPTY_BYTE_BUFFER)
      var taskStartTime: Long = 0
      var taskStartCpu: Long = 0
      // Total time this JVM process has spent in garbage collection so far;
      // the difference between two readings gives the GC time during the task's run
      startGCTime = computeTotalGcTime()

      try {
        // Must be set before updateDependencies() is called, in case fetching dependencies
        // requires access to properties contained within (e.g. for access control).
        Executor.taskDeserializationProps.set(taskDescription.properties)
        // Download any missing files and JARs this task depends on
        updateDependencies(taskDescription.addedFiles, taskDescription.addedJars)
        // Deserialize the Task (a ShuffleMapTask or a ResultTask) from taskDescription,
        // using the thread's context ClassLoader so that user classes and their resources can be resolved
        task = ser.deserialize[Task[Any]](
          taskDescription.serializedTask, Thread.currentThread.getContextClassLoader)
        task.localProperties = taskDescription.properties
        task.setTaskMemoryManager(taskMemoryManager)
        
        // ...... 
        
        // Run the real task and measure how long it takes
        taskStartTime = System.currentTimeMillis()
        taskStartCpu = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
          threadMXBean.getCurrentThreadCpuTime
        } else 0L
        var threwException = true
        
        val value = Utils.tryWithSafeFinally {
          // Call task.run(): Task sets up some context information and then executes the abstract runTask(context)
          val res = task.run(
            taskAttemptId = taskId,
            attemptNumber = taskDescription.attemptNumber,
            metricsSystem = env.metricsSystem)
          threwException = false
          // The task's result is returned and bound to value (important!)
          res
        } {
          // exception handling ...
        }
        
        // other wrap-up work
        
        // Serialize value into a byte stream
        val valueBytes = resultSer.serialize(value)
        // Wrap it in a DirectTaskResult
        val directResult = new DirectTaskResult(valueBytes, accumUpdates)
        val serializedDirectResult = ser.serialize(directResult)
        // Depending on its size, serializedDirectResult is then either wrapped as
        //       ser.serialize(new IndirectTaskResult[Any](blockId, resultSize))
        // or, when the result is small enough, used as-is;
        // in the very end it is sent directly back to the driver
        execBackend.statusUpdate(taskId, TaskState.FINISHED, serializedResult)
      } catch {
        // ...... (error handling omitted)
      } finally {
        runningTasks.remove(taskId)
      }
    }
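
The last few comments compress a size-based decision. Roughly, the branching looks like the sketch below (a simplified illustration, not the exact source; the two thresholds correspond to spark.driver.maxResultSize and spark.task.maxDirectResultSize, and the return values are just descriptions):

  // Illustrative sketch only: how the serialized task result is shipped back, by size.
  def describeResultShipping(serializedDirectResult: java.nio.ByteBuffer,
                             maxResultSize: Long,
                             maxDirectResultSize: Long): String = {
    val resultSize = serializedDirectResult.limit()
    if (maxResultSize > 0 && resultSize > maxResultSize) {
      // Larger than the driver will accept: drop the payload and send only an
      // IndirectTaskResult "marker" so the driver can fail the job with a clear error.
      "dropped; only an IndirectTaskResult marker is sent"
    } else if (resultSize > maxDirectResultSize) {
      // Too big to piggyback on the status-update RPC: store the bytes in the BlockManager
      // and send IndirectTaskResult(blockId, resultSize) so the driver fetches them later.
      "stored in the BlockManager; IndirectTaskResult(blockId, resultSize) is sent"
    } else {
      // Small enough: serializedDirectResult itself goes straight back to the driver.
      "serializedDirectResult is sent directly"
    }
  }
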
  3. Task.run()
    Task.run() sets up the TaskContext for this attempt and then calls the abstract runTask(context), which each concrete Task type implements.

    ResultTask and ShuffleMapTask are the two subclasses of Task. Looking at ResultTask first: it deserializes the (rdd, func) pair and calls rdd.iterator() to run the function over the partition.

  override def runTask(context: TaskContext): U = {
    // Deserialize the RDD and the func using the broadcast variables.
    val threadMXBean = ManagementFactory.getThreadMXBean
    val deserializeStartTime = System.currentTimeMillis()
    val deserializeStartCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
      threadMXBean.getCurrentThreadCpuTime
    } else 0L
    val ser = SparkEnv.get.closureSerializer.newInstance()
    val (rdd, func) = ser.deserialize[(RDD[T], (TaskContext, Iterator[T]) => U)](
      ByteBuffer.wrap(taskBinary.value), Thread.currentThread.getContextClassLoader)
    _executorDeserializeTime = System.currentTimeMillis() - deserializeStartTime
    _executorDeserializeCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
      threadMXBean.getCurrentThreadCpuTime - deserializeStartCpuTime
    } else 0L
  
    // Run the deserialized user function over this partition's iterator; its return value is the task result
    func(context, rdd.iterator(partition, context))
  }
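
For orientation, the (rdd, func) pair originates on the driver: an action such as collect() calls SparkContext.runJob() with a per-partition function, and the DAGScheduler broadcasts the final RDD together with that function as taskBinary. A minimal driver-side sketch of the idea (assumes a local SparkContext; it illustrates the shape of func, not the DAGScheduler's actual plumbing):

  import org.apache.spark.{SparkConf, SparkContext, TaskContext}
  import org.apache.spark.rdd.RDD

  object ResultTaskSketch {
    def main(args: Array[String]): Unit = {
      val sc = new SparkContext(new SparkConf().setMaster("local[2]").setAppName("result-task-sketch"))
      val rdd: RDD[Int] = sc.parallelize(1 to 100, numSlices = 4)
      // collect() boils down to a per-partition function like this one; (rdd, func)
      // is what each ResultTask later deserializes and runs on its own partition.
      val func: (TaskContext, Iterator[Int]) => Array[Int] = (_, iter) => iter.toArray
      val perPartition: Array[Array[Int]] = sc.runJob(rdd, func)
      println(perPartition.map(_.length).mkString(", "))
      sc.stop()
    }
  }
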
  4. ShuffleMapTask.runTask()

After all the twists and turns, these few short lines are where all of the user application's chained operations finally get executed:

  • Deserialize the RDD and its ShuffleDependency,
  • call rdd.iterator(partition, context) to run the user-defined chain of functions on this partition,
  • then obtain a ShuffleWriter from the shuffleManager,
  • and finally use that writer to persist the output of the RDD iterator; the metadata describing it is returned as a MapStatus, which becomes the valueBytes/directResult/serializedResult in TaskRunner.run().

The returned MapStatus can be thought of as metadata about the intermediate result kept in memory/on disk, including its BlockId and size.
Its main purpose is to report the ShuffleMapTask's execution result back to the scheduler on the driver.
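
As a rough mental model (a hypothetical simplification for illustration, not Spark's real org.apache.spark.scheduler.MapStatus), the information it carries looks like this:

  // Hypothetical simplification of MapStatus, for illustration only.
  final case class SimpleMapStatus(
      location: String,                     // the real class uses a BlockManagerId (executorId, host, port)
      sizePerReducePartition: Array[Long]) {
    // Spark's MapStatus exposes getSizeForBlock(reduceId); the real implementations compress these
    // sizes (CompressedMapStatus: roughly 1 byte per partition, HighlyCompressedMapStatus: averages).
    def getSizeForBlock(reduceId: Int): Long = sizePerReducePartition(reduceId)
  }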

  override def runTask(context: TaskContext): MapStatus = {
    // Deserialize the RDD using the broadcast variable.
    val threadMXBean = ManagementFactory.getThreadMXBean
    val deserializeStartTime = System.currentTimeMillis()
    val deserializeStartCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
      threadMXBean.getCurrentThreadCpuTime
    } else 0L
    val ser = SparkEnv.get.closureSerializer.newInstance()
    val (rdd, dep) = ser.deserialize[(RDD[_], ShuffleDependency[_, _, _])](
      ByteBuffer.wrap(taskBinary.value), Thread.currentThread.getContextClassLoader)
    _executorDeserializeTime = System.currentTimeMillis() - deserializeStartTime
    _executorDeserializeCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
      threadMXBean.getCurrentThreadCpuTime - deserializeStartCpuTime
    } else 0L

    var writer: ShuffleWriter[Any, Any] = null
    try {
      // SortShuffleManager.getWriter[K, V]() may return, for example, a SortShuffleWriter.
      val manager = SparkEnv.get.shuffleManager
      writer = manager.getWriter[Any, Any](dep.shuffleHandle, partitionId, context)
      // SortShuffleWriter.write() builds a MapStatus; stop() below returns it as this method's result.
      writer.write(rdd.iterator(partition, context).asInstanceOf[Iterator[_ <: Product2[Any, Any]]])
      writer.stop(success = true).get
    } catch {
      // ......
    }
  }
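
The comment above says getWriter() "may return a SortShuffleWriter". More precisely, SortShuffleManager picks the writer from the handle type chosen when the shuffle was registered; a rough sketch of that dispatch (illustrative strings, not the exact source):

  // Illustration of SortShuffleManager.getWriter()'s dispatch on the shuffle handle type.
  def writerFor(handleType: String): String = handleType match {
    case "BypassMergeSortShuffleHandle" => "BypassMergeSortShuffleWriter" // few reduce partitions, no map-side combine
    case "SerializedShuffleHandle"      => "UnsafeShuffleWriter"          // tungsten / serialized sort path
    case _                              => "SortShuffleWriter"            // BaseShuffleHandle: the general path below
  }
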
  5. SortShuffleWriter.write()

org.apache.spark.shuffle.sort.SortShuffleWriter.scala

Writes the partition data of the RDD computed by each ShuffleMapTask to local disk.

  /** Write a bunch of records to this task's output */
  override def write(records: Iterator[Product2[K, V]]): Unit = {
    sorter = if (dep.mapSideCombine) {
      new ExternalSorter[K, V, C](
        context, dep.aggregator, Some(dep.partitioner), dep.keyOrdering, dep.serializer)
    } else {
      // In this case we pass neither an aggregator nor an ordering to the sorter, because we don't
      // care whether the keys get sorted in each partition; that will be done on the reduce side
      // if the operation being run is sortByKey.
      new ExternalSorter[K, V, V](
        context, aggregator = None, Some(dep.partitioner), ordering = None, dep.serializer)
    }
    sorter.insertAll(records)

    // Write all of the sorted output into a single data file (via a temp file), commit the matching
    // index file, and record the per-partition lengths in a MapStatus.
    val output = shuffleBlockResolver.getDataFile(dep.shuffleId, mapId)
    val tmp = Utils.tempFileWith(output)
    try {
      val blockId = ShuffleBlockId(dep.shuffleId, mapId, IndexShuffleBlockResolver.NOOP_REDUCE_ID)
      val partitionLengths = sorter.writePartitionedFile(blockId, tmp)
      shuffleBlockResolver.writeIndexFileAndCommit(dep.shuffleId, mapId, partitionLengths, tmp)
      mapStatus = MapStatus(blockManager.shuffleServerId, partitionLengths)
    } finally {
      if (tmp.exists() && !tmp.delete()) {
        logError(s"Error while deleting temp file ${tmp.getAbsolutePath}")
      }
    }
  }
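
The index file written by writeIndexFileAndCommit() is essentially the running total of partitionLengths: one offset per reduce partition plus a leading zero, so reducer i later reads bytes [offsets(i), offsets(i+1)) of the single data file. A tiny sketch of that computation (an illustration, not Spark's actual code):

  // How per-partition lengths become the offsets stored in the shuffle index file (illustrative).
  def indexOffsets(partitionLengths: Array[Long]): Array[Long] =
    partitionLengths.scanLeft(0L)(_ + _)

  // Example: Array(10L, 0L, 25L) => Array(0, 10, 10, 35);
  // reduce partition 2 would read bytes [10, 35) of the data file.
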
  6. ExternalSorter.insertAll()

Depending on whether an aggregator was passed in when the ExternalSorter was created, i.e. whether map-side combine (mapSideCombine) is needed, insertAll() either first combines values in memory using an AppendOnlyMap, or simply sticks the records into an in-memory buffer; in both cases the collection may be spilled to disk along the way.
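
For context, the aggregator comes from map-side-combine operators. reduceByKey(_ + _), for instance, supplies an Aggregator whose createCombiner/mergeValue/mergeCombiners are derived from the user's reduce function; a rough sketch of such an aggregator (summing Ints, names illustrative):

  import org.apache.spark.Aggregator

  // Roughly what reduceByKey(_ + _) on (String, Int) pairs supplies (illustrative):
  val sumAggregator = new Aggregator[String, Int, Int](
    createCombiner = (v: Int) => v,                  // first value seen for a key becomes the combiner
    mergeValue = (c: Int, v: Int) => c + v,          // fold further values into the combiner (map side)
    mergeCombiners = (c1: Int, c2: Int) => c1 + c2)  // merge combiners across spills / on the reduce side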

  def insertAll(records: Iterator[Product2[K, V]]): Unit = {
    // TODO: stop combining if we find that the reduction factor isn't high
    val shouldCombine = aggregator.isDefined

    if (shouldCombine) {
      // Combine values in-memory first using our AppendOnlyMap
      val mergeValue = aggregator.get.mergeValue
      val createCombiner = aggregator.get.createCombiner
      var kv: Product2[K, V] = null
      val update = (hadValue: Boolean, oldValue: C) => {
        if (hadValue) mergeValue(oldValue, kv._2) else createCombiner(kv._2)
      }
      while (records.hasNext) {
        addElementsRead()
        kv = records.next()
        map.changeValue((getPartition(kv._1), kv._1), update)
        maybeSpillCollection(usingMap = true)
      }
    } else {
      // Stick values into our buffer
      while (records.hasNext) {
        addElementsRead()
        val kv = records.next()
        buffer.insert(getPartition(kv._1), kv._1, kv._2.asInstanceOf[C])
        maybeSpillCollection(usingMap = false)
      }
    }
  }
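
The maybeSpillCollection() calls are where spilling happens: the collection estimates its current size and Spillable.maybeSpill() decides whether to dump it to disk. A simplified sketch of that decision (parameter names are illustrative; the real code also force-spills after a configurable number of elements):

  // Simplified sketch of the spill decision in Spillable.maybeSpill().
  def shouldSpill(currentMemoryBytes: Long,
                  memoryThresholdBytes: Long,
                  acquireExecutionMemory: Long => Long): Boolean = {
    if (currentMemoryBytes < memoryThresholdBytes) {
      false                                            // still within the current memory grant
    } else {
      // Try to roughly double the grant; spill only if the memory manager can't give us enough.
      val wanted = 2 * currentMemoryBytes - memoryThresholdBytes
      val granted = acquireExecutionMemory(wanted)
      currentMemoryBytes >= memoryThresholdBytes + granted
    }
  }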


Reposted from blog.csdn.net/rover2002/article/details/106131688