Spark Source Code Analysis: How the Worker Works


(Figure: the Worker launching and managing driver and executor processes)
Explanation:
1. The master asks the worker to launch driver and executor processes.
2. To launch a driver, the worker starts a DriverRunner thread; the DriverRunner is responsible for launching the driver process and for managing it afterwards (a minimal sketch of this runner pattern follows this list).
3. To launch an executor, the worker likewise starts an ExecutorRunner thread; the ExecutorRunner is responsible for launching the executor process and for managing it afterwards.
4. The DriverRunner first creates the driver's working directory, builds the driver launch command, and starts the driver process with a ProcessBuilder.
5. The ExecutorRunner first creates the executor's working directory, builds the executor launch command, and starts the executor process with a ProcessBuilder; the executor then finds its driver and registers itself with it (reverse registration), after which it can start working.
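The following is a minimal, self-contained sketch (not Spark code; all names and the "echo" command are illustrative and assume a Unix-like environment) of the runner pattern described in points 2-5: the worker reacts to a launch request by spawning a dedicated thread, and that thread starts the child process with a ProcessBuilder and then waits on it.

object RunnerPatternSketch {
  // A launch request as the worker might receive it (simplified stand-in, not Spark's message)
  final case class LaunchRequest(id: String, command: Seq[String])

  // The runner thread: starts the child process and supervises it until it exits
  class Runner(req: LaunchRequest) extends Thread("Runner for " + req.id) {
    override def run(): Unit = {
      val builder = new ProcessBuilder(req.command: _*)
      val process = builder.start()        // launch the child process
      val exitCode = process.waitFor()     // block until it exits
      println(s"${req.id} exited with code $exitCode")
    }
  }

  def main(args: Array[String]): Unit = {
    // "echo" is just a placeholder for the real driver/executor command
    val runner = new Runner(LaunchRequest("driver-00001", Seq("echo", "hello from driver")))
    runner.start()
    runner.join()
  }
}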

The schedule method:
Source location: org/apache/spark/deploy/master/Master.scala

/**
 * The resource scheduling algorithm
 */
private def schedule(): Unit = {
  // If the master's state is not ALIVE, return immediately.
  // In other words, a standby master never schedules applications or any other resources.
  if (state != RecoveryState.ALIVE) { return }

  // Drivers take strict precedence over executors
  // Random.shuffle randomly reorders the elements of the collection passed to it
  // (a Fisher-Yates style shuffle: walking the buffer from the end, each element is
  // swapped with an element at a randomly chosen position).
  // Here all registered workers are shuffled; only the ones whose state is ALIVE are
  // then considered, via the guard in the for-loop below.
  val shuffledWorkers = Random.shuffle(workers) // Randomization helps balance drivers

  // Iterate over the workers that are alive
  for (worker <- shuffledWorkers if worker.state == WorkerState.ALIVE) {
    // Iterate over the waiting drivers. A driver is registered with the master only when the
    // application is submitted in cluster deploy mode (e.g. standalone cluster mode); in client
    // mode the driver is started locally and is never scheduled by the master.
    for (driver <- waitingDrivers) {
      // Launch the driver here only if the worker's free memory and free cores both cover what the driver needs
      if (worker.memoryFree >= driver.desc.mem && worker.coresFree >= driver.desc.cores) {
        // Launch the driver
        launchDriver(worker, driver)
        // Remove the driver from the queue of waiting drivers
        waitingDrivers -= driver
      }
    }
  }
  // Schedule and launch executors on the workers
  startExecutorsOnWorkers()
}
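To make the driver-placement part of schedule() concrete, here is a small, self-contained sketch (with made-up WorkerRes/DriverReq classes, not Spark's WorkerInfo/DriverInfo) of the same greedy idea: shuffle the workers, then walk the waiting drivers and place each one on the first worker with enough free memory and cores, deducting the resources it takes.

import scala.collection.mutable
import scala.util.Random

object DriverPlacementSketch {
  final case class WorkerRes(id: String, var memFree: Int, var coresFree: Int)
  final case class DriverReq(id: String, mem: Int, cores: Int)

  // Greedy FIFO placement: for each shuffled worker, launch every waiting driver that still
  // fits into the worker's remaining memory and cores. Returns driverId -> workerId.
  def place(workers: Seq[WorkerRes], waiting: Seq[DriverReq]): Map[String, String] = {
    val pending = mutable.ListBuffer(waiting: _*)
    val placed = mutable.Map.empty[String, String]
    for (w <- Random.shuffle(workers)) {
      for (d <- pending.toList if w.memFree >= d.mem && w.coresFree >= d.cores) {
        w.memFree -= d.mem        // account for the resources the driver takes on this worker
        w.coresFree -= d.cores
        placed(d.id) = w.id
        pending -= d              // no longer waiting
      }
    }
    placed.toMap
  }

  def main(args: Array[String]): Unit = {
    val workers = Seq(WorkerRes("worker-1", 4096, 4), WorkerRes("worker-2", 2048, 2))
    val drivers = Seq(DriverReq("driver-1", 1024, 1), DriverReq("driver-2", 2048, 2))
    println(place(workers, drivers))
  }
}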

Source code analysis: launching and managing the driver process
Step 1: schedule() calls launchDriver
Source location: org/apache/spark/deploy/master/Master.scala

/**
 * Launch a driver on a given worker
 */
private def launchDriver(worker: WorkerInfo, driver: DriverInfo) {
  logInfo("Launching driver " + driver.id + " on worker " + worker.id)
  // Add the driver to the worker's in-memory bookkeeping;
  // this also adds the driver's memory and cpu requirements to the worker's used memory and cores
  worker.addDriver(driver)
  // Record the worker in the driver's in-memory bookkeeping
  driver.worker = Some(worker)
  // Send a LaunchDriver message to the worker's RPC endpoint
  worker.endpoint.send(LaunchDriver(driver.id, driver.desc))
  driver.state = DriverState.RUNNING
}

Step 2: the Worker handles the LaunchDriver message
Source location: org/apache/spark/deploy/worker/Worker.scala

// Launch the driver
case LaunchDriver(driverId, driverDesc) => {
  logInfo(s"Asked to launch driver $driverId")
  val driver = new DriverRunner(
    conf,
    driverId,
    workDir,
    sparkHome,
    driverDesc.copy(command = Worker.maybeUpdateSSLSettings(driverDesc.command, conf)),
    self,
    workerUri,
    securityMgr)
  drivers(driverId) = driver
  driver.start()

  coresUsed += driverDesc.cores
  memoryUsed += driverDesc.mem
}

Step 3: driver.start() from step 2 invokes DriverRunner.start()
Source location: org/apache/spark/deploy/worker/DriverRunner.scala

/** Starts a thread to run and manage the driver. */
private[worker] def start() = {
  /**
   * Create a thread that launches and then supervises the driver process
   */
  new Thread("DriverRunner for " + driverId) {
    override def run() {
      try {
        // First, create the driver's working directory
        val driverDir = createWorkingDirectory()

        // Second, download the user's application jar into that directory
        val localJarFilename = downloadUserJar(driverDir)

        def substituteVariables(argument: String): String = argument match {
          case "{{WORKER_URL}}" => workerUrl
          case "{{USER_JAR}}" => localJarFilename
          case other => other
        }

        // TODO: If we add ability to submit multiple jars they should also be added here
        // Build a ProcessBuilder from the driver's launch command,
        // its memory requirement, the Spark home and the variable-substitution function above
        val builder = CommandUtils.buildProcessBuilder(driverDesc.command, securityManager,
          driverDesc.mem, sparkHome.getAbsolutePath, substituteVariables)
        // Launch the driver process via the ProcessBuilder
        launchDriver(builder, driverDir, driverDesc.supervise)
      }
      catch {
        case e: Exception => finalException = Some(e)
      }

      // Work out the driver's final state from how it exited
      val state =
        if (killed) {
          DriverState.KILLED
        } else if (finalException.isDefined) {
          DriverState.ERROR
        } else {
          finalExitCode match {
            case Some(0) => DriverState.FINISHED
            case _ => DriverState.FAILED
          }
        }

      finalState = Some(state)

      // Finally, this DriverRunner thread sends a DriverStateChanged message to the worker it belongs to
      worker.send(DriverStateChanged(driverId, state, finalException))
    }
  }.start()
}
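The state decision above can be read as a small pure function. The sketch below restates it with illustrative state names (not Spark's DriverState values): being killed takes priority, then an exception means an error, otherwise the exit code decides.

object FinalStateSketch {
  sealed trait RunState
  case object Killed extends RunState
  case object Errored extends RunState
  case object Finished extends RunState
  case object Failed extends RunState

  // Mirrors the decision in DriverRunner: killed wins, then an exception, then the exit code
  def finalStateOf(killed: Boolean, exception: Option[Throwable], exitCode: Option[Int]): RunState =
    if (killed) Killed
    else if (exception.isDefined) Errored
    else exitCode match {
      case Some(0) => Finished
      case _       => Failed
    }

  def main(args: Array[String]): Unit = {
    println(finalStateOf(killed = false, None, Some(0)))   // Finished
    println(finalStateOf(killed = false, None, Some(1)))   // Failed
    println(finalStateOf(killed = true, None, Some(0)))    // Killed
  }
}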

Step 4: the thread in step 3 calls launchDriver
Source location: org/apache/spark/deploy/worker/DriverRunner.scala

private def launchDriver(builder: ProcessBuilder, baseDir: File, supervise: Boolean) {
  builder.directory(baseDir)
  def initialize(process: Process): Unit = {
    // Redirect stdout and stderr to files
    val stdout = new File(baseDir, "stdout")
    CommandUtils.redirectStream(process.getInputStream, stdout)

    val stderr = new File(baseDir, "stderr")
    val formattedCommand = builder.command.asScala.mkString("\"", "\" \"", "\"")
    val header = "Launch Command: %s\n%s\n\n".format(formattedCommand, "=" * 40)
    Files.append(header, stderr, UTF_8)
    CommandUtils.redirectStream(process.getErrorStream, stderr)
  }
  // Start the driver process and wait for it to exit, retrying if supervise is enabled
  runCommandWithRetry(ProcessBuilderLike(builder), initialize, supervise)
}
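A minimal JDK-only sketch of the same launch-and-redirect pattern, assuming a Unix-like environment, an illustrative /tmp working directory and an "echo" placeholder command (Spark's CommandUtils.redirectStream copies the streams in background threads; here the plain ProcessBuilder redirect API is used instead):

import java.io.File
import java.nio.charset.StandardCharsets.UTF_8
import java.nio.file.{Files, StandardOpenOption}

object DriverLaunchSketch {
  def main(args: Array[String]): Unit = {
    val baseDir = new File("/tmp/driver-sketch")                  // assumed working directory
    baseDir.mkdirs()
    val command = Seq("echo", "hello from driver")                // stand-in for the real driver command
    // Write a "Launch Command" header into stderr, then let the process append after it
    val stderr = new File(baseDir, "stderr")
    val header = "Launch Command: %s\n%s\n\n".format(command.mkString("\"", "\" \"", "\""), "=" * 40)
    Files.write(stderr.toPath, header.getBytes(UTF_8),
      StandardOpenOption.CREATE, StandardOpenOption.APPEND)
    val builder = new ProcessBuilder(command: _*)
    builder.directory(baseDir)
    builder.redirectOutput(new File(baseDir, "stdout"))             // stdout goes to a file
    builder.redirectError(ProcessBuilder.Redirect.appendTo(stderr)) // stderr appends after the header
    val exitCode = builder.start().waitFor()
    println(s"driver process exited with code $exitCode")
  }
}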

Step 5: launchDriver from step 4 calls runCommandWithRetry
Source location: org/apache/spark/deploy/worker/DriverRunner.scala

def runCommandWithRetry(
    command: ProcessBuilderLike, initialize: Process => Unit, supervise: Boolean): Unit = {
  // Time to wait between submission retries.
  var waitSeconds = 1
  // A run of this many seconds resets the exponential back-off.
  val successfulRunDuration = 5

  var keepTrying = !killed

  while (keepTrying) {
    logInfo("Launch Command: " + command.command.mkString("\"", "\" \"", "\""))

    synchronized {
      if (killed) { return }
      process = Some(command.start())
      initialize(process.get)
    }

    val processStart = clock.getTimeMillis()
    // Wait for the process (already started above) to exit
    val exitCode = process.get.waitFor()
    if (clock.getTimeMillis() - processStart > successfulRunDuration * 1000) {
      waitSeconds = 1
    }

    if (supervise && exitCode != 0 && !killed) {
      logInfo(s"Command exited with status $exitCode, re-launching after $waitSeconds s.")
      sleeper.sleep(waitSeconds)
      waitSeconds = waitSeconds * 2 // exponential back-off
    }

    keepTrying = supervise && exitCode != 0 && !killed
    finalExitCode = Some(exitCode)
  }
}
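Condensed to its essence, the supervise/retry loop looks like the sketch below (start is any function that launches the process and returns its exit code; the timing values mirror the ones above):

object RetrySketch {
  // Re-launch a failed process while `supervise` is set, doubling the wait between attempts
  // and resetting the back-off whenever a run lasted longer than `successfulRunSeconds`.
  def runWithRetry(start: () => Int, supervise: Boolean, successfulRunSeconds: Int = 5): Int = {
    var waitSeconds = 1
    var exitCode = -1
    var keepTrying = true
    while (keepTrying) {
      val begin = System.currentTimeMillis()
      exitCode = start()                                        // launch and wait for the process
      if (System.currentTimeMillis() - begin > successfulRunSeconds * 1000L) {
        waitSeconds = 1                                         // a long run resets the back-off
      }
      keepTrying = supervise && exitCode != 0
      if (keepTrying) {
        println(s"Command exited with status $exitCode, re-launching after $waitSeconds s.")
        Thread.sleep(waitSeconds * 1000L)
        waitSeconds *= 2                                        // exponential back-off
      }
    }
    exitCode
  }

  def main(args: Array[String]): Unit = {
    var attempts = 0
    // Fails twice, then succeeds; with supervise = true the loop retries until exit code 0
    val exit = runWithRetry(() => { attempts += 1; if (attempts < 3) 1 else 0 }, supervise = true)
    println(s"finished after $attempts attempts with exit code $exit")
  }
}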

Source code analysis: launching and managing executor processes
Step 1: schedule() calls startExecutorsOnWorkers
Source location: org/apache/spark/deploy/master/Master.scala

/**
 * Schedule and launch executors on workers
 */
/**
 * Application scheduling. The default is the spread-out strategy; the alternative packs
 * executors onto as few workers as possible.
 */
private def startExecutorsOnWorkers(): Unit = {
  // Right now this is a very simple FIFO scheduler. We keep trying to fit in the first app
  // in the queue, then the second app, etc.

  // Iterate over the ApplicationInfos in waitingApps, considering only applications that still
  // need cores, i.e. the cores the application asked for minus the cores already granted to its
  // executors is greater than 0
  for (app <- waitingApps if app.coresLeft > 0) {

    val coresPerExecutor: Option[Int] = app.desc.coresPerExecutor

    // Filter out workers that don't have enough resources to launch an executor
    // Keep only ALIVE workers with enough free memory and cores, sorted by free cores descending
    val usableWorkers = workers.toArray.filter(_.state == WorkerState.ALIVE)
      .filter(worker => worker.memoryFree >= app.desc.memoryPerExecutorMB &&
        worker.coresFree >= coresPerExecutor.getOrElse(1))
      .sortBy(_.coresFree).reverse

    // Decide how many cores to allocate to this application on each usable worker
    val assignedCores = scheduleExecutorsOnWorkers(app, usableWorkers, spreadOutApps)

    // Spread-out strategy: the executors of an application are spread as evenly as possible
    // across the workers. For example, with 20 cpu cores to allocate over 10 workers, the loop
    // goes around the workers twice, giving each worker one core per pass, so every worker ends
    // up with 2 cores. In short: spread evenly.

    // Non-spread-out strategy: each application is packed onto as few workers as possible.
    // For example, with 10 workers of 10 cpus each and an application that needs 20 cores,
    // only 2 workers are used and each of them is filled with all 10 of its cores; the next
    // application then has to go to the remaining workers. In short: pack as tightly as possible.
    // (A simplified sketch of both strategies follows this code block.)

    // Now that we've decided how many cores to allocate on each worker, let's allocate them
    // Iterate over the usable workers and, for every worker that was assigned at least one core,
    for (pos <- 0 until usableWorkers.length if assignedCores(pos) > 0) {
      // launch executors for this application on it
      allocateWorkerResourceToExecutors(
        app, assignedCores(pos), coresPerExecutor, usableWorkers(pos))
    }
  }
}
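The two strategies described in the comments can be illustrated with a simplified, self-contained version of the core-assignment step (this ignores memory and coresPerExecutor, which the real scheduleExecutorsOnWorkers also checks):

object ExecutorSpreadSketch {
  // Returns how many cores each worker gets for one application.
  // coresFree(i) is the free core count of usable worker i.
  def assignCores(coresFree: Array[Int], coresNeeded: Int, spreadOut: Boolean): Array[Int] = {
    val assigned = Array.fill(coresFree.length)(0)
    var toAssign = math.min(coresNeeded, coresFree.sum)       // never hand out more than is free
    var pos = 0
    while (toAssign > 0) {
      if (assigned(pos) < coresFree(pos)) {
        val chunk =
          if (spreadOut) 1                                         // spread-out: one core per visit, round-robin
          else math.min(toAssign, coresFree(pos) - assigned(pos))  // otherwise: fill this worker first
        assigned(pos) += chunk
        toAssign -= chunk
      }
      pos = (pos + 1) % coresFree.length                      // move on to the next worker
    }
    assigned
  }

  def main(args: Array[String]): Unit = {
    val free = Array(10, 10, 10)
    println(assignCores(free, 20, spreadOut = true).mkString(","))   // 7,7,6  - spread across all workers
    println(assignCores(free, 20, spreadOut = false).mkString(","))  // 10,10,0 - packed onto two workers
  }
}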

Step 2: startExecutorsOnWorkers from step 1 calls allocateWorkerResourceToExecutors
Source location: org/apache/spark/deploy/master/Master.scala

private def allocateWorkerResourceToExecutors(
    app: ApplicationInfo,
    assignedCores: Int,
    coresPerExecutor: Option[Int],
    worker: WorkerInfo): Unit = {
  // If the number of cores per executor is specified, we divide the cores assigned
  // to this worker evenly among the executors with no remainder.
  // Otherwise, we launch a single executor that grabs all the assignedCores on this worker.
  val numExecutors = coresPerExecutor.map { assignedCores / _ }.getOrElse(1)
  val coresToAssign = coresPerExecutor.getOrElse(assignedCores)

  for (i <- 1 to numExecutors) {
    // First, add the executor to the application's in-memory bookkeeping by creating an
    // ExecutorDesc object, which records how many cpu cores this executor was assigned.
    // Because the allocation is driven by the total cores assigned to each worker, the actual
    // number of executors and cores per executor can differ from what was requested: e.g. asking
    // for 3 executors with 3 cores each on 9 workers may end up as one single-core executor per worker.
    val exec = app.addExecutor(worker, coresToAssign)
    launchExecutor(worker, exec)
    // Mark the application as RUNNING
    app.state = ApplicationState.RUNNING
  }
}
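The split of a worker's assigned cores into executors, done by the two lines at the top of this method, can be sketched as a tiny helper (illustrative, not part of Spark):

object ExecutorSplitSketch {
  // If coresPerExecutor is set, the cores assigned to this worker are divided evenly into
  // executors of that size; otherwise a single executor grabs all the assigned cores.
  def executorsForWorker(assignedCores: Int, coresPerExecutor: Option[Int]): Seq[Int] = {
    val numExecutors = coresPerExecutor.map(assignedCores / _).getOrElse(1)
    val coresEach = coresPerExecutor.getOrElse(assignedCores)
    Seq.fill(numExecutors)(coresEach)
  }

  def main(args: Array[String]): Unit = {
    println(executorsForWorker(8, Some(2)))   // List(2, 2, 2, 2)
    println(executorsForWorker(8, None))      // List(8)
  }
}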

Step 3: allocateWorkerResourceToExecutors calls launchExecutor
Source location: org/apache/spark/deploy/master/Master.scala

private def launchExecutor(worker: WorkerInfo, exec: ExecutorDesc): Unit = {
  logInfo("Launching executor " + exec.fullId + " on worker " + worker.id)
  // Add the executor to the worker's in-memory bookkeeping
  worker.addExecutor(exec)
  // Send a LaunchExecutor message to the worker's RPC endpoint
  worker.endpoint.send(LaunchExecutor(masterUrl,
    exec.application.id, exec.id, exec.application.desc, exec.cores, exec.memory))
  // Send an ExecutorAdded message to the driver of the application this executor belongs to
  exec.application.driver.send(
    ExecutorAdded(exec.id, worker.id, worker.hostPort, exec.cores, exec.memory))
}

Step 4: the Worker handles the LaunchExecutor message
Source location: org/apache/spark/deploy/worker/Worker.scala

  case LaunchExecutor(masterUrl, appId, execId, appDesc, cores_, memory_) =>
      if (masterUrl != activeMasterUrl) {
        logWarning("Invalid Master (" + masterUrl + ") attempted to launch executor.")
      } else {
        try {
          logInfo("Asked to launch executor %s/%d for %s".format(appId, execId, appDesc.name))

          // Create the executor's working directory
          val executorDir = new File(workDir, appId + "/" + execId)
          if (!executorDir.mkdirs()) {
            throw new IOException("Failed to create directory " + executorDir)
          }

          // Create local dirs for the executor. These are passed to the executor via the
          // SPARK_EXECUTOR_DIRS environment variable, and deleted by the Worker when the
          // application finishes.
          val appLocalDirs = appDirectories.get(appId).getOrElse {
            Utils.getOrCreateLocalRootDirs(conf).map { dir =>
              val appDir = Utils.createDirectory(dir, namePrefix = "executor")
              Utils.chmod700(appDir)
              appDir.getAbsolutePath()
            }.toSeq
          }
          appDirectories(appId) = appLocalDirs

          // Create the ExecutorRunner
          val manager = new ExecutorRunner(
            appId,
            execId,
            appDesc.copy(command = Worker.maybeUpdateSSLSettings(appDesc.command, conf)),
            cores_,
            memory_,
            self,
            workerId,
            host,
            webUi.boundPort,
            publicAddress,
            sparkHome,
            executorDir,
            workerUri,
            conf,
            appLocalDirs, ExecutorState.RUNNING)

          // Add the ExecutorRunner to the worker's in-memory bookkeeping
          executors(appId + "/" + execId) = manager

          // Start the ExecutorRunner
          manager.start()

          // Account for the cores and memory this executor will use
          coresUsed += cores_
          memoryUsed += memory_

          // Send an ExecutorStateChanged message to the master
          sendToMaster(ExecutorStateChanged(appId, execId, manager.state, None, None))
        } catch {
          case e: Exception => {
            logError(s"Failed to launch executor $appId/$execId for ${appDesc.name}.", e)
            if (executors.contains(appId + "/" + execId)) {
              executors(appId + "/" + execId).kill()
              executors -= appId + "/" + execId
            }
            sendToMaster(ExecutorStateChanged(appId, execId, ExecutorState.FAILED,
              Some(e.toString), None))
          }
        }
      }
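The per-application local-directory handling above (appDirectories.get(appId).getOrElse { ... }) follows a simple cache-on-first-use pattern. A self-contained sketch, with Files.createTempDirectory standing in for Spark's Utils.createDirectory:

import java.nio.file.Files
import scala.collection.mutable

object AppDirsSketch {
  private val appDirectories = mutable.Map.empty[String, Seq[String]]

  // The first executor of an application creates the local dirs; later executors reuse them.
  def localDirsFor(appId: String): Seq[String] =
    appDirectories.getOrElseUpdate(appId, {
      val dir = Files.createTempDirectory(s"executor-$appId-").toFile
      Seq(dir.getAbsolutePath)
    })

  def main(args: Array[String]): Unit = {
    println(localDirsFor("app-20180211-0001"))
    println(localDirsFor("app-20180211-0001"))   // same directories, not created again
  }
}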

Step 5: manager.start() from step 4 invokes ExecutorRunner.start()
Source location: org/apache/spark/deploy/worker/ExecutorRunner.scala

private[worker] def start() {
  workerThread = new Thread("ExecutorRunner for " + fullId) {
    // Run fetchAndRunExecutor in a dedicated thread
    override def run() { fetchAndRunExecutor() }
  }
  workerThread.start()
  // Shutdown hook that kills actors on shutdown.
  shutdownHook = ShutdownHookManager.addShutdownHook { () =>
    // It's possible that we arrive here before calling `fetchAndRunExecutor`, then `state` will
    // be `ExecutorState.RUNNING`. In this case, we should set `state` to `FAILED`.
    if (state == ExecutorState.RUNNING) {
      state = ExecutorState.FAILED
    }
    killProcess(Some("Worker shutting down")) }
}
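The shutdown-hook logic above (mark a still-RUNNING runner as FAILED and kill its process when the JVM goes down) is a common supervision pattern; a minimal stand-alone sketch using scala.sys.addShutdownHook, with string states standing in for ExecutorState:

object ShutdownHookSketch {
  @volatile private var state = "RUNNING"     // stand-in for ExecutorState

  def main(args: Array[String]): Unit = {
    sys.addShutdownHook {
      // If we get here while still RUNNING, the process is going away abnormally: mark it FAILED
      if (state == "RUNNING") state = "FAILED"
      println(s"shutting down, final state: $state")   // stand-in for killProcess(...)
    }
    // ... launch and supervise the executor process here; on normal completion set state = "EXITED"
    state = "EXITED"
  }
}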

Step 6: the thread in step 5 calls fetchAndRunExecutor
Source location: org/apache/spark/deploy/worker/ExecutorRunner.scala

  /**
   * Download and run the executor described in our ApplicationDescription
   */
  private def fetchAndRunExecutor() {
    try {
      // Launch the process
      // Build a ProcessBuilder from the executor's launch command and memory requirement
      val builder = CommandUtils.buildProcessBuilder(appDesc.command, new SecurityManager(conf),
        memory, sparkHome.getAbsolutePath, substituteVariables)

      val command = builder.command()
      val formattedCommand = command.asScala.mkString("\"", "\" \"", "\"")
      logInfo(s"Launch command: $formattedCommand")

      builder.directory(executorDir)
      builder.environment.put("SPARK_EXECUTOR_DIRS", appLocalDirs.mkString(File.pathSeparator))
      // In case we are running this from within the Spark Shell, avoid creating a "scala"
      // parent process for the executor command
      builder.environment.put("SPARK_LAUNCH_WITH_SCALA", "0")

      // Add webUI log urls
      val baseUrl =
        s"http://$publicAddress:$webUiPort/logPage/?appId=$appId&executorId=$execId&logType="
      builder.environment.put("SPARK_LOG_URL_STDERR", s"${baseUrl}stderr")
      builder.environment.put("SPARK_LOG_URL_STDOUT", s"${baseUrl}stdout")

      process = builder.start()

      // Redirect the process output, saving stdout and stderr to files in the executor's working directory
      val header = "Spark Executor Command: %s\n%s\n\n".format(
        formattedCommand, "=" * 40)

      // Redirect its stdout and stderr to files
      val stdout = new File(executorDir, "stdout")
      stdoutAppender = FileAppender(process.getInputStream, stdout, conf)

      val stderr = new File(executorDir, "stderr")
      Files.write(header, stderr, UTF_8)
      stderrAppender = FileAppender(process.getErrorStream, stderr, conf)

      // Wait for it to exit; executor may exit with code 0 (when driver instructs it to shutdown)
      // or with nonzero exit code

      // Wait (via Process.waitFor()) for the already-started executor process to exit
      val exitCode = process.waitFor()

      // Record the state and exit code once the executor has finished
      state = ExecutorState.EXITED
      val message = "Command exited with code " + exitCode

      // Send an ExecutorStateChanged message to the worker
      worker.send(ExecutorStateChanged(appId, execId, state, Some(message), Some(exitCode)))
    } catch {
      case interrupted: InterruptedException => {
        logInfo("Runner thread for executor " + fullId + " interrupted")
        state = ExecutorState.KILLED
        killProcess(None)
      }
      case e: Exception => {
        logError("Error running executor", e)
        state = ExecutorState.FAILED
        killProcess(Some(e.toString))
      }
    }
  }
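The tail of fetchAndRunExecutor boils down to: wait for the child process, then report a state change back to the worker. A condensed sketch with a simplified stand-in message type (not Spark's ExecutorStateChanged) and an "echo" placeholder command:

object ExecutorExitSketch {
  sealed trait ExecState
  case object Exited extends ExecState
  case object Killed extends ExecState
  case object Failed extends ExecState

  // Simplified stand-in for Spark's ExecutorStateChanged message
  final case class StateChanged(appId: String, execId: Int, state: ExecState,
                                message: Option[String], exitCode: Option[Int])

  // Launch the process, wait for it, and report the outcome through `send`
  def runAndReport(appId: String, execId: Int, builder: ProcessBuilder)
                  (send: StateChanged => Unit): Unit = {
    try {
      val exitCode = builder.start().waitFor()
      send(StateChanged(appId, execId, Exited,
        Some(s"Command exited with code $exitCode"), Some(exitCode)))
    } catch {
      case _: InterruptedException =>
        send(StateChanged(appId, execId, Killed, None, None))
      case e: Exception =>
        send(StateChanged(appId, execId, Failed, Some(e.toString), None))
    }
  }

  def main(args: Array[String]): Unit = {
    runAndReport("app-20180211-0001", 0, new ProcessBuilder("echo", "executor work"))(msg => println(msg))
  }
}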
