Chapter 4: Process Scheduling (Part 3)

4.5 The Linux Scheduling Implementation

Having discussed the motivation for the CFS scheduling algorithm and its internal logic, we can now explore how CFS is actually implemented. The relevant code lives in kernel/sched_fair.c. We will look at four components in particular:

Time accounting

Process selection

The scheduler entry point

Sleeping and waking up

1. Time Accounting

All process schedulers must account for the time a process runs. Most Unix systems do so by assigning each process a timeslice. On each system timer tick, the timeslice is decremented by one tick period. When a process's timeslice reaches zero, it is preempted by another runnable process whose timeslice has not yet reached zero.
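The fragment below is a conceptual illustration of that traditional approach, not code from any kernel; the structure and names (classic_task, classic_tick(), need_resched_flag) are hypothetical:

/*
 * Hypothetical illustration of traditional timeslice accounting.
 * Each task carries a timeslice counted in timer ticks.
 */
struct classic_task {
        int timeslice;                  /* remaining ticks */
};

static int need_resched_flag;

/* Called once per timer tick for the currently running task. */
static void classic_tick(struct classic_task *curr)
{
        if (curr->timeslice > 0)
                curr->timeslice--;      /* charge one tick period */

        if (curr->timeslice == 0)
                need_resched_flag = 1;  /* preempt: run a task with time left */
}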

1) The Scheduler Entity Structure

CFS must keep per-process accounting of run time, because it has to ensure that each process runs only for the processor time that is fairly allotted to it. CFS uses the scheduler entity structure, struct sched_entity (defined in include/linux/sched.h), to track process accounting:

struct sched_entity {
        struct load_weight      load;           /* for load-balancing */
        struct rb_node          run_node;
        struct list_head        group_node;
        unsigned int            on_rq;

        u64                     exec_start;             /* timestamp (runqueue clock) when the entity last began running */
        u64                     sum_exec_runtime;       /* total time spent running on a CPU */
        u64                     vruntime;               /* weighted (virtual) runtime */
        u64                     prev_sum_exec_runtime;  /* sum_exec_runtime when the entity was last scheduled in */

        u64                     last_wakeup;
        u64                     avg_overlap;

        u64                     nr_migrations;

        u64                     start_runtime;
        u64                     avg_wakeup;

#ifdef CONFIG_SCHEDSTATS
        u64                     wait_start;
        u64                     wait_max;
        u64                     wait_count;
        u64                     wait_sum;
        u64                     iowait_count;
        u64                     iowait_sum;

        u64                     sleep_start;
        u64                     sleep_max;
        s64                     sum_sleep_runtime;

        u64                     block_start;
        u64                     block_max;
        u64                     exec_max;
        u64                     slice_max;

        u64                     nr_migrations_cold;
        u64                     nr_failed_migrations_affine;
        u64                     nr_failed_migrations_running;
        u64                     nr_failed_migrations_hot;
        u64                     nr_forced_migrations;

        u64                     nr_wakeups;
        u64                     nr_wakeups_sync;
        u64                     nr_wakeups_migrate;
        u64                     nr_wakeups_local;
        u64                     nr_wakeups_remote;
        u64                     nr_wakeups_affine;
        u64                     nr_wakeups_affine_attempts;
        u64                     nr_wakeups_passive;
        u64                     nr_wakeups_idle;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
        struct sched_entity     *parent;
        /* rq on which this entity is (to be) queued: */
        struct cfs_rq           *cfs_rq;
        /* rq "owned" by this entity/group: */
        struct cfs_rq           *my_q;
#endif
};

The scheduler entity structure is embedded in the process descriptor, struct task_struct, as a member variable named se.

include/linux/sched.h

struct task_struct {
        volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
        void *stack;
        atomic_t usage;
        unsigned int flags;     /* per process flags, defined below */
        unsigned int ptrace;

        int lock_depth;         /* BKL lock depth */

#ifdef CONFIG_SMP
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
        int oncpu;
#endif
#endif

        int prio, static_prio, normal_prio;
        unsigned int rt_priority;
        const struct sched_class *sched_class;
        struct sched_entity se;
        struct sched_rt_entity rt;

#ifdef CONFIG_PREEMPT_NOTIFIERS
        /* list of struct preempt_notifier: */
        struct hlist_head preempt_notifiers;
#endif

        /*
         * fpu_counter contains the number of consecutive context switches
         * that the FPU is used. If this is over a threshold, the lazy fpu
         * saving becomes unlazy to save the trap. This is an unsigned char
         * so that after 256 times the counter wraps and the behavior turns
         * lazy again; this to deal with bursty apps that only use FPU for
         * a short time
         */
        unsigned char fpu_counter;
#ifdef CONFIG_BLK_DEV_IO_TRACE
        unsigned int btrace_seq;
#endif

        unsigned int policy;
        cpumask_t cpus_allowed;

#ifdef CONFIG_TREE_PREEMPT_RCU
        int rcu_read_lock_nesting;
        char rcu_read_unlock_special;
        struct rcu_node *rcu_blocked_node;
        struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
        struct sched_info sched_info;
#endif

        struct list_head tasks;
        struct plist_node pushable_tasks;

        struct mm_struct *mm, *active_mm;
#if defined(SPLIT_RSS_COUNTING)
        struct task_rss_stat    rss_stat;
#endif
/* task state */
        int exit_state;
        int exit_code, exit_signal;
        int pdeath_signal;  /*  The signal sent when the parent dies  */
        /* ??? */
        unsigned int personality;
        unsigned did_exec:1;
        unsigned in_execve:1;   /* Tell the LSMs that the process is doing an
                                 * execve */
        unsigned in_iowait:1;


        /* Revert to default priority/policy when forking */
        unsigned sched_reset_on_fork:1;

        pid_t pid;
        pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
        /* Canary value for the -fstack-protector gcc feature */
        unsigned long stack_canary;
#endif

        /* 
         * pointers to (original) parent process, youngest child, younger sibling,
         * older sibling, respectively.  (p->father can be replaced with 
         * p->real_parent->pid)
         */
        struct task_struct *real_parent; /* real parent process */
        struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
        /*
         * children/sibling forms the list of my natural children
         */
        struct list_head children;      /* list of my children */
        struct list_head sibling;       /* linkage in my parent's children list */
        struct task_struct *group_leader;       /* threadgroup leader */

        /*
         * ptraced is the list of tasks this task is using ptrace on.
         * This includes both natural children and PTRACE_ATTACH targets.
         * p->ptrace_entry is p's link on the p->parent->ptraced list.
         */
        struct list_head ptraced;
        struct list_head ptrace_entry;

        /*
         * This is the tracer handle for the ptrace BTS extension.
         * This field actually belongs to the ptracer task.
         */
        struct bts_context *bts;

        /* PID/PID hash table linkage. */
        struct pid_link pids[PIDTYPE_MAX];
        struct list_head thread_group;

        struct completion *vfork_done;          /* for vfork() */
        int __user *set_child_tid;              /* CLONE_CHILD_SETTID */
        int __user *clear_child_tid;            /* CLONE_CHILD_CLEARTID */

        cputime_t utime, stime, utimescaled, stimescaled;
        cputime_t gtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
        cputime_t prev_utime, prev_stime;
#endif
        unsigned long nvcsw, nivcsw; /* context switch counts */
        struct timespec start_time;             /* monotonic time */
        struct timespec real_start_time;        /* boot based time */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
        unsigned long min_flt, maj_flt;

        struct task_cputime cputime_expires;
        struct list_head cpu_timers[3];

/* process credentials */
        const struct cred *real_cred;   /* objective and real subjective task
                                         * credentials (COW) */
        const struct cred *cred;        /* effective (overridable) subjective task
                                         * credentials (COW) */
        struct mutex cred_guard_mutex;  /* guard against foreign influences on
                                         * credential calculations
                                         * (notably. ptrace) */
        struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */

        char comm[TASK_COMM_LEN]; /* executable name excluding path
                                     - access with [gs]et_task_comm (which lock
                                       it with task_lock())
                                     - initialized normally by setup_new_exec */
/* file system info */
        int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
        struct sysv_sem sysvsem;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
        unsigned long last_switch_count;
#endif
/* CPU-specific state of this task */
        struct thread_struct thread;
/* filesystem information */
        struct fs_struct *fs;
/* open file information */
        struct files_struct *files;
/* namespaces */
        struct nsproxy *nsproxy;
/* signal handlers */
        struct signal_struct *signal;
        struct sighand_struct *sighand;

        sigset_t blocked, real_blocked;
        sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
        struct sigpending pending;

        unsigned long sas_ss_sp;
        size_t sas_ss_size;
        int (*notifier)(void *priv);
        void *notifier_data;
        sigset_t *notifier_mask;
        struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
        uid_t loginuid;
        unsigned int sessionid;
#endif
        seccomp_t seccomp;

/* Thread group tracking */
        u32 parent_exec_id;
        u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
        spinlock_t alloc_lock;

#ifdef CONFIG_GENERIC_HARDIRQS
        /* IRQ handler threads */
        struct irqaction *irqaction;
#endif

        /* Protection of the PI data structures: */
        raw_spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
        /* PI waiters blocked on a rt_mutex held by this task */
        struct plist_head pi_waiters;
        /* Deadlock detection and priority inheritance handling */
        struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
        /* mutex deadlock detection */
        struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        unsigned int irq_events;
        unsigned long hardirq_enable_ip;
        unsigned long hardirq_disable_ip;
        unsigned int hardirq_enable_event;
        unsigned int hardirq_disable_event;
        int hardirqs_enabled;
        int hardirq_context;
        unsigned long softirq_disable_ip;
        unsigned long softirq_enable_ip;
        unsigned int softirq_disable_event;
        unsigned int softirq_enable_event;
        int softirqs_enabled;
        int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
        u64 curr_chain_key;
        int lockdep_depth;
        unsigned int lockdep_recursion;
        struct held_lock held_locks[MAX_LOCK_DEPTH];
        gfp_t lockdep_reclaim_gfp;
#endif

/* journalling filesystem info */
        void *journal_info;

/* stacked block device info */
        struct bio_list *bio_list;

/* VM state */
        struct reclaim_state *reclaim_state;

        struct backing_dev_info *backing_dev_info;

        struct io_context *io_context;

        unsigned long ptrace_message;
        siginfo_t *last_siginfo; /* For ptrace use.  */
        struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
        u64 acct_rss_mem1;      /* accumulated rss usage */
        u64 acct_vm_mem1;       /* accumulated virtual memory usage */
        cputime_t acct_timexpd; /* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
        nodemask_t mems_allowed;        /* Protected by alloc_lock */
        int cpuset_mem_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
        /* Control Group info protected by css_set_lock */
        struct css_set *cgroups;
        /* cg_list protected by css_set_lock and tsk->alloc_lock */
        struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
        struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
        struct compat_robust_list_head __user *compat_robust_list;
#endif
        struct list_head pi_state_list;
        struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
        struct perf_event_context *perf_event_ctxp;
        struct mutex perf_event_mutex;
        struct list_head perf_event_list;
#endif
#ifdef CONFIG_NUMA
        struct mempolicy *mempolicy;    /* Protected by alloc_lock */
        short il_next;
#endif
        atomic_t fs_excl;       /* holding fs exclusive resources */
        struct rcu_head rcu;

        /*
         * cache last used pipe for splice
         */
        struct pipe_inode_info *splice_pipe;
#ifdef  CONFIG_TASK_DELAY_ACCT
        struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
        int make_it_fail;
#endif
        struct prop_local_single dirties;
#ifdef CONFIG_LATENCYTOP
        int latency_record_count;
        struct latency_record latency_record[LT_SAVECOUNT];
#endif
        /*
         * time slack values; these are used to round up poll() and
         * select() etc timeout values. These are in nanoseconds.
         */
        unsigned long timer_slack_ns;
        unsigned long default_timer_slack_ns;

        struct list_head        *scm_work_list;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* Index of current stored address in ret_stack */
        int curr_ret_stack;
        /* Stack of return addresses for return function tracing */
        struct ftrace_ret_stack *ret_stack;
        /* time stamp for last schedule */
        unsigned long long ftrace_timestamp;
        /*
         * Number of functions that haven't been traced
         * because of depth overrun.
         */
        atomic_t trace_overrun;
        /* Pause for the tracing */
        atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
        /* state flags for use by tracers */
        unsigned long trace;
        /* bitmask of trace recursion */
        unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
        struct memcg_batch_info {
                int do_batch;   /* incremented when batch uncharge started */
                struct mem_cgroup *memcg; /* target memcg of uncharge */
                unsigned long bytes;            /* uncharged usage */
                unsigned long memsw_bytes; /* uncharged mem+swap usage */
        } memcg_batch;
#endif
};

2) The Virtual Runtime

The vruntime variable stores a process's virtual runtime, which is the actual runtime normalized (weighted) by the number of runnable processes. It is measured in nanoseconds, so vruntime is decoupled from the timer tick. The virtual runtime helps us approximate the ideal multitasking processor that CFS is modeled on. If such an ideal processor really existed, we would not need vruntime at all, because all processes of the same priority would have identical virtual runtimes: every task would have received an equal share of the processor. Because a real processor cannot multitask perfectly and must run each task in turn, CFS uses the vruntime variable to record how long a process has run and therefore how much longer it ought to run.

The accounting is implemented by the update_curr() function, defined in kernel/sched_fair.c:

static void update_curr(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr = cfs_rq->curr;
        u64 now = rq_of(cfs_rq)->clock;
        unsigned long delta_exec;

        if (unlikely(!curr))
                return;

        /*
         * Get the amount of time the current task was running
         * since the last time we changed load (this cannot
         * overflow on 32 bits):
         */
        delta_exec = (unsigned long)(now - curr->exec_start);
        if (!delta_exec)
                return;

        __update_curr(cfs_rq, curr, delta_exec);
        curr->exec_start = now;

        if (entity_is_task(curr)) {
                struct task_struct *curtask = task_of(curr);

                trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
                cpuacct_charge(curtask, delta_exec);
                account_group_exec_runtime(curtask, delta_exec);
        }
}

update_curr() calculates the execution time of the current process and stores it in the variable delta_exec. It then passes that runtime to __update_curr(), which weights the time by the number of runnable processes. Finally, the weighted value is added to the current process's vruntime.

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,  unsigned long delta_exec)
{
        unsigned long delta_exec_weighted;

        schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

        curr->sum_exec_runtime += delta_exec;
        schedstat_add(cfs_rq, exec_clock, delta_exec);
        delta_exec_weighted = calc_delta_fair(delta_exec, curr);

        curr->vruntime += delta_exec_weighted;
        update_min_vruntime(cfs_rq);
}
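The weighting itself is performed by calc_delta_fair(), which scales the raw runtime by the ratio of the nice-0 load weight to the entity's own load weight. Below is a minimal sketch of that idea only, not the kernel's exact implementation (the real code avoids the division by using precomputed inverse weights and fixed-point arithmetic); se_weight stands in for se->load.weight:

#define NICE_0_LOAD     1024            /* load weight of a nice-0 task */

static unsigned long weighted_delta(unsigned long delta_exec,
                                    unsigned long se_weight)
{
        /*
         * A nice-0 task (se_weight == NICE_0_LOAD) accrues vruntime at
         * wall-clock rate; a heavier (higher-priority) task accrues it
         * more slowly, a lighter (lower-priority) task more quickly.
         */
        return (unsigned long)((unsigned long long)delta_exec *
                               NICE_0_LOAD / se_weight);
}

For a nice-0 process the two weights are equal, so its virtual runtime advances at the same rate as real time.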

update_curr() is invoked periodically by the system timer, and also whenever a process becomes runnable or blocks and becomes unrunnable. In this way, vruntime is an accurate measure of a given process's runtime and an indicator of which process should run next.
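As a rough sketch of that periodic path (simplified from this kernel version: signatures are trimmed and load tracking, group scheduling, and the preemption check are omitted), scheduler_tick() invokes the running task's scheduler-class tick hook, which for CFS ends up in update_curr():

/* Simplified sketch; not the verbatim kernel functions. */
static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
        update_curr(cfs_rq);            /* charge the elapsed time to 'curr' */
        /* ... preemption check omitted ... */
}

static void task_tick_fair(struct rq *rq, struct task_struct *curr)
{
        struct sched_entity *se = &curr->se;

        entity_tick(cfs_rq_of(se), se); /* reached from scheduler_tick() */
}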

2. Process Selection

If a perfect multitasking processor existed, the vruntime of all runnable processes would be identical. Since no such processor exists, CFS tries to balance processes' virtual runtimes with a simple rule: when CFS needs to pick the next process to run, it picks the process with the smallest vruntime. This is, in fact, the core of the CFS scheduling algorithm: pick the task with the smallest vruntime. The rest of this subsection discusses how that selection is implemented.

CFS uses a red-black tree to organize the list of runnable processes and to find the process with the smallest vruntime quickly. A red-black tree, called an rbtree in Linux, is a self-balancing binary search tree. It stores data in tree nodes, each associated with a key, and the keys allow the stored data to be retrieved quickly.
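As a generic illustration of the rbtree interface (struct my_item and my_first() are made-up names, not scheduler code), the rb_node is embedded in the data it indexes, and rb_entry(), a wrapper around container_of(), maps a tree node back to its containing structure:

#include <linux/rbtree.h>

struct my_item {
        struct rb_node node;            /* embedded rbtree linkage */
        unsigned long long key;         /* value the tree is sorted by */
};

/* Return the item with the smallest key, or NULL if the tree is empty. */
static struct my_item *my_first(struct rb_root *root)
{
        struct rb_node *leftmost = rb_first(root);      /* leftmost node holds the smallest key */

        if (!leftmost)
                return NULL;

        return rb_entry(leftmost, struct my_item, node);
}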

1) Picking the Next Task

Assume a red-black tree stores every runnable process in the system, with each node keyed on the process's virtual runtime. The next process CFS picks to run is the one with the smallest vruntime, which corresponds to the leftmost node in the tree: starting at the root and following left children all the way down finds the process with the smallest vruntime. CFS's process-selection algorithm can therefore be summed up as "run the process represented by the leftmost node in the red-black tree." The function that does this is __pick_next_entity(), defined in kernel/sched_fair.c:

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *left = cfs_rq->rb_leftmost;

        if (!left)
                return NULL;

        return rb_entry(left, struct sched_entity, run_node);
}

2) Adding Processes to the Tree

Next, let's see how CFS adds processes to the red-black tree and how it caches the leftmost node. This happens when a process becomes runnable (wakes up) or when it is first created via the fork() system call. The enqueue_entity() function does this work (kernel/sched_fair.c):

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
        /*
         * Update the normalized vruntime before updating min_vruntime
         * through callig update_curr().
         */
        if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE))
                se->vruntime += cfs_rq->min_vruntime;

        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);
        account_entity_enqueue(cfs_rq, se);

        if (flags & ENQUEUE_WAKEUP) {
                place_entity(cfs_rq, se, 0);
                enqueue_sleeper(cfs_rq, se);
        }

        update_stats_enqueue(cfs_rq, se);
        check_spread(cfs_rq, se);
        if (se != cfs_rq->curr)
                __enqueue_entity(cfs_rq, se);
}

This function updates the runtime and other statistics and then calls __enqueue_entity() to perform the actual insertion of the entry into the red-black tree.

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct rb_node *parent = NULL;
        struct sched_entity *entry;
        s64 key = entity_key(cfs_rq, se);
        int leftmost = 1;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_entity, run_node);
                /*
                 * We dont care about collisions. Nodes with
                 * the same key stay together.
                 */
                if (key < entity_key(cfs_rq, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        /*
         * Maintain a cache of leftmost tree entries (it is frequently
         * used):
         */
        if (leftmost)
                cfs_rq->rb_leftmost = &se->run_node;

        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

Analysis: The while() loop traverses the tree looking for the right place for the key, which is the inserted process's vruntime. The usual balanced-binary-tree rule applies: if the key is less than the current node's key, go down the left branch; if it is greater, go down the right branch. If we ever move right, even once, the inserted process cannot be the new leftmost node, so leftmost is set to 0. If we only ever move left, leftmost stays 1, meaning there is a new leftmost node and the cache can be updated by pointing rb_leftmost at the inserted process. The loop terminates when the traversal reaches a node with no child in the chosen direction: link is then NULL. After the loop exits, rb_link_node() is called on the parent node to make the newly inserted process its child, and finally rb_insert_color() restores the tree's self-balancing properties.
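The key compared in the loop comes from entity_key(). In this kernel version it is essentially the entity's vruntime measured relative to the queue's min_vruntime, which is why it is declared as a signed s64:

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /* vruntime normalized against the per-queue minimum */
        return se->vruntime - cfs_rq->min_vruntime;
}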

3) Removing Processes from the Tree

Finally, let's see how CFS removes processes from the red-black tree. Removal happens when a process blocks (becomes unrunnable) or terminates (finishes execution):

 kernel/sched_fair.c

static void dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);

        update_stats_dequeue(cfs_rq, se);
        if (sleep) {
#ifdef CONFIG_SCHEDSTATS
                if (entity_is_task(se)) {
                        struct task_struct *tsk = task_of(se);

                        if (tsk->state & TASK_INTERRUPTIBLE)
                                se->sleep_start = rq_of(cfs_rq)->clock;
                        if (tsk->state & TASK_UNINTERRUPTIBLE)
                                se->block_start = rq_of(cfs_rq)->clock;
                }
#endif
        }

        clear_buddies(cfs_rq, se);

        if (se != cfs_rq->curr)
                __dequeue_entity(cfs_rq, se);
        account_entity_dequeue(cfs_rq, se);
        update_min_vruntime(cfs_rq);

        /*
         * Normalize the entity after updating the min_vruntime because the
         * update can refer to the ->curr item and we need to reflect this
         * movement in our normalized position.
         */
        if (!sleep)
                se->vruntime -= cfs_rq->min_vruntime;
}

The actual work is done by the helper function __dequeue_entity().

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (cfs_rq->rb_leftmost == &se->run_node) {
                struct rb_node *next_node;

                next_node = rb_next(&se->run_node);
                cfs_rq->rb_leftmost = next_node;
        }

        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

The rb_erase() function is defined in lib/rbtree.c:

void rb_erase(struct rb_node *node, struct rb_root *root)
{
        struct rb_node *child, *parent;
        int color;

        if (!node->rb_left)
                child = node->rb_right;
        else if (!node->rb_right)
                child = node->rb_left;
        else
        {
                struct rb_node *old = node, *left;

                node = node->rb_right;
                while ((left = node->rb_left) != NULL)
                        node = left;

                if (rb_parent(old)) {
                        if (rb_parent(old)->rb_left == old)
                                rb_parent(old)->rb_left = node;
                        else
                                rb_parent(old)->rb_right = node;
                } else
                        root->rb_node = node;

                child = node->rb_right;
                parent = rb_parent(node);
                color = rb_color(node);

                if (parent == old) {
                        parent = node;
                } else {
                        if (child)
                                rb_set_parent(child, parent);
                        parent->rb_left = child;

                        node->rb_right = old->rb_right;
                        rb_set_parent(old->rb_right, node);
                }

                node->rb_parent_color = old->rb_parent_color;
                node->rb_left = old->rb_left;
                rb_set_parent(old->rb_left, node);

                goto color;
        }

        parent = rb_parent(node);
        color = rb_color(node);

        if (child)
                rb_set_parent(child, parent);
        if (parent)
        {
                if (parent->rb_left == node)
                        parent->rb_left = child;
                else
                        parent->rb_right = child;
        }
        else
                root->rb_node = child;

 color:
        if (color == RB_BLACK)
                __rb_erase_color(child, parent, root);
}

Analysis: Removing a process from the red-black tree is much simpler, because the rbtree implementation provides rb_erase(), which does all the real work. All that remains for __dequeue_entity() is to update the rb_leftmost cache: if the process being removed is the leftmost node, it calls rb_next() to find, in sorted order, the node that becomes the new leftmost node once the current one is removed.

3. The Scheduler Entry Point

The main entry point into the process scheduler is the function schedule(), defined in kernel/sched.c. It is how the rest of the kernel invokes the process scheduler, deciding which process runs next and when. schedule() is generic with respect to scheduler classes: it finds the highest-priority scheduler class that has a runnable process of its own and asks it which process should run next. The only important thing the function does is call pick_next_task() (also defined in kernel/sched.c), which goes through each scheduler class in priority order, from highest to lowest, and selects the highest-priority process in the highest-priority class that has a runnable process.

kernel/sched.c
/*
 * schedule() is the main scheduler function.
 */
asmlinkage void __sched schedule(void)
{
        struct task_struct *prev, *next;
        unsigned long *switch_count;
        struct rq *rq;
        int cpu;

need_resched:
        preempt_disable();
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
        rcu_sched_qs(cpu);
        prev = rq->curr;
        switch_count = &prev->nivcsw;

        release_kernel_lock(prev);
need_resched_nonpreemptible:

        schedule_debug(prev);

        if (sched_feat(HRTICK))
                hrtick_clear(rq);

        raw_spin_lock_irq(&rq->lock);
        update_rq_clock(rq);
        clear_tsk_need_resched(prev);

        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                if (unlikely(signal_pending_state(prev->state, prev)))
                        prev->state = TASK_RUNNING;
                else
                        deactivate_task(rq, prev, 1);
                switch_count = &prev->nvcsw;
        }

        pre_schedule(rq, prev);

        if (unlikely(!rq->nr_running))
                idle_balance(cpu, rq);

        put_prev_task(rq, prev);
        next = pick_next_task(rq);

        if (likely(prev != next)) {
                sched_info_switch(prev, next);
                perf_event_task_sched_out(prev, next);

                rq->nr_switches++;
                rq->curr = next;
                ++*switch_count;

                context_switch(rq, prev, next); /* unlocks the rq */
                /*
                 * the context switch might have flipped the stack from under
                 * us, hence refresh the local variables.
                 */
                cpu = smp_processor_id();
                rq = cpu_rq(cpu);
        } else
                raw_spin_unlock_irq(&rq->lock);

        post_schedule(rq);

        if (unlikely(reacquire_kernel_lock(current) < 0)) {
                prev = rq->curr;
                switch_count = &prev->nivcsw;
                goto need_resched_nonpreemptible;
        }

        preempt_enable_no_resched();
        if (need_resched())
                goto need_resched;
}

/*
 * Pick up the highest-prio task:
 */
static inline struct task_struct * pick_next_task(struct rq *rq)
{
        const struct sched_class *class;
        struct task_struct *p;

        /*
         * Optimization: we know that if all tasks are in
         * the fair class we can call that function directly:
         */
        if (likely(rq->nr_running == rq->cfs.nr_running)) {
                p = fair_sched_class.pick_next_task(rq);
                if (likely(p))
                        return p;
        }

        class = sched_class_highest;
        for ( ; ; ) {
                p = class->pick_next_task(rq);
                if (p)
                        return p;
                /*
                 * Will never be NULL as the idle class always
                 * returns a non-NULL p:
                 */
                class = class->next;
        }
}

Note the optimization at the beginning of the function. Because CFS is the scheduler class for normal processes, and the vast majority of processes a system runs are normal processes, there is a shortcut to quickly select the next CFS-provided process when the total number of runnable processes equals the number of runnable CFS processes (in which case every runnable process belongs to the CFS class).

The heart of the function is the for() loop, which iterates over each scheduler class in priority order, starting with the highest-priority class. Each scheduler class implements pick_next_task(), which returns a pointer to its next runnable process, or NULL if it has none. The first class to return a non-NULL value has selected the next runnable process. CFS's implementation of pick_next_task() calls pick_next_entity(), which in turn calls the __pick_next_entity() function discussed earlier.
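For reference, the CFS side of that chain looks roughly like the following in kernels of this vintage (a sketch, not the verbatim source: the real pick_next_task_fair() also walks nested cfs_rq hierarchies for group scheduling and programs the high-resolution tick):

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
        struct cfs_rq *cfs_rq = &rq->cfs;
        struct sched_entity *se;

        if (!cfs_rq->nr_running)
                return NULL;                    /* no runnable CFS task on this runqueue */

        se = pick_next_entity(cfs_rq);          /* normally the leftmost (smallest-vruntime) entity */
        set_next_entity(cfs_rq, se);            /* take it out of the tree and mark it as current */

        return task_of(se);
}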

Reposted from blog.csdn.net/xiezhi123456/article/details/81112348