ASID

To improve TLB performance, TLB entries are divided into global and process-specific ones. Global entries stay resident in the TLB and are not flushed, for example the translations for kernel space; process-specific entries cover each process's private address space and may be flushed on a process switch.
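The global/process-specific split is expressed per page-table entry through the nG (not-Global) bit. As a minimal illustration (not code from this article; the PTE_NG value follows arch/arm64/include/asm/pgtable-hwdef.h, the helper is made up): kernel mappings leave nG clear so their TLB entries are global, while user mappings set it so their TLB entries are tagged with the current ASID.

/* Illustration: the nG bit in the lower attribute bits of an arm64 PTE.
 * nG clear -> global entry, survives an ASID switch (kernel mappings)
 * nG set   -> entry is tagged with the current ASID (user mappings)
 */
#define PTE_NG		(1UL << 11)

static inline int pte_is_asid_tagged(unsigned long pte)
{
	return (pte & PTE_NG) != 0;
}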
To support process-specific TLB entries, ARM provides a hardware mechanism called the ASID (Address Space ID), which lets the TLB tell which process a TLB entry belongs to. ASIDs are managed with a bitmap: every ASID that has been handed out is recorded in asid_map.
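asid_map itself is an ordinary kernel bitmap. The sketch below shows the allocation idea behind new_context() in arch/arm64/mm/context.c in a heavily simplified form (the helper name alloc_asid is made up; the reserved-ASID path, KPTI pairing and locking are omitted): a free bit is taken from the bitmap, and when the bitmap runs out the global generation is bumped, the bitmap is cleared and all CPUs are marked for a TLB flush.

/* Simplified sketch of bitmap-based ASID allocation (illustration only). */
static unsigned long *asid_map;		/* one bit per hardware ASID */
static atomic64_t asid_generation;	/* generation number kept above asid_bits */

static u64 alloc_asid(void)
{
	u64 generation = atomic64_read(&asid_generation);
	u64 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

	if (asid == NUM_USER_ASIDS) {
		/* Bitmap exhausted: start a new generation (rollover),
		 * clear asid_map and request a TLB flush on every CPU. */
		generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
							 &asid_generation);
		flush_context(smp_processor_id());
		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
	}

	__set_bit(asid, asid_map);
	return asid | generation;	/* low bits: hw ASID, high bits: generation */
}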
The use of the ASID can be seen on a process switch, in __schedule()->context_switch()->switch_mm()->check_and_switch_context():
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid, old_active_asid;
	/* The process's ASID is stored in the context.id atomic variable */
	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
	/* Fast path: the incoming process's ASID has not rolled over, so no TLB flush is needed */
	if (old_active_asid &&
	    !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
	    /* update this CPU's active_asids */
	    atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	/* An ASID rollover (generation overflow) has happened: allocate a new ASID for this process */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}
	/* Flush the current CPU's TLB if a flush is pending */
	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		/* update the ASID in hardware via cpu_switch_mm() */
		cpu_switch_mm(mm->pgd, mm);
}
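The rollover test (asid ^ atomic64_read(&asid_generation)) >> asid_bits relies on the layout of context.id: the low asid_bits bits (8 or 16, depending on the CPU) hold the hardware ASID, and everything above them holds the generation number. A small illustration of the check (the helper name is made up):

/* Illustration: with asid_bits == 16, context.id is laid out as
 *   [63..16] generation | [15..0] hardware ASID
 * XOR-ing with the global generation and shifting the ASID bits away
 * yields 0 exactly when the generations match, i.e. no rollover happened. */
static inline bool asid_is_current_generation(u64 ctx_id, u64 generation,
					      unsigned int asid_bits)
{
	return ((ctx_id ^ generation) >> asid_bits) == 0;
}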


cpu_switch_mm() eventually lands in cpu_do_switch_mm in ./arch/arm64/mm/proc.S, which writes the ASID into the translation table base registers:
ENTRY(cpu_do_switch_mm)
        mrs     x2, ttbr1_el1
        mmid    x1, x1                          // get mm->context.id
        phys_to_ttbr x3, x0
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
        bfi     x3, x1, #48, #16                // set the ASID field in TTBR0
#endif
        // set the ASID into bits [63:48] of TTBR1_EL1
        bfi     x2, x1, #48, #16                // set the ASID
        msr     ttbr1_el1, x2                   // in TTBR1 (since TCR.A1 is set)
        isb
        msr     ttbr0_el1, x3                   // now update TTBR0
        isb
        b       post_ttbr_update_workaround     // Back to C code...
ENDPROC(cpu_do_switch_mm)
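The two bfi instructions simply insert the 16-bit ASID into bits [63:48] of the TTBR value. In C the same update would look roughly like the sketch below (illustration only; the names are made up):

/* Equivalent of "bfi x2, x1, #48, #16": replace the ASID field of a TTBR
 * value with the low 16 bits of asid, leaving the base address untouched. */
#define TTBR_ASID_SHIFT	48
#define TTBR_ASID_MASK	(0xffffUL << TTBR_ASID_SHIFT)

static inline u64 ttbr_set_asid(u64 ttbr, u64 asid)
{
	return (ttbr & ~TTBR_ASID_MASK) | ((asid & 0xffffUL) << TTBR_ASID_SHIFT);
}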
