基于android Linux kernel 3.10.87分析


基于Linux kernel 3.10.87分析

一、什么是device tree?

二、为什么使用device tree?

三、怎么使用device tree?
1、初始化流程。也就是扫描dtb并将其转换成Device Tree Structure。
2、运行时参数的传递以及platform的识别流程分析
3、如何将Device Tree Structure并入linux kernel的设备驱动模型。

//linux kernel 第一链接脚本文件Vmlinux.lds.in (h:\work\kernel\linux-3.10.87\linux-3.10.87\arch\arm\boot\compressed) 1639 2015/8/17

/*
 * Linker script for the ARM zImage decompressor
 * (arch/arm/boot/compressed/vmlinux.lds.in).
 * Lays out the self-relocating decompressor stub: text, read-only data,
 * the compressed kernel payload (.piggydata), GOT, bss and stack.
 */
OUTPUT_ARCH(arm)
ENTRY(_start)
SECTIONS
{
  /DISCARD/ : {
    *(.ARM.exidx*)
    *(.ARM.extab*)
    /*
     * Discard any r/w data - this produces a link error if we have any,
     * which is required for PIC decompression.  Local data generates
     * GOTOFF relocations, which prevents it being relocated independently
     * of the text/got segments.
     */
    *(.data)
  }

  /* TEXT_START is substituted by the build system (usually 0). */
  . = TEXT_START;
  _text = .;

  .text : {
    _start = .;
    *(.start)          /* entry stub from head.S must come first */
    *(.text)
    *(.text.*)
    *(.fixup)
    *(.gnu.warning)
    *(.glue_7t)
    *(.glue_7)
  }
  .rodata : {
    *(.rodata)
    *(.rodata.*)
  }
  /* The compressed kernel image itself is linked in here. */
  .piggydata : {
    *(.piggydata)
  }

  . = ALIGN(4);
  _etext = .;          /* end of text/rodata/payload; also end of what head.S copies */

  /* GOT: the only data the PIC decompressor relocates at run time. */
  .got.plt  : { *(.got.plt) }
  _got_start = .;
  .got   : { *(.got) }
  _got_end = .;

  /* ensure the zImage file size is always a multiple of 64 bits */
  /* (without a dummy byte, ld just ignores the empty section) */
  .pad   : { BYTE(0); . = ALIGN(8); }
  _edata = .;          /* head.S uses _edata to find an appended DTB */

  /* BSS_START is substituted by the build (ALIGN(8) unless ZBOOT_ROM). */
  . = BSS_START;
  __bss_start = .;
  .bss   : { *(.bss) }
  _end = .;

  . = ALIGN(8);  /* the stack must be 64-bit aligned */
  .stack  : { *(.stack) }

  /* Debug (stabs) sections, linked at address 0 so they take no load space. */
  .stab 0  : { *(.stab) }
  .stabstr 0  : { *(.stabstr) }
  .stab.excl 0  : { *(.stab.excl) }
  .stab.exclstr 0 : { *(.stab.exclstr) }
  .stab.index 0  : { *(.stab.index) }
  .stab.indexstr 0 : { *(.stab.indexstr) }
  .comment 0  : { *(.comment) }
}

linux kernel 进入的第一执行文件
//head.S (h:\work\kernel\linux-3.10.87\linux-3.10.87\arch\arm\boot\compressed) 31868 2015/8/17


  .section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
  .align
  .arm    @ Always enter in ARM state
start:
  .type start,#function
  .rept 7
  mov r0, r0
  .endr
   ARM(  mov r0, r0  )
   ARM(  b 1f  )
 THUMB(  adr r12, BSYM(1f) )
 THUMB(  bx r12  )

  .word 0x016f2818  @ Magic numbers to help the loader
  .word start   @ absolute load/run zImage address
  .word _edata   @ zImage end address
 THUMB(  .thumb   )
1:
  mrs r9, cpsr
#ifdef CONFIG_ARM_VIRT_EXT
  bl __hyp_stub_install @ get into SVC mode, reversibly
#endif
  mov r7, r1   @ save architecture ID
  mov r8, r2   @ save atags pointer

#ifndef __ARM_ARCH_2__
  /*
   * Booting from Angel - need to enter SVC mode and disable
   * FIQs/IRQs (numeric definitions from angel arm.h source).
   * We only do this if we were in user mode on entry.
   */
  mrs r2, cpsr  @ get current mode
  tst r2, #3   @ not user?
  bne not_angel
  mov r0, #0x17  @ angel_SWIreason_EnterSVC
 ARM(  swi 0x123456 ) @ angel_SWI_ARM
 THUMB(  svc 0xab  ) @ angel_SWI_THUMB
not_angel:
  safe_svcmode_maskall r0
  msr spsr_cxsf, r9  @ Save the CPU boot mode in
      @ SPSR
#else
  teqp pc, #0x0c000003  @ turn off interrupts
#endif

  /*
   * Note that some cache flushing and other stuff may
   * be needed here - is there an Angel SWI call for this?
   */

  /*
   * some architecture specific code can be inserted
   * by the linker here, but it should preserve r7, r8, and r9.
   */

  .text

#ifdef CONFIG_AUTO_ZRELADDR
  @ determine final kernel image address
  mov r4, pc
  and r4, r4, #0xf8000000
  add r4, r4, #TEXT_OFFSET
#else
  ldr r4, =zreladdr
#endif

  bl cache_on


/*
 * Relocation / appended-DTB handling.
 * LC0 is a table of link-time addresses; comparing its link-time value
 * (r1) against its run-time address (r0) gives the load delta, which is
 * then applied to every pointer we care about.  If a DTB is appended to
 * the zImage (CONFIG_ARM_APPENDED_DTB) it is detected at _edata, the
 * ATAG list is optionally folded into it, and r8 is switched to point
 * at the DTB so the kernel proper sees r2 = dtb.
 */
restart: adr r0, LC0
  ldmia r0, {r1, r2, r3, r6, r10, r11, r12}
  ldr sp, [r0, #28]  @ 8th LC0 entry: link-time stack pointer

  /*
   * We might be running at a different address.  We need
   * to fix up various pointers.
   */
  sub r0, r0, r1  @ calculate the delta offset
  add r6, r6, r0  @ _edata
  add r10, r10, r0  @ inflated kernel size location

  /*
   * The kernel build system appends the size of the
   * decompressed kernel at the end of the compressed data
   * in little-endian form.
   */
  @ assemble the 32-bit size byte-by-byte so this works on
  @ either endianness; result in r9
  ldrb r9, [r10, #0]
  ldrb lr, [r10, #1]
  orr r9, r9, lr, lsl #8
  ldrb lr, [r10, #2]
  ldrb r10, [r10, #3]
  orr r9, r9, lr, lsl #16
  orr r9, r9, r10, lsl #24

#ifndef CONFIG_ZBOOT_ROM
  /* malloc space is above the relocated stack (64k max) */
  add sp, sp, r0
  add r10, sp, #0x10000
#else
  /*
   * With ZBOOT_ROM the bss/stack is non relocatable,
   * but someone could still run this code from RAM,
   * in which case our reference is _edata.
   */
  mov r10, r6
#endif

  mov r5, #0   @ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = final kernel address
 *   r5  = appended dtb size (still unknown)
 *   r6  = _edata
 *   r7  = architecture ID
 *   r8  = atags/device tree pointer
 *   r9  = size of decompressed image
 *   r10 = end of this image, including  bss/stack/malloc space if non XIP
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 *
 * if there are device trees (dtb) appended to zImage, advance r10 so that the
 * dtb data will get relocated along with the kernel if necessary.
 */

  ldr lr, [r6, #0]  @ first word after _edata: candidate FDT magic
#ifndef __ARMEB__
  ldr r1, =0xedfe0dd0  @ sig is 0xd00dfeed big endian
#else
  ldr r1, =0xd00dfeed
#endif
  cmp lr, r1
  bne dtb_check_done  @ not found

#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
  /*
   * OK... Let's do some funky business here.
   * If we do have a DTB appended to zImage, and we do have
   * an ATAG list around, we want the later to be translated
   * and folded into the former here.  To be on the safe side,
   * let's temporarily move  the stack away into the malloc
   * area.  No GOT fixup has occurred yet, but none of the
   * code we're about to call uses any global variable.
  */
  add sp, sp, #0x10000
  stmfd sp!, {r0-r3, ip, lr}
  mov r0, r8   @ arg0: atags pointer
  mov r1, r6   @ arg1: dtb ( = _edata)
  sub r2, sp, r6  @ arg2: space available for the merged dtb
  bl atags_to_fdt

  /*
   * If returned value is 1, there is no ATAG at the location
   * pointed by r8.  Try the typical 0x100 offset from start
   * of RAM and hope for the best.
   */
  cmp r0, #1
  sub r0, r4, #TEXT_OFFSET
  add r0, r0, #0x100
  mov r1, r6
  sub r2, sp, r6
  bleq atags_to_fdt  @ retry only if first call returned 1

  ldmfd sp!, {r0-r3, ip, lr}
  sub sp, sp, #0x10000
#endif

  mov r8, r6   @ use the appended device tree

  /*
   * Make sure that the DTB doesn't end up in the final
   * kernel's .bss area. To do so, we adjust the decompressed
   * kernel size to compensate if that .bss size is larger
   * than the relocated code.
   */
  ldr r5, =_kernel_bss_size
  adr r1, wont_overwrite
  sub r1, r6, r1
  subs r1, r5, r1
  addhi r9, r9, r1

  /* Get the dtb's size */
  ldr r5, [r6, #4]  @ fdt header word 1: totalsize (big endian)
#ifndef __ARMEB__
  /* convert r5 (dtb size) to little endian */
  eor r1, r5, r5, ror #16
  bic r1, r1, #0x00ff0000
  mov r5, r5, ror #8
  eor r5, r5, r1, lsr #8
#endif

  /* preserve 64-bit alignment */
  add r5, r5, #7
  bic r5, r5, #7

  /* relocate some pointers past the appended dtb */
  add r6, r6, r5
  add r10, r10, r5
  add sp, sp, r5
dtb_check_done:
#endif

/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address
 *   r9  = size of decompressed image
 *   r10 = end of this image, including  bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= address of wont_overwrite -> OK
 */
  add r10, r10, #16384  @ leave room for the 16K page directory below r4
  cmp r4, r10
  bhs wont_overwrite
  add r10, r4, r9
  adr r9, wont_overwrite
  cmp r10, r9
  bls wont_overwrite

/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
  /*
   * Bump to the next 256-byte boundary with the size of
   * the relocation code added. This avoids overwriting
   * ourself when the offset is small.
   */
  add r10, r10, #((reloc_code_end - restart + 256) & ~255)
  bic r10, r10, #255

  /* Get start of code we want to copy and align it down. */
  adr r5, restart
  bic r5, r5, #31

/* Relocate the hyp vector base if necessary */
#ifdef CONFIG_ARM_VIRT_EXT
  mrs r0, spsr
  and r0, r0, #MODE_MASK
  cmp r0, #HYP_MODE
  bne 1f

  bl __hyp_get_vectors
  sub r0, r0, r5  @ rebase vectors by the same copy offset
  add r0, r0, r10
  bl __hyp_set_vectors
1:
#endif

  sub r9, r6, r5  @ size to copy
  add r9, r9, #31  @ rounded up to a multiple
  bic r9, r9, #31  @ ... of 32 bytes
  add r6, r9, r5  @ r6 = source end
  add r9, r9, r10  @ r9 = destination end

  @ copy backwards, 32 bytes per iteration, until source start reached
1:  ldmdb r6!, {r0 - r3, r10 - r12, lr}
  cmp r6, r5
  stmdb r9!, {r0 - r3, r10 - r12, lr}
  bhi 1b

  /* Preserve offset to relocated code. */
  sub r6, r9, r6

#ifndef CONFIG_ZBOOT_ROM
  /* cache_clean_flush may use the stack, so relocate it */
  add sp, sp, r6
#endif

  bl cache_clean_flush

  @ jump to the relocated copy of restart
  adr r0, BSYM(restart)
  add r0, r0, r6
  mov pc, r0


/*
 * We are now guaranteed not to overwrite ourselves during decompression.
 * Fix up the GOT for the load delta, clear BSS, then call the C
 * decompressor and hand control to the decompressed kernel with the
 * boot protocol registers restored (r1 = machine nr, r2 = atags/dtb).
 */
wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = kernel execution address
 *   r5  = appended dtb size (0 if not present)
 *   r7  = architecture ID
 *   r8  = atags pointer
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 */
  orrs r1, r0, r5  @ delta == 0 && no appended dtb -> nothing to fix up
  beq not_relocated

  add r11, r11, r0
  add r12, r12, r0

#ifndef CONFIG_ZBOOT_ROM
  /*
   * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
   * we need to fix up pointers into the BSS region.
   * Note that the stack pointer has already been fixed up.
   */
  add r2, r2, r0
  add r3, r3, r0

  /*
   * Relocate all entries in the GOT table.
   * Bump bss entries to _edata + dtb size
   */
1:  ldr r1, [r11, #0]  @ relocate entries in the GOT
  add r1, r1, r0  @ This fixes up C references
  cmp r1, r2   @ if entry >= bss_start &&
  cmphs r3, r1   @       bss_end > entry
  addhi r1, r1, r5  @    entry += dtb size
  str r1, [r11], #4  @ next entry
  cmp r11, r12
  blo 1b

  /* bump our bss pointers too */
  add r2, r2, r5
  add r3, r3, r5

#else

  /*
   * Relocate entries in the GOT table.  We only relocate
   * the entries that are outside the (relocated) BSS region.
   */
1:  ldr r1, [r11, #0]  @ relocate entries in the GOT
  cmp r1, r2   @ entry < bss_start ||
  cmphs r3, r1   @ _end < entry
  addlo r1, r1, r0  @ table.  This fixes up the
  str r1, [r11], #4  @ C references.
  cmp r11, r12
  blo 1b
#endif

not_relocated: mov r0, #0
  @ zero BSS four words per loop iteration (r2..r3)
1:  str r0, [r2], #4  @ clear bss
  str r0, [r2], #4
  str r0, [r2], #4
  str r0, [r2], #4
  cmp r2, r3
  blo 1b

/*
 * The C runtime environment should now be setup sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 */
  mov r0, r4
  mov r1, sp   @ malloc space above stack
  add r2, sp, #0x10000 @ 64k max
  mov r3, r7
  bl decompress_kernel
  bl cache_clean_flush
  bl cache_off
  mov r1, r7   @ restore architecture number
  mov r2, r8   @ restore atags pointer

#ifdef CONFIG_ARM_VIRT_EXT
  mrs r0, spsr  @ Get saved CPU boot mode
  and r0, r0, #MODE_MASK
  cmp r0, #HYP_MODE  @ if not booted in HYP mode...
  bne __enter_kernel  @ boot kernel directly

  @ booted in HYP: point the hyp vectors at the re-entry stub first
  adr r12, .L__hyp_reentry_vectors_offset
  ldr r0, [r12]
  add r0, r0, r12

  bl __hyp_set_vectors
  __HVC(0)   @ otherwise bounce to hyp mode

  b .   @ should never be reached

  .align 2
.L__hyp_reentry_vectors_offset: .long __hyp_reentry_vectors - .
#else
  b __enter_kernel
#endif


// head.S (h:\work\kernel\linux-3.10.87\linux-3.10.87\arch\arm\kernel) 16335 2015/8/17

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
 * r1 = machine nr, r2 = atags or dtb pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */

/*
 * stext: kernel-proper entry point (arch/arm/kernel/head.S).
 * Entered from the decompressor with MMU and D-cache off,
 * r0 = 0, r1 = machine nr, r2 = atags or dtb pointer.
 * Identifies the CPU, computes the physical offset, builds the initial
 * page tables and enables the MMU, then continues at __mmap_switched.
 */
 .arm

 __HEAD
ENTRY(stext)

 THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM.
 THUMB( bx r9  ) @ If this is a Thumb-2 kernel,
 THUMB( .thumb   ) @ switch to Thumb now.
 THUMB(1:   )

#ifdef CONFIG_ARM_VIRT_EXT
 bl __hyp_stub_install
#endif
 @ ensure svc mode and all interrupts masked
 safe_svcmode_maskall r9

 mrc p15, 0, r9, c0, c0  @ get processor id
 bl __lookup_processor_type  @ r5=procinfo r9=cpuid
 movs r10, r5    @ invalid processor (r5=0)?
 THUMB( it eq )  @ force fixup-able long branch encoding
 beq __error_p   @ yes, error 'p'

#ifdef CONFIG_ARM_LPAE
 mrc p15, 0, r3, c0, c1, 4  @ read ID_MMFR0
 and r3, r3, #0xf   @ extract VMSA support
 cmp r3, #5    @ long-descriptor translation table format?
 THUMB( it lo )    @ force fixup-able long branch encoding
 blo __error_p   @ only classic page table format
#endif

#ifndef CONFIG_XIP_KERNEL
 @ derive PHYS_OFFSET at run time: compare the link-time address
 @ stored at label 2 (not visible in this excerpt) with our PC
 adr r3, 2f
 ldmia r3, {r4, r8}
 sub r4, r3, r4   @ (PHYS_OFFSET - PAGE_OFFSET)
 add r8, r8, r4   @ PHYS_OFFSET
#else
 ldr r8, =PLAT_PHYS_OFFSET  @ always constant in this case
#endif

 /*
  * r1 = machine no, r2 = atags or dtb,
  * r8 = phys_offset, r9 = cpuid, r10 = procinfo
  */
 bl __vet_atags
#ifdef CONFIG_SMP_ON_UP
 bl __fixup_smp
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
 bl __fixup_pv_table
#endif
 bl __create_page_tables

 /*
  * The following calls CPU specific code in a position independent
  * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
  * xxx_proc_info structure selected by __lookup_processor_type
  * above.  On return, the CPU will be ready for the MMU to be
  * turned on, and r0 will hold the CPU control register value.
  */
 ldr r13, =__mmap_switched  @ address to jump to after   //goto head-common.S
      @ mmu has been enabled
 adr lr, BSYM(1f)   @ return (PIC) address
 mov r8, r4    @ set TTBR1 to swapper_pg_dir
 ARM( add pc, r10, #PROCINFO_INITFUNC )
 THUMB( add r12, r10, #PROCINFO_INITFUNC )
 THUMB( mov pc, r12    )
1: b __enable_mmu
ENDPROC(stext)


//head-common.S (h:\work\kernel\linux-3.10.87\linux-3.10.87\arch\arm\kernel) 5280 2015/8/17
/*
 * __mmap_switched (arch/arm/kernel/head-common.S):
 * first code run with the MMU on.  Copies .data if it was linked
 * separately, clears BSS, stashes the processor ID, machine type and
 * atags/dtb pointer into C-visible variables (via the pointer table at
 * __mmap_switched_data), then jumps into C at start_kernel().
 * On entry: r1 = machine nr, r2 = atags/dtb, r9 = cpuid,
 * r0 = CPU control register value from the proc init function.
 */
__mmap_switched:
 adr r3, __mmap_switched_data

 ldmia r3!, {r4, r5, r6, r7}  @ r4=data src, r5=data dst, r6=bss start, r7=bss end
 cmp r4, r5    @ Copy data segment if needed
1: cmpne r5, r6
 ldrne fp, [r4], #4
 strne fp, [r5], #4
 bne 1b

 mov fp, #0    @ Clear BSS (and zero fp)
1: cmp r6, r7
 strcc fp, [r6],#4
 bcc 1b

 @ remaining table entries: &processor_id, &__machine_arch_type,
 @ &__atags_pointer, &cr_alignment, init stack pointer
 ARM( ldmia r3, {r4, r5, r6, r7, sp})
 THUMB( ldmia r3, {r4, r5, r6, r7} )
 THUMB( ldr sp, [r3, #16]  )
 str r9, [r4]   @ Save processor ID
 str r1, [r5]   @ Save machine type
 str r2, [r6]   @ Save atags pointer
 cmp r7, #0
 bicne r4, r0, #CR_A   @ Clear 'A' bit
 stmneia r7, {r0, r4}   @ Save control register values
 b start_kernel    @ enter C world (init/main.c)
ENDPROC(__mmap_switched)

//Main.c (h:\work\kernel\linux-3.10.87\linux-3.10.87\init) 22533 2015/8/17

/*
 * start_kernel - architecture-independent kernel initialization
 * (init/main.c).  Runs once on the boot CPU with interrupts disabled.
 * The call ORDER below is the contract: each init depends on the ones
 * before it (e.g. setup_arch() must run before memory allocators,
 * sched_init() before anything that might schedule).  Never returns:
 * ends in rest_init(), which spawns init and becomes the idle task.
 * For the device-tree path the key call is setup_arch(&command_line).
 */
asmlinkage void __init start_kernel(void)
{
 char * command_line;
 extern const struct kernel_param __start___param[], __stop___param[];

 /*
  * Need to run as early as possible, to initialize the
  * lockdep hash:
  */
 lockdep_init();
 smp_setup_processor_id();
 debug_objects_early_init();

 /*
  * Set up the the initial canary ASAP:
  */
 boot_init_stack_canary();

 cgroup_init_early();

 local_irq_disable();
 early_boot_irqs_disabled = true;

/*
 * Interrupts are still disabled. Do necessary setups, then
 * enable them
 */
 boot_cpu_init();
 page_address_init();
 pr_notice("%s", linux_banner);
 setup_arch(&command_line);       /* arch-specific setup (arch/arm/); parses the DTB/atags */
 mm_init_owner(&init_mm, &init_task);
 mm_init_cpumask(&init_mm);
 setup_command_line(command_line);
 setup_nr_cpu_ids();
 setup_per_cpu_areas();
 smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */

 build_all_zonelists(NULL, NULL);
 page_alloc_init();

 pr_notice("Kernel command line: %s\n", boot_command_line);
 parse_early_param();
 parse_args("Booting kernel", static_command_line, __start___param,
     __stop___param - __start___param,
     -1, -1, &unknown_bootoption);

 jump_label_init();

 /*
  * These use large bootmem allocations and must precede
  * kmem_cache_init()
  */
 setup_log_buf(0);
 pidhash_init();
 vfs_caches_init_early();
 sort_main_extable();
 trap_init();
 mm_init();

 /*
  * Set up the scheduler prior starting any interrupts (such as the
  * timer interrupt). Full topology setup happens at smp_init()
  * time - but meanwhile we still have a functioning scheduler.
  */
 sched_init();
 /*
  * Disable preemption - early bootup scheduling is extremely
  * fragile until we cpu_idle() for the first time.
  */
 preempt_disable();
 if (WARN(!irqs_disabled(), "Interrupts were enabled *very* early, fixing it\n"))
  local_irq_disable();
 idr_init_cache();
 perf_event_init();
 rcu_init();
 tick_nohz_init();
 radix_tree_init();
 /* init some links before init_ISA_irqs() */
 early_irq_init();
 init_IRQ();
 tick_init();
 init_timers();
 hrtimers_init();
 softirq_init();
 timekeeping_init();
 time_init();
 profile_init();
 call_function_init();
 WARN(!irqs_disabled(), "Interrupts were enabled early\n");
 early_boot_irqs_disabled = false;
 local_irq_enable();   /* from here on, interrupts are live */

 kmem_cache_init_late();

 /*
  * HACK ALERT! This is early. We're enabling the console before
  * we've done PCI setups etc, and console_init() must be aware of
  * this. But we do want output early, in case something goes wrong.
  */
 console_init();
 if (panic_later)
  panic(panic_later, panic_param);

 lockdep_info();

 /*
  * Need to run this when irqs are enabled, because it wants
  * to self-test [hard/soft]-irqs on/off lock inversion bugs
  * too:
  */
 locking_selftest();

#ifdef CONFIG_BLK_DEV_INITRD
 /* sanity check: drop the initrd if the kernel already overwrote it */
 if (initrd_start && !initrd_below_start_ok &&
     page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
  pr_crit("initrd overwritten (0x%08lx < 0x%08lx) - disabling it.\n",
      page_to_pfn(virt_to_page((void *)initrd_start)),
      min_low_pfn);
  initrd_start = 0;
 }
#endif
 page_cgroup_init();
 debug_objects_mem_init();
 kmemleak_init();
 setup_per_cpu_pageset();
 numa_policy_init();
 if (late_time_init)
  late_time_init();
 sched_clock_init();
 calibrate_delay();
 pidmap_init();
 anon_vma_init();
#ifdef CONFIG_X86
 if (efi_enabled(EFI_RUNTIME_SERVICES))
  efi_enter_virtual_mode();
#endif
#ifdef CONFIG_X86_ESPFIX64
 /* Should be run before the first non-init thread is created */
 init_espfix_bsp();
#endif
 thread_info_cache_init();
 cred_init();
 fork_init(totalram_pages);
 proc_caches_init();
 buffer_init();
 key_init();
 security_init();
 dbg_late_init();
 vfs_caches_init(totalram_pages);
 signals_init();
 /* rootfs populating might need page-writeback */
 page_writeback_init();
#ifdef CONFIG_PROC_FS
 proc_root_init();
#endif
 cgroup_init();
 cpuset_init();
 taskstats_init_early();
 delayacct_init();

 check_bugs();

 acpi_early_init(); /* before LAPIC and SMP init */
 sfi_init_late();

 if (efi_enabled(EFI_RUNTIME_SERVICES)) {
  efi_late_init();
  efi_free_boot_services();
 }

 ftrace_init();

 /* Do the rest non-__init'ed, we're now alive */
 rest_init();
}

/*
 * setup_arch - ARM architecture setup (arch/arm/kernel/setup.c).
 * @cmdline_p: out-parameter; receives the kernel command line.
 *
 * Device-tree hot spots in this function:
 *  - setup_machine_fdt(__atags_pointer): scan the flattened DTB passed
 *    in r2 at boot and select the machine_desc whose dt_compat matches
 *    the root node's 'compatible' property; falls back to the classic
 *    ATAG/machine-number path when no DTB is present.
 *  - unflatten_device_tree(): expand the flat blob into the in-memory
 *    tree of struct device_node used by the driver model.
 */
void __init setup_arch(char **cmdline_p)
{
 const struct machine_desc *mdesc;

 setup_processor();
 mdesc = setup_machine_fdt(__atags_pointer);      /* DT path: select machine from dtb */
 if (!mdesc)
  mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);  /* legacy ATAG path */
 machine_desc = mdesc;
 machine_name = mdesc->name;
 dump_stack_set_arch_desc("%s", mdesc->name);

 if (mdesc->reboot_mode != REBOOT_HARD)
  reboot_mode = mdesc->reboot_mode;

 /* record kernel image layout in the init memory map */
 init_mm.start_code = (unsigned long) _text;
 init_mm.end_code   = (unsigned long) _etext;
 init_mm.end_data   = (unsigned long) _edata;
 init_mm.brk    = (unsigned long) _end;

 /* populate cmd_line too for later use, preserving boot_command_line */
 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
 *cmdline_p = cmd_line;

 parse_early_param();

 early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
 setup_dma_zone(mdesc);
 sanity_check_meminfo();
 arm_memblock_init(mdesc);

 paging_init(mdesc);
 request_standard_resources(mdesc);

 if (mdesc->restart)
  arm_pm_restart = mdesc->restart;

 unflatten_device_tree();         /* build the struct device_node tree from the dtb */

 arm_dt_init_cpu_maps();
 psci_init();
#ifdef CONFIG_SMP
 if (is_smp()) {
  /* board-specific smp_init may override; else prefer PSCI ops */
  if (!mdesc->smp_init || !mdesc->smp_init()) {
   if (psci_smp_available())
    smp_set_ops(&psci_smp_ops);
   else if (mdesc->smp)
    smp_set_ops(mdesc->smp);
  }
  smp_init_cpus();
  smp_build_mpidr_hash();
 }
#endif

 if (!is_smp())
  hyp_mode_check();

 reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
 handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
 conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
 conswitchp = &dummy_con;
#endif
#endif

 if (mdesc->init_early)
  mdesc->init_early();
}

device tree 相关的结构体


/*
 * struct machine_desc - per-board/platform description record.
 * A board is matched either by machine number (nr, classic ATAG boot)
 * or by one of the dt_compat strings against the DTB root node's
 * 'compatible' property (device-tree boot).  The function pointers are
 * the board's hooks into the generic boot sequence.
 *
 * Fix vs. the quoted excerpt: the struct definition was missing its
 * terminating semicolon.
 *
 * NOTE(review): setup_arch() above dereferences ->reboot_mode and
 * ->smp_init, which this snapshot of the struct does not declare — the
 * excerpt appears to come from a slightly different kernel revision;
 * verify against the actual 3.10.87 tree.
 */
struct machine_desc {
 unsigned int  nr;  /* architecture number */
 const char  *name;  /* architecture name */
 unsigned long  atag_offset; /* tagged list (relative) */
 const char *const  *dt_compat; /* array of device tree
       * 'compatible' strings */

 unsigned int  nr_irqs; /* number of IRQs */

#ifdef CONFIG_ZONE_DMA
 unsigned long  dma_zone_size; /* size of DMA-able area */
#endif

 unsigned int  video_start; /* start of video RAM */
 unsigned int  video_end; /* end of video RAM */

 unsigned char  reserve_lp0 :1; /* never has lp0 */
 unsigned char  reserve_lp1 :1; /* never has lp1 */
 unsigned char  reserve_lp2 :1; /* never has lp2 */
 char   restart_mode; /* default restart mode */
 struct smp_operations *smp;  /* SMP operations */
 void   (*fixup)(struct tag *, char **,
      struct meminfo *);
 void   (*reserve)(void);/* reserve mem blocks */
 void   (*map_io)(void);/* IO mapping function */
 void   (*init_early)(void);
 void   (*init_irq)(void);
 void   (*init_time)(void);
 void   (*init_machine)(void);
 void   (*init_late)(void);
#ifdef CONFIG_MULTI_IRQ_HANDLER
 void   (*handle_irq)(struct pt_regs *);
#endif
 void   (*restart)(char, const char *);
};

/*
 * This is what gets passed to the kernel by prom_init or kexec
 *
 * The dt struct contains the device tree structure, full pathes and
 * property contents. The dt strings contain a separate block with just
 * the strings for the property names, and is fully page aligned and
 * self contained in a page, so that it can be kept around by the kernel,
 * each property name appears only once in this page (cheap compression)
 *
 * the mem_rsvmap contains a map of reserved ranges of physical memory,
 * passing it here instead of in the device-tree itself greatly simplifies
 * the job of everybody. It's just a list of u64 pairs (base/size) that
 * ends when size is 0
 */
/*
 * struct boot_param_header - header of the flattened device tree (FDT)
 * blob, i.e. the bytes at the very start of a .dtb file.  All fields
 * are big-endian 32-bit values (__be32); 'magic' holds OF_DT_HEADER
 * (0xd00dfeed).  This matches the signature the decompressor checks
 * for an appended DTB in head.S above.
 *
 * Fix vs. the quoted excerpt: the struct definition was missing its
 * terminating semicolon.
 */
struct boot_param_header {
 __be32 magic;   /* magic word OF_DT_HEADER */
 __be32 totalsize;  /* total size of DT block */
 __be32 off_dt_struct;  /* offset to structure */
 __be32 off_dt_strings;  /* offset to strings */
 __be32 off_mem_rsvmap;  /* offset to memory reserve map */
 __be32 version;  /* format version */
 __be32 last_comp_version; /* last compatible version */
 /* version 2 fields below */
 __be32 boot_cpuid_phys; /* Physical CPU id we're booting on */
 /* version 3 fields below */
 __be32 dt_strings_size; /* size of the DT strings block */
 /* version 17 fields below */
 __be32 dt_struct_size;  /* size of the DT structure block */
};

/*
 * struct device_node - one node of the expanded (unflattened) device
 * tree, created from the FDT blob by unflatten_device_tree().  Nodes
 * are linked both hierarchically (parent/child/sibling) and in flat
 * lists (next = next node of same type, allnext = global node list).
 *
 * Fix vs. the quoted excerpt: the struct definition was missing its
 * terminating semicolon.
 */
struct device_node {
 const char *name;   /* node name (without unit address) */
 const char *type;   /* value of the 'device_type' property */
 phandle phandle;   /* unique handle used for cross-references */
 const char *full_name;  /* full path from the root node */

 struct property *properties;
 struct property *deadprops; /* removed properties */
 struct device_node *parent;
 struct device_node *child;
 struct device_node *sibling;
 struct device_node *next; /* next device of same type */
 struct device_node *allnext; /* next in list of all nodes */
 struct proc_dir_entry *pde; /* this node's proc directory */
 struct kref kref;   /* reference count */
 unsigned long _flags;
 void *data;
#if defined(CONFIG_SPARC)
 const char *path_component_name;
 unsigned int unique_id;
 struct of_irq_controller *irq_trans;
#endif
};

未完待续....


 

猜你喜欢

转载自blog.csdn.net/u013926029/article/details/78445186