From 02f4830b17ce2f806fabeb2c0a9fdbec9577d47c Mon Sep 17 00:00:00 2001 From: heyuanjie87 <943313837@qq.com> Date: Tue, 10 May 2022 22:21:02 +0800 Subject: [PATCH 1/4] =?UTF-8?q?=E6=B7=BB=E5=8A=A0LWP=E4=BD=BF=E8=83=BD?= =?UTF-8?q?=E5=88=A4=E6=96=AD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- libcpu/aarch64/common/context_gcc.S | 26 ++++++++++++++++++-------- libcpu/aarch64/common/mmu.c | 25 +++++++++++++++++++++++++ libcpu/aarch64/common/trap.c | 6 ++++-- libcpu/aarch64/cortex-a/entry_point.S | 6 ++++++ 4 files changed, 53 insertions(+), 10 deletions(-) diff --git a/libcpu/aarch64/common/context_gcc.S b/libcpu/aarch64/common/context_gcc.S index 3711342240..5570383590 100644 --- a/libcpu/aarch64/common/context_gcc.S +++ b/libcpu/aarch64/common/context_gcc.S @@ -171,22 +171,22 @@ rt_hw_get_gtimer_frq: LDP X2, X3, [SP], #0x10 LDP X0, X1, [SP], #0x10 RESTORE_FPU SP - +#ifdef RT_USING_LWP BEQ ret_to_user - +#endif ERET .endm #else .macro RESTORE_CONTEXT /* Set the SP to point to the stack of the task being restored. */ MOV SP, X0 - +#ifdef RT_USING_LWP BL rt_thread_self MOV X19, X0 BL lwp_mmu_switch MOV X0, X19 BL lwp_user_setting_restore - +#endif LDP X2, X3, [SP], #0x10 /* SPSR and ELR. */ TST X3, #0x1f @@ -214,9 +214,9 @@ rt_hw_get_gtimer_frq: LDP X2, X3, [SP], #0x10 LDP X0, X1, [SP], #0x10 RESTORE_FPU SP - +#ifdef RT_USING_LWP BEQ ret_to_user - +#endif ERET .endm #endif @@ -250,9 +250,9 @@ rt_hw_get_gtimer_frq: LDP X2, X3, [SP], #0x10 LDP X0, X1, [SP], #0x10 RESTORE_FPU SP - +#ifdef RT_USING_LWP BEQ ret_to_user - +#endif ERET .endm @@ -301,8 +301,10 @@ rt_hw_context_switch_to: MOV SP, X0 MOV X0, X1 BL rt_cpus_lock_status_restore +#ifdef RT_USING_LWP BL rt_thread_self BL lwp_user_setting_restore +#endif B rt_hw_context_switch_exit /* @@ -321,8 +323,10 @@ rt_hw_context_switch: MOV SP, X0 MOV X0, X2 BL rt_cpus_lock_status_restore +#ifdef RT_USING_LWP BL rt_thread_self BL lwp_user_setting_restore +#endif B rt_hw_context_switch_exit /* @@ -337,8 +341,10 @@ rt_hw_context_switch_interrupt: STP X0, X1, [SP, #-0x10]! STP X2, X3, [SP, #-0x10]! STP X29, X30, [SP, #-0x10]! +#ifdef RT_USING_LWP BL rt_thread_self BL lwp_user_setting_save +#endif LDP X29, X30, [SP], #0x10 LDP X2, X3, [SP], #0x10 LDP X0, X1, [SP], #0x10 @@ -349,7 +355,9 @@ rt_hw_context_switch_interrupt: MOV X19, X0 BL rt_cpus_lock_status_restore MOV X0, X19 +#ifdef RT_USING_LWP BL lwp_user_setting_restore +#endif B rt_hw_context_switch_exit .globl vector_fiq @@ -420,8 +428,10 @@ rt_hw_context_switch_interrupt: MOV X7, #1 // set rt_thread_switch_interrupt_flag to 1 STR X7, [X6] STP X1, X30, [SP, #-0x10]! 
+#ifdef RT_USING_LWP MOV X0, X2 BL lwp_user_setting_save +#endif LDP X1, X30, [SP], #0x10 _reswitch: LDR X6, =rt_interrupt_to_thread // set rt_interrupt_to_thread diff --git a/libcpu/aarch64/common/mmu.c b/libcpu/aarch64/common/mmu.c index 623bd6a276..11ad027ed5 100644 --- a/libcpu/aarch64/common/mmu.c +++ b/libcpu/aarch64/common/mmu.c @@ -137,6 +137,7 @@ int armv8_init_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa return 0; } +#ifdef RT_USING_LWP static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr) { int level; @@ -196,6 +197,12 @@ static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned lon return 0; } +#else +static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr) +{ + return 0; +} +#endif struct mmu_level_info { @@ -203,6 +210,7 @@ struct mmu_level_info void *page; }; +#ifdef RT_USING_LWP static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr) { int level; @@ -330,6 +338,18 @@ err: _kenrel_unmap_4K(lv0_tbl, (void *)va); return ret; } +#else +static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr) +{ + +} + +static int _kenrel_map_4K(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr) +{ + + return 0; +} +#endif int kernel_map_fixed(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long count, unsigned long attr) { @@ -928,5 +948,10 @@ void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned lo { while (1); } +} +#else +void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned long size, unsigned long pv_off) +{ + } #endif diff --git a/libcpu/aarch64/common/trap.c b/libcpu/aarch64/common/trap.c index b9e9f04e75..103dd6fff4 100644 --- a/libcpu/aarch64/common/trap.c +++ b/libcpu/aarch64/common/trap.c @@ -236,16 +236,18 @@ void rt_hw_trap_exception(struct rt_hw_exp_stack *regs) SVC_Handler(regs); /* never return here */ } - +#ifdef RT_USING_LWP if (check_user_stack(esr, regs)) { return; } - +#endif process_exception(esr, regs->pc); rt_hw_show_register(regs); rt_kprintf("current: %s\n", rt_thread_self()->name); +#ifdef RT_USING_LWP check_user_fault(regs, 0, "user fault"); +#endif #ifdef RT_USING_FINSH list_thread(); #endif diff --git a/libcpu/aarch64/cortex-a/entry_point.S b/libcpu/aarch64/cortex-a/entry_point.S index b652739b4d..6b32810df5 100644 --- a/libcpu/aarch64/cortex-a/entry_point.S +++ b/libcpu/aarch64/cortex-a/entry_point.S @@ -76,7 +76,11 @@ __start: eret /* exception return. from EL2. continue from .L__in_el1 */ .L__in_el1: +#ifdef RT_USING_LWP ldr x9, =PV_OFFSET +#else + mov x9, #0 +#endif mov sp, x1 /* in EL1. 
Set sp to _start */ /* Set CPACR_EL1 (Architecture Feature Access Control Register) to avoid trap from SIMD or float point instruction */ @@ -118,7 +122,9 @@ __start: dsb sy ldr x2, =0x40000000 /* map 1G memory for kernel space */ +#ifdef RT_USING_LWP ldr x3, =PV_OFFSET +#endif bl rt_hw_mmu_setup_early ldr x30, =after_mmu_enable /* set LR to after_mmu_enable function, it's a v_addr */ -- Gitee From 833ad2488e48af587615145e093f7045c53191ac Mon Sep 17 00:00:00 2001 From: heyuanjie87 <943313837@qq.com> Date: Wed, 11 May 2022 19:32:08 +0800 Subject: [PATCH 2/4] =?UTF-8?q?=E9=80=82=E9=85=8D=E4=B8=8D=E5=BC=80?= =?UTF-8?q?=E5=90=AFlwp=E7=9A=84=E6=83=85=E5=86=B5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- libcpu/aarch64/common/mmu.c | 18 ++++++++++++++++++ libcpu/aarch64/cortex-a/entry_point.S | 2 ++ 2 files changed, 20 insertions(+) diff --git a/libcpu/aarch64/common/mmu.c b/libcpu/aarch64/common/mmu.c index 11ad027ed5..f11cd7f414 100644 --- a/libcpu/aarch64/common/mmu.c +++ b/libcpu/aarch64/common/mmu.c @@ -952,6 +952,24 @@ void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned lo #else void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned long size, unsigned long pv_off) { + int ret; + unsigned long va = KERNEL_VADDR_START; + unsigned long count = (size + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT; + unsigned long normal_attr = MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM); + /* clean the first two pages */ + mmu_memset((char *)tbl0, 0, ARCH_PAGE_SIZE); + mmu_memset((char *)tbl1, 0, ARCH_PAGE_SIZE); + + ret = armv8_init_map_2M(tbl1, va, va + pv_off, count, normal_attr); + if (ret != 0) + { + while (1); + } + ret = armv8_init_map_2M(tbl0, va + pv_off, va + pv_off, count, normal_attr); + if (ret != 0) + { + while (1); + } } #endif diff --git a/libcpu/aarch64/cortex-a/entry_point.S b/libcpu/aarch64/cortex-a/entry_point.S index 6b32810df5..e9a511ea78 100644 --- a/libcpu/aarch64/cortex-a/entry_point.S +++ b/libcpu/aarch64/cortex-a/entry_point.S @@ -148,11 +148,13 @@ __start: ret after_mmu_enable: +#ifdef RT_USING_LWP mrs x0, tcr_el1 /* disable ttbr0, only using kernel space */ orr x0, x0, #(1 << 7) msr tcr_el1, x0 msr ttbr0_el1, xzr dsb sy +#endif mov x0, #1 msr spsel, x0 -- Gitee From c57fff4a7370492af111ad06c5fd99df6046b388 Mon Sep 17 00:00:00 2001 From: heyuanjie87 <943313837@qq.com> Date: Wed, 11 May 2022 23:12:23 +0800 Subject: [PATCH 3/4] =?UTF-8?q?z=E6=B7=BB=E5=8A=A0=E9=9D=9Elwp=E6=A8=A1?= =?UTF-8?q?=E5=BC=8F=E4=B8=8B=E9=A1=B5=E8=A1=A8=E5=88=9B=E5=BB=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- libcpu/aarch64/common/mmu.c | 214 +++++++++++++++++++++++------------- 1 file changed, 139 insertions(+), 75 deletions(-) diff --git a/libcpu/aarch64/common/mmu.c b/libcpu/aarch64/common/mmu.c index f11cd7f414..65b7890193 100644 --- a/libcpu/aarch64/common/mmu.c +++ b/libcpu/aarch64/common/mmu.c @@ -34,29 +34,34 @@ #define MMU_TBL_PAGE_4k_LEVEL 3 #define MMU_TBL_LEVEL_NR 4 +#define MMU_TBL_PAGE_NR_MAX 32 + void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr); struct page_table { - unsigned long page[512]; + unsigned long entry[512]; }; -static struct page_table *__init_page_array; -static unsigned long __page_off = 0UL; -unsigned long get_free_page(void) +/* only map 4G io/memory */ +volatile unsigned long MMUTable[512] __attribute__((aligned(4096))); +static volatile struct page_table MMUPage[MMU_TBL_PAGE_NR_MAX] __attribute__((aligned(4096))); + +static 
unsigned long _kernel_free_page(void) { - if (!__init_page_array) + static unsigned long i = 0; + + if (i >= MMU_TBL_PAGE_NR_MAX) { - unsigned long temp_page_start; - asm volatile("mov %0, sp":"=r"(temp_page_start)); - __init_page_array = (struct page_table *)(temp_page_start & ~(ARCH_SECTION_MASK)); - __page_off = 2; /* 0, 1 for ttbr0, ttrb1 */ + return RT_NULL; } - __page_off++; - return (unsigned long)(__init_page_array[__page_off - 1].page); + + ++i; + + return (unsigned long)&MMUPage[i - 1].entry; } -void mmu_memset(char *dst, char v, size_t len) +static void mmu_memset(char *dst, char v, size_t len) { while (len--) { @@ -64,7 +69,8 @@ void mmu_memset(char *dst, char v, size_t len) } } -static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr) +#ifdef RT_USING_LWP +static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr) { int level; unsigned long *cur_lv_tbl = lv0_tbl; @@ -86,13 +92,23 @@ static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va, unsigne off &= MMU_LEVEL_MASK; if (!(cur_lv_tbl[off] & MMU_TYPE_USED)) { - page = get_free_page(); + page = (unsigned long)rt_pages_alloc(0); if (!page) { return MMU_MAP_ERROR_NOPAGE; } - mmu_memset((char *)page, 0, ARCH_PAGE_SIZE); - cur_lv_tbl[off] = page | MMU_TYPE_TABLE; + rt_memset((char *)page, 0, ARCH_PAGE_SIZE); + rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE); + cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE; + rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *)); + } + else + { + page = cur_lv_tbl[off]; + page &= MMU_ADDRESS_MASK; + /* page to va */ + page -= PV_OFFSET; + rt_page_ref_inc((void *)page, 0); } page = cur_lv_tbl[off]; if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK) @@ -101,6 +117,7 @@ static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va, unsigne return MMU_MAP_ERROR_CONFLICT; } cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK); + cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET); level_shift -= MMU_LEVEL_SHIFT; } attr &= MMU_ATTRIB_MASK; @@ -108,40 +125,15 @@ static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va, unsigne off = (va >> ARCH_SECTION_SHIFT); off &= MMU_LEVEL_MASK; cur_lv_tbl[off] = pa; - return 0; -} - -int armv8_init_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long count, unsigned long attr) -{ - unsigned long i; - int ret; + rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *)); - if (va & ARCH_SECTION_MASK) - { - return -1; - } - if (pa & ARCH_SECTION_MASK) - { - return -1; - } - for (i = 0; i < count; i++) - { - ret = _map_single_page_2M(lv0_tbl, va, pa, attr); - va += ARCH_SECTION_SIZE; - pa += ARCH_SECTION_SIZE; - if (ret != 0) - { - return ret; - } - } return 0; } - -#ifdef RT_USING_LWP -static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr) +#else +static int _kenrel_map_2M(unsigned long *tbl, unsigned long va, unsigned long pa, unsigned long attr) { int level; - unsigned long *cur_lv_tbl = lv0_tbl; + unsigned long *cur_lv_tbl = tbl; unsigned long page; unsigned long off; int level_shift = MMU_ADDRESS_BITS; @@ -154,42 +146,46 @@ static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned lon { return MMU_MAP_ERROR_PANOTALIGN; } - for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++) + + for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; ++level) { off = (va 
>> level_shift); off &= MMU_LEVEL_MASK; + if (!(cur_lv_tbl[off] & MMU_TYPE_USED)) { - page = (unsigned long)rt_pages_alloc(0); + page = _kernel_free_page(); + if (!page) { return MMU_MAP_ERROR_NOPAGE; } + rt_memset((char *)page, 0, ARCH_PAGE_SIZE); rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE); - cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE; + cur_lv_tbl[off] = page | MMU_TYPE_TABLE; rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *)); } else { page = cur_lv_tbl[off]; page &= MMU_ADDRESS_MASK; - /* page to va */ - page -= PV_OFFSET; - rt_page_ref_inc((void *)page, 0); } + page = cur_lv_tbl[off]; if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK) { /* is block! error! */ return MMU_MAP_ERROR_CONFLICT; } + + /* next level */ cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK); - cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET); level_shift -= MMU_LEVEL_SHIFT; } + attr &= MMU_ATTRIB_MASK; - pa |= (attr | MMU_TYPE_BLOCK); /* block */ + pa |= (attr | MMU_TYPE_BLOCK); off = (va >> ARCH_SECTION_SHIFT); off &= MMU_LEVEL_MASK; cur_lv_tbl[off] = pa; @@ -197,11 +193,6 @@ static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned lon return 0; } -#else -static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr) -{ - return 0; -} #endif struct mmu_level_info @@ -459,8 +450,12 @@ void rt_hw_mmu_setmtt(unsigned long vaddrStart, void kernel_mmu_switch(unsigned long tbl) { +#ifdef RT_USING_USERSPACE tbl += PV_OFFSET; __asm__ volatile("msr TTBR1_EL1, %0\n dsb sy\nisb"::"r"(tbl):"memory"); +#else + __asm__ volatile("msr TTBR0_EL1, %0\n dsb sy\nisb"::"r"(tbl):"memory"); +#endif __asm__ volatile("tlbi vmalle1\n dsb sy\nisb":::"memory"); __asm__ volatile("ic ialluis\n dsb sy\nisb":::"memory"); } @@ -926,30 +921,98 @@ void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr) return ret; } -#ifdef RT_USING_USERSPACE -void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned long size, unsigned long pv_off) + +/////////////////////////////////////////////////////// +static struct page_table *__init_page_array; +static unsigned long __page_off = 0UL; + +static unsigned long get_free_page(void) { - int ret; - unsigned long va = KERNEL_VADDR_START; - unsigned long count = (size + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT; - unsigned long normal_attr = MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM); + if (!__init_page_array) + { + unsigned long temp_page_start; + asm volatile("mov %0, sp":"=r"(temp_page_start)); + __init_page_array = (struct page_table *)(temp_page_start & ~(ARCH_SECTION_MASK)); + __page_off = 2; /* 0, 1 for ttbr0, ttrb1 */ + } + __page_off++; - /* clean the first two pages */ - mmu_memset((char *)tbl0, 0, ARCH_PAGE_SIZE); - mmu_memset((char *)tbl1, 0, ARCH_PAGE_SIZE); + return (unsigned long)(__init_page_array[__page_off - 1].entry); +} - ret = armv8_init_map_2M(tbl1, va, va + pv_off, count, normal_attr); - if (ret != 0) +static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr) +{ + int level; + unsigned long *cur_lv_tbl = lv0_tbl; + unsigned long page; + unsigned long off; + int level_shift = MMU_ADDRESS_BITS; + + if (va & ARCH_SECTION_MASK) { - while (1); + return MMU_MAP_ERROR_VANOTALIGN; } - ret = armv8_init_map_2M(tbl0, va + pv_off, va + pv_off, count, normal_attr); - if (ret != 0) + if (pa & ARCH_SECTION_MASK) { - while (1); + return MMU_MAP_ERROR_PANOTALIGN; + } + for (level = 0; level < 
MMU_TBL_BLOCK_2M_LEVEL; level++) + { + off = (va >> level_shift); + off &= MMU_LEVEL_MASK; + if (!(cur_lv_tbl[off] & MMU_TYPE_USED)) + { + page = get_free_page(); + if (!page) + { + return MMU_MAP_ERROR_NOPAGE; + } + mmu_memset((char *)page, 0, ARCH_PAGE_SIZE); + cur_lv_tbl[off] = page | MMU_TYPE_TABLE; + } + page = cur_lv_tbl[off]; + if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK) + { + /* is block! error! */ + return MMU_MAP_ERROR_CONFLICT; + } + cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK); + level_shift -= MMU_LEVEL_SHIFT; } + attr &= MMU_ATTRIB_MASK; + pa |= (attr | MMU_TYPE_BLOCK); /* block */ + off = (va >> ARCH_SECTION_SHIFT); + off &= MMU_LEVEL_MASK; + cur_lv_tbl[off] = pa; + return 0; } -#else + +int armv8_init_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long count, unsigned long attr) +{ + unsigned long i; + int ret; + + if (va & ARCH_SECTION_MASK) + { + return -1; + } + if (pa & ARCH_SECTION_MASK) + { + return -1; + } + for (i = 0; i < count; i++) + { + ret = _map_single_page_2M(lv0_tbl, va, pa, attr); + va += ARCH_SECTION_SIZE; + pa += ARCH_SECTION_SIZE; + if (ret != 0) + { + return ret; + } + } + return 0; +} + void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned long size, unsigned long pv_off) { int ret; @@ -959,6 +1022,7 @@ void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned lo /* clean the first two pages */ mmu_memset((char *)tbl0, 0, ARCH_PAGE_SIZE); +#ifdef RT_USING_USERSPACE mmu_memset((char *)tbl1, 0, ARCH_PAGE_SIZE); ret = armv8_init_map_2M(tbl1, va, va + pv_off, count, normal_attr); @@ -966,10 +1030,10 @@ void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned lo { while (1); } +#endif ret = armv8_init_map_2M(tbl0, va + pv_off, va + pv_off, count, normal_attr); if (ret != 0) { while (1); } } -#endif -- Gitee From f58af0b7672af94aae2260bfd80504ff02d13ca3 Mon Sep 17 00:00:00 2001 From: heyuanjie87 <943313837@qq.com> Date: Fri, 13 May 2022 16:34:10 +0800 Subject: [PATCH 4/4] =?UTF-8?q?=E5=9C=A8=E9=9D=9E=E7=94=A8=E6=88=B7?= =?UTF-8?q?=E6=80=81=E6=83=85=E5=86=B5=E4=B8=8B=E5=AE=9E=E7=8E=B0rt=5Fallo?= =?UTF-8?q?c=5Fpage=E5=8A=9F=E8=83=BD=E4=BB=A5=E5=87=8F=E5=B0=91=E4=BB=A3?= =?UTF-8?q?=E7=A0=81=E8=B5=98=E4=BD=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- libcpu/aarch64/common/mmu.c | 231 ++++++++++++------------------------ 1 file changed, 76 insertions(+), 155 deletions(-) diff --git a/libcpu/aarch64/common/mmu.c b/libcpu/aarch64/common/mmu.c index 65b7890193..8c1c53a2d3 100644 --- a/libcpu/aarch64/common/mmu.c +++ b/libcpu/aarch64/common/mmu.c @@ -14,7 +14,7 @@ #include "mmu.h" -#ifdef RT_USING_USERSPACE +#ifdef RT_USING_LWP #include #endif @@ -34,20 +34,24 @@ #define MMU_TBL_PAGE_4k_LEVEL 3 #define MMU_TBL_LEVEL_NR 4 -#define MMU_TBL_PAGE_NR_MAX 32 - void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr); struct page_table { - unsigned long entry[512]; + unsigned long page[512]; }; -/* only map 4G io/memory */ -volatile unsigned long MMUTable[512] __attribute__((aligned(4096))); +#ifndef RT_USING_LWP +#define MMU_TBL_PAGE_NR_MAX 32 + +#undef PV_OFFSET +#define PV_OFFSET 0 + static volatile struct page_table MMUPage[MMU_TBL_PAGE_NR_MAX] __attribute__((aligned(4096))); -static unsigned long _kernel_free_page(void) +#define rt_page_ref_inc(...) 
+ +unsigned long rt_pages_alloc(rt_size_t size_bits) { static unsigned long i = 0; @@ -58,10 +62,26 @@ static unsigned long _kernel_free_page(void) ++i; - return (unsigned long)&MMUPage[i - 1].entry; + return (unsigned long)&MMUPage[i - 1].page; } +#endif -static void mmu_memset(char *dst, char v, size_t len) +static struct page_table *__init_page_array; +static unsigned long __page_off = 0UL; +unsigned long get_free_page(void) +{ + if (!__init_page_array) + { + unsigned long temp_page_start; + asm volatile("mov %0, sp":"=r"(temp_page_start)); + __init_page_array = (struct page_table *)(temp_page_start & ~(ARCH_SECTION_MASK)); + __page_off = 2; /* 0, 1 for ttbr0, ttrb1 */ + } + __page_off++; + return (unsigned long)(__init_page_array[__page_off - 1].page); +} + +void mmu_memset(char *dst, char v, size_t len) { while (len--) { @@ -69,8 +89,7 @@ static void mmu_memset(char *dst, char v, size_t len) } } -#ifdef RT_USING_LWP -static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr) +static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr) { int level; unsigned long *cur_lv_tbl = lv0_tbl; @@ -92,23 +111,13 @@ static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned lon off &= MMU_LEVEL_MASK; if (!(cur_lv_tbl[off] & MMU_TYPE_USED)) { - page = (unsigned long)rt_pages_alloc(0); + page = get_free_page(); if (!page) { return MMU_MAP_ERROR_NOPAGE; } - rt_memset((char *)page, 0, ARCH_PAGE_SIZE); - rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE); - cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE; - rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *)); - } - else - { - page = cur_lv_tbl[off]; - page &= MMU_ADDRESS_MASK; - /* page to va */ - page -= PV_OFFSET; - rt_page_ref_inc((void *)page, 0); + mmu_memset((char *)page, 0, ARCH_PAGE_SIZE); + cur_lv_tbl[off] = page | MMU_TYPE_TABLE; } page = cur_lv_tbl[off]; if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK) @@ -117,7 +126,6 @@ static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned lon return MMU_MAP_ERROR_CONFLICT; } cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK); - cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET); level_shift -= MMU_LEVEL_SHIFT; } attr &= MMU_ATTRIB_MASK; @@ -125,15 +133,39 @@ static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned lon off = (va >> ARCH_SECTION_SHIFT); off &= MMU_LEVEL_MASK; cur_lv_tbl[off] = pa; - rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *)); + return 0; +} +int armv8_init_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long count, unsigned long attr) +{ + unsigned long i; + int ret; + + if (va & ARCH_SECTION_MASK) + { + return -1; + } + if (pa & ARCH_SECTION_MASK) + { + return -1; + } + for (i = 0; i < count; i++) + { + ret = _map_single_page_2M(lv0_tbl, va, pa, attr); + va += ARCH_SECTION_SIZE; + pa += ARCH_SECTION_SIZE; + if (ret != 0) + { + return ret; + } + } return 0; } -#else -static int _kenrel_map_2M(unsigned long *tbl, unsigned long va, unsigned long pa, unsigned long attr) + +static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr) { int level; - unsigned long *cur_lv_tbl = tbl; + unsigned long *cur_lv_tbl = lv0_tbl; unsigned long page; unsigned long off; int level_shift = MMU_ADDRESS_BITS; @@ -146,46 +178,42 @@ static int _kenrel_map_2M(unsigned long *tbl, unsigned 
long va, unsigned long pa { return MMU_MAP_ERROR_PANOTALIGN; } - - for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; ++level) + for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++) { off = (va >> level_shift); off &= MMU_LEVEL_MASK; - if (!(cur_lv_tbl[off] & MMU_TYPE_USED)) { - page = _kernel_free_page(); - + page = (unsigned long)rt_pages_alloc(0); if (!page) { return MMU_MAP_ERROR_NOPAGE; } - rt_memset((char *)page, 0, ARCH_PAGE_SIZE); rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE); - cur_lv_tbl[off] = page | MMU_TYPE_TABLE; + cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE; rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *)); } else { page = cur_lv_tbl[off]; page &= MMU_ADDRESS_MASK; + /* page to va */ + page -= PV_OFFSET; + rt_page_ref_inc((void *)page, 0); } - page = cur_lv_tbl[off]; if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK) { /* is block! error! */ return MMU_MAP_ERROR_CONFLICT; } - - /* next level */ cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK); + cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET); level_shift -= MMU_LEVEL_SHIFT; } - attr &= MMU_ATTRIB_MASK; - pa |= (attr | MMU_TYPE_BLOCK); + pa |= (attr | MMU_TYPE_BLOCK); /* block */ off = (va >> ARCH_SECTION_SHIFT); off &= MMU_LEVEL_MASK; cur_lv_tbl[off] = pa; @@ -193,7 +221,6 @@ static int _kenrel_map_2M(unsigned long *tbl, unsigned long va, unsigned long pa return 0; } -#endif struct mmu_level_info { @@ -329,17 +356,6 @@ err: _kenrel_unmap_4K(lv0_tbl, (void *)va); return ret; } -#else -static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr) -{ - -} - -static int _kenrel_map_4K(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr) -{ - - return 0; -} #endif int kernel_map_fixed(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long count, unsigned long attr) @@ -450,7 +466,7 @@ void rt_hw_mmu_setmtt(unsigned long vaddrStart, void kernel_mmu_switch(unsigned long tbl) { -#ifdef RT_USING_USERSPACE +#ifdef RT_USING_LWP tbl += PV_OFFSET; __asm__ volatile("msr TTBR1_EL1, %0\n dsb sy\nisb"::"r"(tbl):"memory"); #else @@ -570,7 +586,7 @@ static size_t find_vaddr(rt_mmu_info *mmu_info, int pages) return 0; } -#ifdef RT_USING_USERSPACE +#ifdef RT_USING_LWP static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages) { size_t loop_va; @@ -607,7 +623,6 @@ static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages) } return 0; } -#endif static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t npages) { @@ -653,13 +668,14 @@ static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, si } return ret; } +#endif static void rt_hw_cpu_tlb_invalidate(void) { __asm__ volatile("tlbi vmalle1\n dsb sy\n isb sy\n"); } -#ifdef RT_USING_USERSPACE +#ifdef RT_USING_LWP void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t size, size_t attr) { size_t pa_s, pa_e; @@ -720,7 +736,7 @@ void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t at pages = pa_e - pa_s + 1; vaddr = find_vaddr(mmu_info, pages); if (vaddr) { - ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr); + //TODO ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr); if (ret == 0) { rt_hw_cpu_tlb_invalidate(); @@ -731,7 +747,7 @@ void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t at } #endif -#ifdef RT_USING_USERSPACE +#ifdef RT_USING_LWP static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, 
size_t npages, size_t attr) { size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK; @@ -812,7 +828,6 @@ void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size } return 0; } -#endif void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size) { @@ -828,7 +843,6 @@ void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size) rt_hw_cpu_tlb_invalidate(); } -#ifdef RT_USING_USERSPACE void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t size, size_t attr) { void *ret; @@ -850,7 +864,6 @@ void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_ rt_hw_interrupt_enable(level); return ret; } -#endif void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size) { @@ -860,6 +873,7 @@ void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size) _rt_hw_mmu_unmap(mmu_info, v_addr, size); rt_hw_interrupt_enable(level); } +#endif void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr) { @@ -922,97 +936,6 @@ void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr) } -/////////////////////////////////////////////////////// -static struct page_table *__init_page_array; -static unsigned long __page_off = 0UL; - -static unsigned long get_free_page(void) -{ - if (!__init_page_array) - { - unsigned long temp_page_start; - asm volatile("mov %0, sp":"=r"(temp_page_start)); - __init_page_array = (struct page_table *)(temp_page_start & ~(ARCH_SECTION_MASK)); - __page_off = 2; /* 0, 1 for ttbr0, ttrb1 */ - } - __page_off++; - - return (unsigned long)(__init_page_array[__page_off - 1].entry); -} - -static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr) -{ - int level; - unsigned long *cur_lv_tbl = lv0_tbl; - unsigned long page; - unsigned long off; - int level_shift = MMU_ADDRESS_BITS; - - if (va & ARCH_SECTION_MASK) - { - return MMU_MAP_ERROR_VANOTALIGN; - } - if (pa & ARCH_SECTION_MASK) - { - return MMU_MAP_ERROR_PANOTALIGN; - } - for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++) - { - off = (va >> level_shift); - off &= MMU_LEVEL_MASK; - if (!(cur_lv_tbl[off] & MMU_TYPE_USED)) - { - page = get_free_page(); - if (!page) - { - return MMU_MAP_ERROR_NOPAGE; - } - mmu_memset((char *)page, 0, ARCH_PAGE_SIZE); - cur_lv_tbl[off] = page | MMU_TYPE_TABLE; - } - page = cur_lv_tbl[off]; - if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK) - { - /* is block! error! 
*/ - return MMU_MAP_ERROR_CONFLICT; - } - cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK); - level_shift -= MMU_LEVEL_SHIFT; - } - attr &= MMU_ATTRIB_MASK; - pa |= (attr | MMU_TYPE_BLOCK); /* block */ - off = (va >> ARCH_SECTION_SHIFT); - off &= MMU_LEVEL_MASK; - cur_lv_tbl[off] = pa; - return 0; -} - -int armv8_init_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long count, unsigned long attr) -{ - unsigned long i; - int ret; - - if (va & ARCH_SECTION_MASK) - { - return -1; - } - if (pa & ARCH_SECTION_MASK) - { - return -1; - } - for (i = 0; i < count; i++) - { - ret = _map_single_page_2M(lv0_tbl, va, pa, attr); - va += ARCH_SECTION_SIZE; - pa += ARCH_SECTION_SIZE; - if (ret != 0) - { - return ret; - } - } - return 0; -} - void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned long size, unsigned long pv_off) { int ret; @@ -1022,7 +945,6 @@ void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned lo /* clean the first two pages */ mmu_memset((char *)tbl0, 0, ARCH_PAGE_SIZE); -#ifdef RT_USING_USERSPACE mmu_memset((char *)tbl1, 0, ARCH_PAGE_SIZE); ret = armv8_init_map_2M(tbl1, va, va + pv_off, count, normal_attr); @@ -1030,7 +952,6 @@ void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned lo { while (1); } -#endif ret = armv8_init_map_2M(tbl0, va + pv_off, va + pv_off, count, normal_attr); if (ret != 0) { -- Gitee
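
Note on the non-LWP page-table pool introduced by patch 4/4: when RT_USING_LWP is disabled, the series backs rt_pages_alloc() with a small static array of 4 KiB-aligned page-table pages (MMUPage[MMU_TBL_PAGE_NR_MAX]), defines rt_page_ref_inc() away, and forces PV_OFFSET to 0, so the shared _kenrel_map_2M() path compiles unchanged in both configurations. A minimal standalone sketch of that allocation pattern follows; the names PAGE_SIZE, POOL_PAGES and pool_page_alloc() are illustrative placeholders, not the exact RT-Thread definitions.

#include <stddef.h>

#define PAGE_SIZE   4096
#define POOL_PAGES  32   /* mirrors MMU_TBL_PAGE_NR_MAX from the patch */

/* One 4 KiB page holds 512 64-bit translation-table descriptors on AArch64. */
struct page_table
{
    unsigned long entry[PAGE_SIZE / sizeof(unsigned long)];
};

static struct page_table pool[POOL_PAGES] __attribute__((aligned(PAGE_SIZE)));

/*
 * Hand out the next unused page-table page, or NULL once the pool is
 * exhausted. The patch exposes this through the rt_pages_alloc() symbol so
 * the common 2M mapping code needs no #ifdef at the call site.
 */
static void *pool_page_alloc(void)
{
    static size_t next = 0;

    if (next >= POOL_PAGES)
    {
        return NULL;
    }

    return pool[next++].entry;
}

Because pool pages are never freed, POOL_PAGES bounds how many intermediate tables the early kernel mapping can create; 32 should comfortably cover the 1 GiB kernel window mapped from entry_point.S, though that sizing is an assumption rather than something the patch states.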