diff --git a/components/lwp/arch/arm/cortex-a/lwp_arch.h b/components/lwp/arch/arm/cortex-a/lwp_arch.h
index 22a0f1fc6cabb04035e47dc544269faa70c17ce0..6303b7a77fabcc1a80fbeb559852640bfb57f1b7 100644
--- a/components/lwp/arch/arm/cortex-a/lwp_arch.h
+++ b/components/lwp/arch/arm/cortex-a/lwp_arch.h
@@ -14,10 +14,13 @@
 
 #ifdef RT_USING_USERSPACE
 
-#define USER_HEAP_VADDR 0x80000000
-#define USER_STACK_VSTART 0x70000000
+#define USER_HEAP_VEND 0xB0000000UL
+#define USER_HEAP_VADDR 0x80000000UL
+#define USER_STACK_VSTART 0x70000000UL
 #define USER_STACK_VEND USER_HEAP_VADDR
-#define USER_VADDR_START 0x00100000
+#define LDSO_LOAD_VADDR 0x60000000UL
+#define USER_VADDR_START 0x00100000UL
+#define USER_LOAD_VADDR USER_VADDR_START
 
 #ifdef __cplusplus
 extern "C" {
diff --git a/components/lwp/arch/risc-v/virt64/lwp_arch.h b/components/lwp/arch/risc-v/virt64/lwp_arch.h
index f6d261d2e5d152fc1f8cfc96e7264208e17d5cb6..2171035c140807e579e85119b90d97fee60f0347 100644
--- a/components/lwp/arch/risc-v/virt64/lwp_arch.h
+++ b/components/lwp/arch/risc-v/virt64/lwp_arch.h
@@ -17,7 +17,9 @@
 #define USER_HEAP_VADDR 0x300000000UL
 #define USER_STACK_VSTART 0x270000000UL
 #define USER_STACK_VEND USER_HEAP_VADDR
+#define LDSO_LOAD_VADDR 0x200000000UL
 #define USER_VADDR_START 0x100000000UL
+#define USER_LOAD_VADDR USER_VADDR_START
 
 #define MMU_MAP_U_RWCB 0
 #define MMU_MAP_U_RW 0
diff --git a/components/lwp/lwp.c b/components/lwp/lwp.c
index 3826306d98ffd6944582b820213d5bac87b85484..9bba06fd7c79c3b38d1e51beba288b286cc869a3 100644
--- a/components/lwp/lwp.c
+++ b/components/lwp/lwp.c
@@ -22,6 +22,7 @@
 #endif
 
 #include "lwp.h"
+#include "lwp_arch.h"
 
 #define DBG_TAG "LWP"
 #define DBG_LVL DBG_WARNING
@@ -35,18 +36,13 @@
 #include 
 #include 
-
-#ifdef ARCH_RISCV64
-    #define USER_LOAD_VADDR 0x200000000
-#else
-    #define USER_LOAD_VADDR 0x100000
-#endif
 #endif
 
 static const char elf_magic[] = {0x7f, 'E', 'L', 'F'};
 
 extern void lwp_user_entry(void *args, const void *text, void *data, void *k_stack);
 extern int libc_stdio_get_console(void);
+int load_ldso(struct rt_lwp *lwp, char *exec_name, char *const argv[], char *const envp[]);
 
 /**
  * RT-Thread light-weight process
@@ -318,6 +314,7 @@ void lwp_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, s
 void lwp_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym);
 #endif
 
+#ifdef RT_USING_USERSPACE
 struct map_range
 {
     void *start;
@@ -350,33 +347,76 @@ static void expand_map_range(struct map_range *m, void *start, size_t size)
 
 static int map_range_ckeck(struct map_range *m1, struct map_range *m2)
 {
-    int ret = 0;
     void *m1_start = (void *)((size_t)m1->start & ~ARCH_PAGE_MASK);
     void *m1_end = (void *)((((size_t)m1->start + m1->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
     void *m2_start = (void *)((size_t)m2->start & ~ARCH_PAGE_MASK);
     void *m2_end = (void *)((((size_t)m2->start + m2->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
 
-    if (m1_start < m2_start)
+    if (m1->size)
     {
-        if (m1_end > m2_start)
+        if (m1_start < (void *)USER_LOAD_VADDR)
+        {
+            return -1;
+        }
+        if (m1_start > (void *)USER_STACK_VSTART)
         {
-            ret = -1;
+            return -1;
+        }
+        if (m1_end < (void *)USER_LOAD_VADDR)
+        {
+            return -1;
+        }
+        if (m1_end > (void *)USER_STACK_VSTART)
+        {
+            return -1;
         }
     }
-    else /* m2_start <= m1_start */
+    if (m2->size)
     {
-        if (m2_end > m1_start)
+        if (m2_start < (void *)USER_LOAD_VADDR)
+        {
+            return -1;
+        }
+        if (m2_start > (void *)USER_STACK_VSTART)
+        {
+            return -1;
+        }
+        if (m2_end < (void *)USER_LOAD_VADDR)
         {
-            ret = -1;
+            return -1;
+        }
+        if (m2_end > (void *)USER_STACK_VSTART)
+        {
+            return -1;
         }
     }
-    return ret;
+
+    if ((m1->size != 0) && (m2->size != 0))
+    {
+        if (m1_start < m2_start)
+        {
+            if (m1_end > m2_start)
+            {
+                return -1;
+            }
+        }
+        else /* m2_start <= m1_start */
+        {
+            if (m2_end > m1_start)
+            {
+                return -1;
+            }
+        }
+    }
+    return 0;
 }
+#endif
 
 static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, struct process_aux *aux)
 {
     uint32_t i;
     uint32_t off = 0;
+    size_t load_off = 0;
     char *p_section_str = 0;
     Elf_sym *dynsym = 0;
     Elf_Ehdr eheader;
@@ -391,10 +431,10 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
     size_t rel_dyn_size = 0;
     size_t dynsym_off = 0;
     size_t dynsym_size = 0;
-    struct map_range text_area = {NULL, 0};
-    struct map_range data_area = {NULL, 0};
 #ifdef RT_USING_USERSPACE
+    struct map_range user_area[2] = {{NULL, 0}, {NULL, 0}}; /* 0 is text, 1 is data */
     void *pa, *va;
+    void *va_self;
 #endif
 
 #ifdef RT_USING_USERSPACE
@@ -446,17 +486,46 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
         return -RT_ERROR;
     }
 
+#ifdef RT_USING_USERSPACE
+    {
+        off = eheader.e_phoff;
+        for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
+        {
+            check_off(off, len);
+            lseek(fd, off, SEEK_SET);
+            read_len = load_fread(&pheader, 1, sizeof pheader, fd);
+            check_read(read_len, sizeof pheader);
+
+            if (pheader.p_type == PT_DYNAMIC)
+            {
+                /* load ld.so */
+                return 1; /* 1 means dynamic */
+            }
+        }
+    }
+#endif
+
+    if (eheader.e_entry != 0)
+    {
+        if ((eheader.e_entry != USER_LOAD_VADDR)
+                && (eheader.e_entry != LDSO_LOAD_VADDR))
+        {
+            /* the entry is invalid */
+            return -RT_ERROR;
+        }
+    }
+
     { /* load aux */
         uint8_t *process_header;
         size_t process_header_size;
 
         off = eheader.e_phoff;
         process_header_size = eheader.e_phnum * sizeof pheader;
-        if (process_header_size > ARCH_PAGE_SIZE)
+#ifdef RT_USING_USERSPACE
+        if (process_header_size > ARCH_PAGE_SIZE - sizeof(char[16]))
        {
             return -RT_ERROR;
         }
-#ifdef RT_USING_USERSPACE
         va = (uint8_t *)lwp_map_user(lwp, (void *)(KERNEL_VADDR_START - ARCH_PAGE_SIZE * 2), process_header_size, 0);
         if (!va)
         {
@@ -465,7 +534,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
         pa = rt_hw_mmu_v2p(m_info, va);
         process_header = (uint8_t *)pa - PV_OFFSET;
 #else
-        process_header = (uint8_t *)rt_malloc(process_header_size);
+        process_header = (uint8_t *)rt_malloc(process_header_size + sizeof(char[16]));
         if (!process_header)
         {
             return -RT_ERROR;
@@ -480,9 +549,29 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
 #endif
         aux->item[1].key = AT_PAGESZ;
+#ifdef RT_USING_USERSPACE
         aux->item[1].value = ARCH_PAGE_SIZE;
+#else
+        aux->item[1].value = RT_MM_PAGE_SIZE;
+#endif
         aux->item[2].key = AT_RANDOM;
-        aux->item[2].value = rt_tick_get();
+        {
+            uint32_t random_value = rt_tick_get();
+            uint8_t *random;
+#ifdef RT_USING_USERSPACE
+            uint8_t *krandom;
+
+            random = (uint8_t *)(KERNEL_VADDR_START - ARCH_PAGE_SIZE - sizeof(char[16]));
+
+            krandom = (uint8_t *)rt_hw_mmu_v2p(m_info, random);
+            krandom = (uint8_t *)krandom - PV_OFFSET;
+            rt_memcpy(krandom, &random_value, sizeof random_value);
+#else
+            random = (uint8_t *)(process_header + process_header_size);
+            rt_memcpy(random, &random_value, sizeof random_value);
+#endif
+            aux->item[2].value = (uint32_t)(size_t)random;
+        }
         aux->item[3].key = AT_PHDR;
 #ifdef RT_USING_USERSPACE
         aux->item[3].value = (uint32_t)(size_t)va;
@@ -498,64 +587,144 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
 #endif
     }
 
+    if (load_addr)
+    {
+        load_off = (size_t)load_addr;
+    }
 #ifdef RT_USING_USERSPACE
-    /* map user */
-    off = eheader.e_shoff;
-    for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
+    else
     {
-        check_off(off, len);
-        lseek(fd, off, SEEK_SET);
-        read_len = load_fread(&sheader, 1, sizeof sheader, fd);
-        check_read(read_len, sizeof sheader);
-
-        if ((sheader.sh_flags & SHF_ALLOC) == 0)
+        /* map user */
+        off = eheader.e_shoff;
+        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
         {
-            continue;
-        }
+            check_off(off, len);
+            lseek(fd, off, SEEK_SET);
+            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
+            check_read(read_len, sizeof sheader);
 
-        switch (sheader.sh_type)
-        {
+            if ((sheader.sh_flags & SHF_ALLOC) == 0)
+            {
+                continue;
+            }
+
+            switch (sheader.sh_type)
+            {
             case SHT_PROGBITS:
                 if ((sheader.sh_flags & SHF_WRITE) == 0)
                 {
-                    expand_map_range(&text_area, (void *)sheader.sh_addr, sheader.sh_size);
+                    expand_map_range(&user_area[0], (void *)sheader.sh_addr, sheader.sh_size);
                 }
                 else
                 {
-                    expand_map_range(&data_area, (void *)sheader.sh_addr, sheader.sh_size);
+                    expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                 }
                 break;
             case SHT_NOBITS:
-                expand_map_range(&data_area, (void *)sheader.sh_addr, sheader.sh_size);
+                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                 break;
             default:
+                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                 break;
+            }
         }
-    }
-    if (map_range_ckeck(&text_area, &data_area) != 0)
-    {
-        result = -RT_ERROR;
-        goto _exit;
-    }
-    if (text_area.start)
-    {
-        va = lwp_map_user(lwp, text_area.start, text_area.size, 1);
-        if (!va || (va != text_area.start))
+
+        if (user_area[0].size == 0)
         {
+            /* no code */
             result = -RT_ERROR;
             goto _exit;
         }
+
+        if (user_area[0].start == NULL)
+        {
+            /* DYN */
+            load_off = USER_LOAD_VADDR;
+            user_area[0].start = (void *)((char*)user_area[0].start + load_off);
+            user_area[1].start = (void *)((char*)user_area[1].start + load_off);
+        }
+
+        if (map_range_ckeck(&user_area[0], &user_area[1]) != 0)
+        {
+            result = -RT_ERROR;
+            goto _exit;
+        }
+
+        /* text and data */
+        for (i = 0; i < 2; i++)
+        {
+            if (user_area[i].size != 0)
+            {
+                va = lwp_map_user(lwp, user_area[i].start, user_area[i].size, (int)(i == 0));
+                if (!va || (va != user_area[i].start))
+                {
+                    result = -RT_ERROR;
+                    goto _exit;
+                }
+            }
+        }
+        lwp->text_size = user_area[0].size;
     }
-    if (data_area.start)
+#else
+    else
     {
-        va = lwp_map_user(lwp, data_area.start, data_area.size, 0);
-        if (!va || (va != data_area.start))
+        size_t start = -1UL;
+        size_t end = 0UL;
+        size_t total_size;
+
+        off = eheader.e_shoff;
+        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
         {
-            result = -RT_ERROR;
+            check_off(off, len);
+            lseek(fd, off, SEEK_SET);
+            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
+            check_read(read_len, sizeof sheader);
+
+            if ((sheader.sh_flags & SHF_ALLOC) == 0)
+            {
+                continue;
+            }
+
+            switch (sheader.sh_type)
+            {
+            case SHT_PROGBITS:
+            case SHT_NOBITS:
+                if (start > sheader.sh_addr)
+                {
+                    start = sheader.sh_addr;
+                }
+                if (sheader.sh_addr + sheader.sh_size > end)
+                {
+                    end = sheader.sh_addr + sheader.sh_size;
+                }
+                break;
+            default:
+                break;
+            }
+        }
+
+        total_size = end - start;
+
+#ifdef RT_USING_CACHE
+        load_off = (size_t)rt_malloc_align(total_size, RT_CPU_CACHE_LINE_SZ);
+#else
+        load_off = (size_t)rt_malloc(total_size);
+#endif
+        if (load_off == 0)
+        {
+            LOG_E("alloc text memory failed!");
+            result = -RT_ENOMEM;
             goto _exit;
         }
+        else
+        {
+            LOG_D("lwp text malloc : %p, size: %d!", (void *)load_off, lwp->text_size);
+        }
+        lwp->load_off = load_off; /* for free */
+        lwp->text_size = total_size;
     }
 #endif
+    lwp->text_entry = (void *)(eheader.e_entry + load_off);
 
     off = eheader.e_phoff;
     for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
@@ -571,95 +740,43 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
         {
             return -RT_ERROR;
         }
-        if (load_addr)
-        {
-            if (eheader.e_type == ET_EXEC)
-            {
-                result = -RT_ERROR;
-                goto _exit;
-            }
-            lwp->text_entry = load_addr;
-        }
-        else
-        {
-#ifdef RT_USING_USERSPACE
-            void *va;
-            if (eheader.e_type == ET_EXEC)
-            {
-                if (pheader.p_vaddr != USER_LOAD_VADDR)
-                {
-                    result = -RT_ERROR;
-                    goto _exit;
-                }
-            }
-            va = (void *)pheader.p_vaddr;
-            if (va)
-            {
-                lwp->text_entry = va;
-                lwp->text_size = pheader.p_memsz;
-            }
-            else
-            {
-                lwp->text_entry = RT_NULL;
-            }
-#else
-#ifdef RT_USING_CACHE
-            lwp->text_entry = (rt_uint8_t *)rt_malloc_align(pheader.p_memsz, RT_CPU_CACHE_LINE_SZ);
-#else
-            lwp->text_entry = (rt_uint8_t *)rt_malloc(pheader.p_memsz);
-#endif
-#endif
-            if (lwp->text_entry == RT_NULL)
-            {
-                LOG_E("alloc text memory faild!");
-                result = -RT_ENOMEM;
-                goto _exit;
-            }
-            else
-            {
-                LOG_D("lwp text malloc : %p, size: %d!", lwp->text_entry, lwp->text_size);
-            }
 
-            check_off(pheader.p_offset, len);
-            lseek(fd, pheader.p_offset, SEEK_SET);
+        check_off(pheader.p_offset, len);
+        lseek(fd, pheader.p_offset, SEEK_SET);
 #ifdef RT_USING_USERSPACE
+        {
+            uint32_t size = pheader.p_filesz;
+            size_t tmp_len = 0;
+
+            va = (void *)(pheader.p_vaddr + load_addr);
+            read_len = 0;
+            while (size)
             {
-                uint32_t size = pheader.p_filesz;
-                void *va_self;
-                void *pa;
-                size_t tmp_len = 0;
-
-                read_len = 0;
-                while (size)
-                {
-                    pa = rt_hw_mmu_v2p(m_info, va);
-                    va_self = (void *)((char *)pa - PV_OFFSET);
-                    LOG_D("va_self = %p pa = %p", va_self, pa);
-                    tmp_len = (size < ARCH_PAGE_SIZE) ? size : ARCH_PAGE_SIZE;
-                    tmp_len = load_fread(va_self, 1, tmp_len, fd);
-                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, va_self, tmp_len);
-                    read_len += tmp_len;
-                    size -= tmp_len;
-                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
-                }
+                pa = rt_hw_mmu_v2p(m_info, va);
+                va_self = (void *)((char *)pa - PV_OFFSET);
+                LOG_D("va_self = %p pa = %p", va_self, pa);
+                tmp_len = (size < ARCH_PAGE_SIZE) ? size : ARCH_PAGE_SIZE;
+                tmp_len = load_fread(va_self, 1, tmp_len, fd);
+                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, va_self, tmp_len);
+                read_len += tmp_len;
+                size -= tmp_len;
+                va = (void *)((char *)va + ARCH_PAGE_SIZE);
             }
+        }
 #else
-            read_len = load_fread(lwp->text_entry, 1, pheader.p_filesz, fd);
+        read_len = load_fread((void*)(pheader.p_vaddr + load_off), 1, pheader.p_filesz, fd);
 #endif
-            check_read(read_len, pheader.p_filesz);
-        }
+        check_read(read_len, pheader.p_filesz);
+
         if (pheader.p_filesz < pheader.p_memsz)
         {
 #ifdef RT_USING_USERSPACE
-            void *va = (void *)((char *)lwp->text_entry + pheader.p_filesz);
-            void *va_self;
-            void *pa;
             uint32_t size = pheader.p_memsz - pheader.p_filesz;
             uint32_t size_s;
             uint32_t off;
 
             off = pheader.p_filesz & ARCH_PAGE_MASK;
-            va = (void *)(((size_t)lwp->text_entry + pheader.p_filesz) & ~ARCH_PAGE_MASK);
+            va = (void *)((pheader.p_vaddr + pheader.p_filesz + load_off) & ~ARCH_PAGE_MASK);
             while (size)
             {
                 size_s = (size < ARCH_PAGE_SIZE - off) ? size : ARCH_PAGE_SIZE - off;
@@ -672,13 +789,13 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
                 va = (void *)((char *)va + ARCH_PAGE_SIZE);
             }
 #else
-            memset((uint8_t *)lwp->text_entry + pheader.p_filesz, 0, (size_t)(pheader.p_memsz - pheader.p_filesz));
+            memset((uint8_t *)pheader.p_vaddr + pheader.p_filesz + load_off, 0, (size_t)(pheader.p_memsz - pheader.p_filesz));
 #endif
         }
-        break;
     }
     }
 
+    /* relocate */
     if (eheader.e_type == ET_DYN)
     {
         /* section info */
@@ -709,18 +826,14 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
             read_len = load_fread(&sheader, 1, sizeof sheader, fd);
             check_read(read_len, sizeof sheader);
 
-            if (strcmp(p_section_str + sheader.sh_name, "text") == 0)
-            {
-                lwp->text_size = sheader.sh_size;
-            }
-            else if (strcmp(p_section_str + sheader.sh_name, ".got") == 0)
+            if (strcmp(p_section_str + sheader.sh_name, ".got") == 0)
             {
-                got_start = (void *)((uint8_t *)lwp->text_entry + sheader.sh_addr);
+                got_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                 got_size = (size_t)sheader.sh_size;
             }
             else if (strcmp(p_section_str + sheader.sh_name, ".rel.dyn") == 0)
             {
-                rel_dyn_start = (void *)((uint8_t *)lwp->text_entry + sheader.sh_addr);
+                rel_dyn_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                 rel_dyn_size = (size_t)sheader.sh_size;
             }
             else if (strcmp(p_section_str + sheader.sh_name, ".dynsym") == 0)
@@ -745,9 +858,9 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
             check_read(read_len, dynsym_size);
         }
 #ifdef RT_USING_USERSPACE
-        lwp_elf_reloc(m_info, (void *)lwp->text_entry, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
+        lwp_elf_reloc(m_info, (void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
 #else
-        lwp_elf_reloc((void *)lwp->text_entry, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
+        lwp_elf_reloc((void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
 
         rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, lwp->text_size);
         rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, lwp->text_entry, lwp->text_size);
@@ -767,7 +880,7 @@ _exit:
     }
     if (result != RT_EOK)
    {
-        LOG_E("lwp dynamic load faild, %d", result);
+        LOG_E("lwp load failed, %d", result);
     }
     return result;
 }
@@ -816,7 +929,7 @@ RT_WEAK int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_add
 
     lseek(fd, 0, SEEK_SET);
     ret = load_elf(fd, len, lwp, ptr, aux);
-    if (ret != RT_EOK)
+    if ((ret != RT_EOK) && (ret != 1))
     {
         LOG_E("lwp load ret = %d", ret);
     }
@@ -834,7 +947,10 @@ void lwp_cleanup(struct rt_thread *tid)
     rt_base_t level;
     struct rt_lwp *lwp;
 
-    if (tid == NULL) return;
+    if (tid == NULL)
+    {
+        return;
+    }
 
     LOG_I("cleanup thread: %s, stack_addr: %08X", tid->name, tid->stack_addr);
@@ -971,6 +1087,12 @@ pid_t lwp_execve(char *filename, int argc, char **argv, char **envp)
     }
 
     result = lwp_load(filename, lwp, RT_NULL, 0, aux);
+    if (result == 1)
+    {
+        /* dynamic */
+        lwp_unmap_user(lwp, (void *)(KERNEL_VADDR_START - ARCH_PAGE_SIZE));
+        result = load_ldso(lwp, filename, argv, envp);
+    }
     if (result == RT_EOK)
     {
         rt_thread_t thread = RT_NULL;
diff --git a/components/lwp/lwp.h b/components/lwp/lwp.h
index 73b417d6815c76d68802e7bdab9fc8b375c36673..2b540e0899987bc7b9f9e32c6d68e532ac31f28b 100644
--- a/components/lwp/lwp.h
+++ b/components/lwp/lwp.h
@@ -70,6 +70,9 @@ struct rt_lwp
     uint32_t text_size;
     void *data_entry;
     uint32_t data_size;
+#ifndef RT_USING_USERSPACE
+    size_t load_off;
+#endif
 
     int ref;
     void *args;
diff --git a/components/lwp/lwp_pid.c b/components/lwp/lwp_pid.c
index dd170a4fb90233252f4478c2190b1f3ada279201..50b14b531c5a100e619e0b0615981eff7a1a473e 100644
--- a/components/lwp/lwp_pid.c
+++ b/components/lwp/lwp_pid.c
@@ -387,9 +387,9 @@ void lwp_free(struct rt_lwp* lwp)
         LOG_D("lwp text free: %p", lwp->text_entry);
 #ifndef RT_USING_USERSPACE
 #ifdef RT_USING_CACHE
-        rt_free_align(lwp->text_entry);
+        rt_free_align((void*)lwp->load_off);
 #else
-        rt_free(lwp->text_entry);
+        rt_free((void*)lwp->load_off);
 #endif
 #endif
         lwp->text_entry = RT_NULL;
diff --git a/components/lwp/lwp_syscall.c b/components/lwp/lwp_syscall.c
index e57976e25e0f119f683baea69a5cc80b6128694c..e1ef52011a918af18e00df8a970860304128db01 100644
--- a/components/lwp/lwp_syscall.c
+++ b/components/lwp/lwp_syscall.c
@@ -1818,6 +1818,134 @@ quit:
     return page;
 }
 
+int load_ldso(struct rt_lwp *lwp, char *exec_name, char *const argv[], char *const envp[])
+{
+    int ret = -1;
+    int i;
+    void *page;
+    void *new_page;
+    int argc = 0;
+    int envc = 0;
+    int size;
+    char **kargv;
+    char **kenvp;
+    size_t len;
+    char *p;
+    char *i_arg;
+    struct lwp_args_info args_info;
+    struct process_aux *aux;
+
+    size = sizeof(char *);
+    if (argv)
+    {
+        while (1)
+        {
+            if (!argv[argc])
+            {
+                break;
+            }
+            len = rt_strlen((const char *)argv[argc]);
+            size += sizeof(char *) + len + 1;
+            argc++;
+        }
+    }
+    if (envp)
+    {
+        while (1)
+        {
+            if (!envp[envc])
+            {
+                break;
+            }
+            len = rt_strlen((const char *)envp[envc]);
+            size += sizeof(char *) + len + 1;
+            envc++;
+        }
+    }
+
+    page = rt_pages_alloc(0); /* 1 page */
+    if (!page)
+    {
+        rt_set_errno(ENOMEM);
+        goto quit;
+    }
+    kargv = (char **)page;
+    kenvp = kargv + argc + 1;
+    p = (char *)(kenvp + envc + 1);
+    /* copy argv */
+    if (argv)
+    {
+        for (i = 0; i < argc; i++)
+        {
+            kargv[i] = p;
+            len = rt_strlen(argv[i]) + 1;
+            rt_memcpy(p, argv[i], len);
+            p += len;
+        }
+        kargv[i] = NULL;
+    }
+    /* copy envp */
+    if (envp)
+    {
+        for (i = 0; i < envc; i++)
+        {
+            kenvp[i] = p;
+            len = rt_strlen(envp[i]) + 1;
+            rt_memcpy(p, envp[i], len);
+            p += len;
+        }
+        kenvp[i] = NULL;
+    }
+
+    args_info.argc = argc;
+    args_info.argv = kargv;
+    args_info.envc = envc;
+    args_info.envp = kenvp;
+    args_info.size = size;
+
+    new_page = _insert_args(1, &exec_name, &args_info);
+    rt_pages_free(page, 0);
+    page = new_page;
+    if (!page)
+    {
+        goto quit;
+    }
+
+    i_arg = "-e";
+    new_page = _insert_args(1, &i_arg, &args_info);
+    rt_pages_free(page, 0);
+    page = new_page;
+    if (!page)
+    {
+        goto quit;
+    }
+
+    i_arg = "ld.so";
+    new_page = _insert_args(1, &i_arg, &args_info);
+    rt_pages_free(page, 0);
+    page = new_page;
+    if (!page)
+    {
+        goto quit;
+    }
+
+    if ((aux = lwp_argscopy(lwp, args_info.argc, args_info.argv, args_info.envp)) == NULL)
+    {
+        rt_set_errno(ENOMEM);
+        goto quit;
+    }
+
+    ret = lwp_load("/bin/ld.so", lwp, RT_NULL, 0, aux);
+
+    rt_strncpy(lwp->cmd, exec_name, RT_NAME_MAX);
+quit:
+    if (page)
+    {
+        rt_pages_free(page, 0);
+    }
+    return ret;
+}
+
 int sys_execve(const char *path, char *const argv[], char *const envp[])
 {
     int ret = -1;
@@ -1995,6 +2123,12 @@ int sys_execve(const char *path, char *const argv[], char *const envp[])
         goto quit;
     }
     ret = lwp_load(path, new_lwp, RT_NULL, 0, aux);
+    if (ret == 1)
+    {
+        /* dynamic */
+        lwp_unmap_user(new_lwp, (void *)(KERNEL_VADDR_START - ARCH_PAGE_SIZE));
+        ret = load_ldso(new_lwp, (char *)path, args_info.argv, args_info.envp);
+    }
     if (ret == RT_EOK)
     {
         int off = 0;
@@ -2068,7 +2202,7 @@ quit:
     {
         lwp_ref_dec(new_lwp);
     }
-    return -1;
+    return ret;
 }
 
 rt_err_t sys_thread_delete(rt_thread_t thread)
@@ -3505,6 +3639,8 @@ int sys_setrlimit(unsigned int resource, struct rlimit *rlim)
     return -1;
 }
 
+int sys_cacheflush(void *addr, int len, int cache);
+
 const static void* func_table[] =
 {
     (void *)sys_exit,            /* 01 */
@@ -3627,8 +3763,8 @@ const static void* func_table[] =
     (void *)sys_sigaction,
     (void *)sys_sigprocmask,
     (void *)sys_tkill,           /* 105 */
-    (void *)sys_notimpl,
     (void *)sys_thread_sigprocmask,
+    (void *)sys_cacheflush,
     (void *)sys_notimpl,
     (void *)sys_notimpl,
     (void *)sys_waitpid,         /* 110 */
diff --git a/components/lwp/lwp_user_mm.c b/components/lwp/lwp_user_mm.c
index a46b65931c304b05f98fc34ae10c424363c5cf6a..f7fad3b2afda1385c664d5a0cee3a0da6cfe9a36 100644
--- a/components/lwp/lwp_user_mm.c
+++ b/components/lwp/lwp_user_mm.c
@@ -73,6 +73,7 @@ void lwp_mmu_switch(struct rt_thread *thread)
 #endif
         switch_mmu(new_mmu_table);
     }
+    rt_cpu_set_thread_idr(thread->thread_idr);
 #ifdef RT_USING_GDBSERVER
     if (l && l->debug)
     {
@@ -375,19 +376,18 @@ rt_base_t lwp_brk(void *addr)
     }
     else
     {
-        size_t size;
-        void *va;
+        size_t size = 0;
+        void *va = RT_NULL;
 
-        size = (((size_t)addr - lwp->end_heap) + ARCH_PAGE_SIZE - 1) & ~ARCH_PAGE_MASK;
-        va = lwp_map_user(lwp, (void *)lwp->end_heap, size, 0);
-        if (va)
+        if ((size_t)addr <= USER_HEAP_VEND)
         {
-            lwp->end_heap += size;
-            ret = 0;
+            size = (((size_t)addr - lwp->end_heap) + ARCH_PAGE_SIZE - 1) & ~ARCH_PAGE_MASK;
+            va = lwp_map_user(lwp, (void *)lwp->end_heap, size, 0);
         }
-        else
+        if (va)
         {
-            ret = -1;
+            lwp->end_heap += size;
+            ret = lwp->end_heap;
         }
     }
     rt_hw_interrupt_enable(level);
diff --git a/include/rtdef.h b/include/rtdef.h
index de47a1b6f04f9fe970f5fe88edb8c7fca4aeccf8..e5f398a386572212793b50098b4ad8f3d9b0e585 100644
--- a/include/rtdef.h
+++ b/include/rtdef.h
@@ -743,9 +743,9 @@ struct rt_thread
     int debug_suspend;
     struct rt_hw_exp_stack *regs;
     void * thread_idr;              /** lwp thread indicator */
-    int tid;
     int *clear_child_tid;
 #endif
+    int tid;
 #endif
 
     rt_ubase_t user_data;           /**< private user data beyond this thread */
diff --git a/libcpu/arm/cortex-a/cache.c b/libcpu/arm/cortex-a/cache.c
index 30af86baa125b97ae3749b9ae3ed8cc1d66ebe03..826056d56504d4868f126b765e9bc9d9c1a7a4e6 100644
--- a/libcpu/arm/cortex-a/cache.c
+++ b/libcpu/arm/cortex-a/cache.c
@@ -31,8 +31,8 @@ void rt_hw_cpu_icache_invalidate(void *addr, int size)
     rt_uint32_t end_addr = (rt_uint32_t) addr + size + line_size - 1;
 
     asm volatile ("dmb":::"memory");
-    start_addr &= ~(line_size-1);
-    end_addr &= ~(line_size-1);
+    start_addr &= ~(line_size - 1);
+    end_addr &= ~(line_size - 1);
     while (start_addr < end_addr)
     {
         asm volatile ("mcr p15, 0, %0, c7, c5, 1" :: "r"(start_addr));   /* icimvau */
@@ -48,8 +48,8 @@ void rt_hw_cpu_dcache_invalidate(void *addr, int size)
     rt_uint32_t end_addr = (rt_uint32_t) addr + size + line_size - 1;
 
     asm volatile ("dmb":::"memory");
-    start_addr &= ~(line_size-1);
-    end_addr &= ~(line_size-1);
+    start_addr &= ~(line_size - 1);
+    end_addr &= ~(line_size - 1);
     while (start_addr < end_addr)
     {
         asm volatile ("mcr p15, 0, %0, c7, c6, 1" :: "r"(start_addr));   /* dcimvac */
@@ -65,8 +65,8 @@ void rt_hw_cpu_dcache_clean(void *addr, int size)
     rt_uint32_t end_addr = (rt_uint32_t) addr + size + line_size - 1;
 
     asm volatile ("dmb":::"memory");
-    start_addr &= ~(line_size-1);
-    end_addr &= ~(line_size-1);
+    start_addr &= ~(line_size - 1);
+    end_addr &= ~(line_size - 1);
     while (start_addr < end_addr)
     {
         asm volatile ("mcr p15, 0, %0, c7, c10, 1" :: "r"(start_addr));  /* dccmvac */
@@ -75,18 +75,42 @@
     asm volatile ("dsb":::"memory");
 }
 
+void rt_hw_cpu_dcache_clean_and_invalidate(void *addr, int size)
+{
+    rt_uint32_t line_size = rt_cpu_dcache_line_size();
+    rt_uint32_t start_addr = (rt_uint32_t)addr;
+    rt_uint32_t end_addr = (rt_uint32_t) addr + size + line_size - 1;
+
+    asm volatile ("dmb":::"memory");
+    start_addr &= ~(line_size - 1);
+    end_addr &= ~(line_size - 1);
+    while (start_addr < end_addr)
+    {
+        asm volatile ("mcr p15, 0, %0, c7, c10, 1" :: "r"(start_addr));  /* dccmvac */
+        asm volatile ("mcr p15, 0, %0, c7, c6, 1" :: "r"(start_addr));   /* dcimvac */
+        start_addr += line_size;
+    }
+    asm volatile ("dsb":::"memory");
+}
+
 void rt_hw_cpu_icache_ops(int ops, void *addr, int size)
 {
     if (ops == RT_HW_CACHE_INVALIDATE)
+    {
         rt_hw_cpu_icache_invalidate(addr, size);
+    }
 }
 
 void rt_hw_cpu_dcache_ops(int ops, void *addr, int size)
 {
     if (ops == RT_HW_CACHE_FLUSH)
+    {
         rt_hw_cpu_dcache_clean(addr, size);
+    }
     else if (ops == RT_HW_CACHE_INVALIDATE)
+    {
         rt_hw_cpu_dcache_invalidate(addr, size);
+    }
 }
 
 rt_base_t rt_hw_cpu_icache_status(void)
@@ -98,3 +122,24 @@ rt_base_t rt_hw_cpu_dcache_status(void)
 {
     return 0;
 }
+
+#define ICACHE (1<<0)
+#define DCACHE (1<<1)
+#define BCACHE (ICACHE|DCACHE)
+
+int sys_cacheflush(void *addr, int size, int cache)
+{
+    if ((size_t)addr < KERNEL_VADDR_START && (size_t)addr + size <= KERNEL_VADDR_START)
+    {
+        if ((cache & DCACHE) != 0)
+        {
+            rt_hw_cpu_dcache_clean_and_invalidate(addr, size);
+        }
+        if ((cache & ICACHE) != 0)
+        {
+            rt_hw_cpu_icache_invalidate(addr, size);
+        }
+        return 0;
+    }
+    return -1;
+}
diff --git a/libcpu/arm/cortex-a/mmu.c b/libcpu/arm/cortex-a/mmu.c
index 0c3a7bb9a7a58e42bf9d98cf0f9ca2fdea8beab3..70e92d145e47dc7ab8ad3c699246fe532a36c41d 100644
--- a/libcpu/arm/cortex-a/mmu.c
+++ b/libcpu/arm/cortex-a/mmu.c
@@ -388,6 +388,17 @@ static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
         return -1;
     }
 
+    l1_off = ((size_t)va >> ARCH_SECTION_SHIFT);
+    if (l1_off < mmu_info->vstart || l1_off > mmu_info->vend)
+    {
+        return -1;
+    }
+    l1_off += ((pages << ARCH_PAGE_SHIFT) >> ARCH_SECTION_SHIFT);
+    if (l1_off < mmu_info->vstart || l1_off > mmu_info->vend + 1)
+    {
+        return -1;
+    }
+
     while (pages--)
     {
         l1_off = (loop_va >> ARCH_SECTION_SHIFT);
diff --git a/src/cpu.c b/src/cpu.c
index 2f71c4f8aac6eb09b7c0a2e52930428dd8eb6c62..d253275c331c2013f56921ba066a5e6db3c1ce29 100644
--- a/src/cpu.c
+++ b/src/cpu.c
@@ -201,12 +201,7 @@ void rt_cpus_lock_status_restore(struct rt_thread *thread)
     struct rt_cpu* pcpu = rt_cpu_self();
 
 #ifdef RT_USING_USERSPACE
-    if (pcpu->current_thread)
-    {
-        pcpu->current_thread->thread_idr = rt_cpu_get_thread_idr();
-    }
     lwp_mmu_switch(thread);
-    rt_cpu_set_thread_idr(thread->thread_idr);
 #endif
     pcpu->current_thread = thread;
     if (!thread->cpus_lock_nest)
diff --git a/src/ipc.c b/src/ipc.c
index 7e64496e55d85c62c03d25adf7402b16fbb7b8e6..e78c88854955d80a8442d9e0bf0d85809c300a04 100644
--- a/src/ipc.c
+++ b/src/ipc.c
@@ -1284,6 +1284,8 @@ static rt_err_t _rt_event_recv(rt_event_t event,
 
     if (status == RT_EOK)
     {
+        thread->error = RT_EOK;
+
         /* set received event */
         if (recved)
             *recved = (event->set & set);