diff --git a/components/lwp/lwp.h b/components/lwp/lwp.h index f65c2a94a59e3dedd2710738af1ff3b8acc02e5c..8a66f09ad268c909468a715de3b1d8e518af2717 100644 --- a/components/lwp/lwp.h +++ b/components/lwp/lwp.h @@ -68,6 +68,11 @@ struct rt_lwp struct rt_mpu_info mpu_info; #endif /* ARCH_MM_MPU */ #endif + +#ifdef RT_USING_SMP + int bind_cpu; +#endif + uint8_t lwp_type; uint8_t reserv[3]; @@ -151,6 +156,7 @@ void lwp_mmu_switch(struct rt_thread *thread); #endif void lwp_user_setting_save(rt_thread_t thread); void lwp_user_setting_restore(rt_thread_t thread); +int lwp_setaffinity(pid_t pid, int cpu); #ifdef RT_USING_USERSPACE struct __pthread { diff --git a/components/lwp/lwp_ipc.c b/components/lwp/lwp_ipc.c index ea96db71c0cd0b90366142f951d3d2f86386aab8..3d1aa0e7a46609968d2cda67a6fbbe3fb1378879 100644 --- a/components/lwp/lwp_ipc.c +++ b/components/lwp/lwp_ipc.c @@ -866,17 +866,20 @@ static int channel_fops_close(struct dfs_fd *file) level = rt_hw_interrupt_disable(); ch = (rt_channel_t)file->fnode->data; - ch->ref--; - if (ch->ref == 0) + if (file->fnode->ref_count == 1) { - /* wakeup all the suspended receivers and senders */ - rt_channel_list_resume_all(&ch->parent.suspend_thread); - rt_channel_list_resume_all(&ch->wait_thread); + ch->ref--; + if (ch->ref == 0) + { + /* wakeup all the suspended receivers and senders */ + rt_channel_list_resume_all(&ch->parent.suspend_thread); + rt_channel_list_resume_all(&ch->wait_thread); - /* all ipc msg will lost */ - rt_list_init(&ch->wait_msg); + /* all ipc msg will lost */ + rt_list_init(&ch->wait_msg); - rt_object_delete(&ch->parent.parent); /* release the IPC channel structure */ + rt_object_delete(&ch->parent.parent); /* release the IPC channel structure */ + } } rt_hw_interrupt_enable(level); return 0; @@ -966,6 +969,7 @@ rt_err_t lwp_channel_close(int fdt_type, int fd) { rt_channel_t ch; struct dfs_fd *d; + struct dfs_fnode *fnode; d = lwp_fd_get(fdt_type, fd); if (!d) @@ -973,20 +977,25 @@ rt_err_t lwp_channel_close(int 
fdt_type, int fd) return -RT_EIO; } - if (!d->fnode) + fnode = d->fnode; + if (!fnode) { return -RT_EIO; } ch = fd_2_channel(fdt_type, fd); - rt_free(d->fnode); if (!ch) { return -RT_EIO; } _chfd_free(fd, fdt_type); + if (fnode->ref_count == 1) + { + rt_free(fnode); + return rt_raw_channel_close(ch); + } - return rt_raw_channel_close(ch); + return 0; } rt_err_t lwp_channel_send(int fdt_type, int fd, rt_channel_msg_t data) diff --git a/components/lwp/lwp_pid.c b/components/lwp/lwp_pid.c index 1b4696d78e6ec3ef32a6ea4f99fd123b24a2971f..dd01c645a513af8c3161e30ad487db28fff05e9e 100644 --- a/components/lwp/lwp_pid.c +++ b/components/lwp/lwp_pid.c @@ -1028,3 +1028,64 @@ void lwp_wait_subthread_exit(void) rt_thread_mdelay(10); } } + +static int _lwp_setaffinity(pid_t pid, int cpu) +{ + struct rt_lwp *lwp; + int ret = -1; + + lwp = lwp_from_pid(pid); + if (lwp) + { +#ifdef RT_USING_SMP + rt_list_t *list; + + lwp->bind_cpu = cpu; + for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next) + { + rt_thread_t thread; + + thread = rt_list_entry(list, struct rt_thread, sibling); + rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void *)(rt_size_t)cpu); + } +#endif + ret = 0; + } + return ret; +} + +int lwp_setaffinity(pid_t pid, int cpu) +{ + rt_base_t level; + int ret; + +#ifdef RT_USING_SMP + if (cpu < 0 || cpu > RT_CPUS_NR) + { + cpu = RT_CPUS_NR; + } +#endif + level = rt_hw_interrupt_disable(); + ret = _lwp_setaffinity(pid, cpu); + rt_hw_interrupt_enable(level); + return ret; +} + +#ifdef RT_USING_SMP +static void cmd_cpu_bind(int argc, char** argv) +{ + int pid; + int cpu; + + if (argc < 3) + { + rt_kprintf("Usage: cpu_bind pid cpu\n"); + return; + } + + pid = atoi(argv[1]); + cpu = atoi(argv[2]); + lwp_setaffinity((pid_t)pid, cpu); +} +MSH_CMD_EXPORT_ALIAS(cmd_cpu_bind, cpu_bind, set a process bind to a cpu); +#endif diff --git a/components/lwp/lwp_syscall.c b/components/lwp/lwp_syscall.c index 
9581fe7a043fe22ab8cd08be22f6185730e3f979..4a3b1bbc348f99eafa94e6cf81d17131ab0ab6e8 100644 --- a/components/lwp/lwp_syscall.c +++ b/components/lwp/lwp_syscall.c @@ -1325,6 +1325,9 @@ rt_thread_t sys_thread_create(void *arg[]) goto fail; } +#ifdef RT_USING_SMP + thread->bind_cpu = lwp->bind_cpu; +#endif thread->cleanup = lwp_cleanup; thread->user_entry = (void (*)(void *))arg[1]; thread->user_stack = (void *)user_stack; @@ -1494,6 +1497,9 @@ long _sys_clone(void *arg[]) goto fail; } +#ifdef RT_USING_SMP + thread->bind_cpu = lwp->bind_cpu; +#endif thread->cleanup = lwp_cleanup; thread->user_entry = RT_NULL; thread->user_stack = RT_NULL; @@ -4015,6 +4021,11 @@ int sys_madvise(void *addr, size_t len, int behav) } #endif +int sys_setaffinity(pid_t pid, int cpu) +{ + return lwp_setaffinity(pid, cpu); +} + const static void* func_table[] = { (void *)sys_exit, /* 01 */ @@ -4152,7 +4163,7 @@ const static void* func_table[] = (void *)sys_thread_sigprocmask, #ifdef ARCH_MM_MMU (void *)sys_cacheflush, - (void *)sys_notimpl, + (void *)sys_setaffinity, (void *)sys_notimpl, #else (void *)sys_notimpl, diff --git a/src/scheduler.c b/src/scheduler.c index faa67ca0049cf921e36d1129f484ec1ee0b4ff59..2b44d4366d392afdf3fce938b244a21c9cf823ac 100644 --- a/src/scheduler.c +++ b/src/scheduler.c @@ -358,13 +358,20 @@ void rt_schedule(void) current_thread->oncpu = RT_CPU_DETACHED; if ((current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING) { - if (current_thread->current_priority < highest_ready_priority) + if (current_thread->bind_cpu == RT_CPUS_NR || current_thread->bind_cpu == cpu_id) { - to_thread = current_thread; - } - else if (current_thread->current_priority == highest_ready_priority && (current_thread->stat & RT_THREAD_STAT_YIELD_MASK) == 0) - { - to_thread = current_thread; + if (current_thread->current_priority < highest_ready_priority) + { + to_thread = current_thread; + } + else if (current_thread->current_priority == highest_ready_priority && (current_thread->stat & 
RT_THREAD_STAT_YIELD_MASK) == 0) + { + to_thread = current_thread; + } + else + { + rt_schedule_insert_thread(current_thread); + } } else { @@ -612,13 +619,20 @@ void rt_scheduler_do_irq_switch(void *context) current_thread->oncpu = RT_CPU_DETACHED; if ((current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING) { - if (current_thread->current_priority < highest_ready_priority) + if (current_thread->bind_cpu == RT_CPUS_NR || current_thread->bind_cpu == cpu_id) { - to_thread = current_thread; - } - else if (current_thread->current_priority == highest_ready_priority && (current_thread->stat & RT_THREAD_STAT_YIELD_MASK) == 0) - { - to_thread = current_thread; + if (current_thread->current_priority < highest_ready_priority) + { + to_thread = current_thread; + } + else if (current_thread->current_priority == highest_ready_priority && (current_thread->stat & RT_THREAD_STAT_YIELD_MASK) == 0) + { + to_thread = current_thread; + } + else + { + rt_schedule_insert_thread(current_thread); + } } else { diff --git a/src/thread.c b/src/thread.c index 2027852e45e584f032cf4219dd2f6f8141e9054d..4e5737773ff4c4d909189cba8a20374f3d0f1aab 100644 --- a/src/thread.c +++ b/src/thread.c @@ -629,6 +629,66 @@ rt_err_t rt_thread_mdelay(rt_int32_t ms) } RTM_EXPORT(rt_thread_mdelay); +#ifdef RT_USING_SMP +static void rt_thread_cpu_bind(rt_thread_t thread, int cpu) +{ + rt_base_t level; + + if (cpu >= RT_CPUS_NR) + { + cpu = RT_CPUS_NR; + } + + level = rt_hw_interrupt_disable(); + if ((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY) + { + /* unbind */ + /* remove from old ready queue */ + rt_schedule_remove_thread(thread); + /* change thread bind cpu */ + thread->bind_cpu = cpu; + /* add to new ready queue */ + rt_schedule_insert_thread(thread); + if (rt_thread_self() != RT_NULL) + { + rt_schedule(); + } + } + else + { + thread->bind_cpu = cpu; + if ((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING) + { + /* thread is running on a cpu */ + int current_cpu = rt_hw_cpu_id(); 
+ + if (cpu != RT_CPUS_NR) + { + if (thread->oncpu == current_cpu) + { + /* current thread on current cpu */ + if (cpu != current_cpu) + { + /* bind to other cpu */ + rt_hw_ipi_send(RT_SCHEDULE_IPI, 1U << cpu); + /* self cpu need reschedule */ + rt_schedule(); + } + /* else do nothing */ + } + else + { + /* no running on self cpu, but dest cpu can be itself */ + rt_hw_ipi_send(RT_SCHEDULE_IPI, 1U << thread->oncpu); + } + } + /* else do nothing */ + } + } + rt_hw_interrupt_enable(level); +} +#endif + /** * This function will control thread behaviors according to control command. * @@ -716,14 +776,8 @@ rt_err_t rt_thread_control(rt_thread_t thread, int cmd, void *arg) { rt_uint8_t cpu; - if ((thread->stat & RT_THREAD_STAT_MASK) != RT_THREAD_INIT) - { - /* we only support bind cpu before started phase. */ - return RT_ERROR; - } - cpu = (rt_uint8_t)(size_t)arg; - thread->bind_cpu = cpu > RT_CPUS_NR? RT_CPUS_NR : cpu; + rt_thread_cpu_bind(thread, cpu); break; } #endif /*RT_USING_SMP*/