原始版本

This commit is contained in:
冯佳
2025-06-19 21:56:46 +08:00
parent fe98e5f010
commit a4841450cf
4152 changed files with 1910684 additions and 0 deletions

View File

@ -0,0 +1,90 @@
# Kconfig for the LwP (light-weight process) component of RT-Thread Smart.
menuconfig RT_USING_LWP
bool "light-weight process"
depends on RT_USING_SMART
default y
help
The lwP is a light weight process running in user mode.
if RT_USING_LWP
# --- Debug facilities for LwP itself ---
menuconfig LWP_DEBUG
bool "Enable debugging features of LwP"
default n
if LWP_DEBUG
# Debug hooks for the INIT process; needs the hook-list infrastructure.
config LWP_DEBUG_INIT
select RT_USING_HOOKLIST
bool "Enable debug mode of init process"
depends on LWP_USING_RUNTIME
default y
endif
config LWP_USING_RUNTIME
bool "Using processes runtime environment (INIT process)"
default y
help
Runtime environment provide by init process including boot scripts,
poweroff, shutdown, reboot, etc.
# --- Global limits for processes, threads and IPC channels ---
config RT_LWP_MAX_NR
int "The max number of light-weight process"
default 30
config LWP_TASK_STACK_SIZE
int "The lwp thread kernel stack size"
default 16384
config RT_CH_MSG_MAX_NR
int "The maximum number of channel messages"
default 1024
config LWP_TID_MAX_NR
int "The maximum number of lwp thread id"
default 64
# ASID tagging is only available on Cortex-A class MMUs.
config LWP_ENABLE_ASID
bool "The switch of ASID feature"
depends on ARCH_ARM_CORTEX_A
default y
# --- Options specific to the memory-management model of the target ---
if ARCH_MM_MMU
config RT_LWP_SHM_MAX_NR
int "The maximum number of shared memory"
default 64
# Selected by the arch, not user-visible: whether mprotect() works here.
config LWP_USING_MPROTECT
bool
default n
help
ARCH has the support of mprotect
endif
if ARCH_MM_MPU
config RT_LWP_MPU_MAX_NR
int "The maximum number of mpu region"
default 2
config RT_LWP_USING_SHM
bool "Enable shared memory"
default y
endif
# --- Dynamic loader (shared objects); requires DFS v2 + page cache ---
menuconfig RT_USING_LDSO
bool "LDSO: dynamic load shared objects"
depends on RT_USING_DFS_V2
select RT_USING_PAGECACHE
default y
if RT_USING_LDSO
config ELF_DEBUG_ENABLE
bool "Enable ldso debug"
default n
config ELF_LOAD_RANDOMIZE
bool "Enable random load address"
default n
endif
rsource "terminal/Kconfig"
rsource "vdso/Kconfig"
endif

View File

@ -0,0 +1,55 @@
# SConscript for the LwP (light-weight process) component.
# Collects the portable C sources plus the arch/cpu specific sources that
# match the current toolchain (rtconfig.PLATFORM) and CPU (rtconfig.CPU),
# and returns the resulting build group.
Import('rtconfig')
from building import *
import os

cwd = GetCurrentDir()
src = []
CPPPATH = [cwd]

# arch -> list of CPUs that have an LwP port
support_arch = {"arm": ["cortex-m3", "cortex-m4", "cortex-m7", "arm926", "cortex-a"],
                "aarch64": ["cortex-a"],
                "risc-v": ["rv64"],
                "x86": ["i386"]}
# toolchain -> assembly source suffix
platform_file = {'armcc': 'rvds.S', 'gcc': 'gcc.S', 'iar': 'iar.S'}

platform = rtconfig.PLATFORM
arch = rtconfig.ARCH
cpu = rtconfig.CPU

# fix the cpu for risc-v: all 64-bit RISC-V targets share the 'rv64' port
if arch == 'risc-v':
    if GetDepend('ARCH_CPU_64BIT'):
        cpu = 'rv64'

if platform in platform_file:  # supported toolchains only
    if arch in support_arch and cpu in support_arch[arch]:
        asm_path = 'arch/' + arch + '/' + cpu + '/*_' + platform_file[platform]
        arch_common = 'arch/' + arch + '/' + 'common/*.c'
        if not GetDepend('RT_USING_VDSO'):
            # drop the vDSO sources when the feature is disabled
            vdso_files = ['vdso_data.c', 'vdso.c']
            src += [f for f in Glob(arch_common) if os.path.basename(str(f)) not in vdso_files]
        else:
            src += Glob(arch_common)
        if not GetDepend('ARCH_MM_MMU'):
            # these sources require a full MMU and are excluded on MPU/no-MMU parts
            excluded_files = ['ioremap.c', 'lwp_futex.c', 'lwp_mm_area.c', 'lwp_pmutex.c', 'lwp_shm.c', 'lwp_user_mm.c']
            src += [f for f in Glob('*.c') if os.path.basename(str(f)) not in excluded_files] + Glob(asm_path)
        else:
            src += Glob('*.c') + Glob(asm_path)
        src += Glob('arch/' + arch + '/' + cpu + '/*.c')
        # NOTE: the original reassigned CPPPATH = [cwd] here; that was a dead
        # store (CPPPATH already holds exactly [cwd]) and has been removed.
        CPPPATH += [cwd + '/arch/' + arch + '/' + cpu]

# Terminal I/O Subsystem
termios_path = ['./terminal/', './terminal/freebsd/']
for item in termios_path:
    src += Glob(item + '*.c')
CPPPATH += ['./terminal/']

# Remove optional sources
if not GetDepend(['LWP_USING_RUNTIME']):
    SrcRemove(src, 'lwp_runtime.c')

group = DefineGroup('lwP', src, depend = ['RT_USING_SMART'], CPPPATH = CPPPATH)
group = group + SConscript(os.path.join('vdso', 'SConscript'))
Return('group')

View File

@ -0,0 +1,28 @@
/*
 * ELF relocation stub for dynamically loaded objects on AArch64.
 */
#include <rtthread.h>
#include <stdint.h>
#include <string.h>
#include <elf.h>
#ifdef ARCH_MM_MMU
#include <mmu.h>
#include <page.h>
#endif

/* Map the generic Elf_* names onto the 64-bit ELF types.
 * FIX: the original fused two directives onto one line
 * ("#define Elf_Ehdr Elf64_Ehdr #define Elf_Phdr Elf64_Phdr"),
 * which is not a valid preprocessor directive; they are split here. */
#define Elf_Word Elf64_Word
#define Elf_Addr Elf64_Addr
#define Elf_Half Elf64_Half
#define Elf_Ehdr Elf64_Ehdr
#define Elf_Phdr Elf64_Phdr
#define Elf_Shdr Elf64_Shdr

/* ELF64 symbol-table entry layout (matches Elf64_Sym). */
typedef struct
{
    Elf_Word st_name;       /* symbol name: index into the string table */
    Elf_Addr st_value;      /* symbol value (address/offset) */
    Elf_Word st_size;       /* size of the object */
    unsigned char st_info;  /* type and binding attributes */
    unsigned char st_other; /* visibility */
    Elf_Half st_shndx;      /* index of the defining section */
} Elf_sym;

/*
 * Apply dynamic relocations for a loaded object.
 * The body is intentionally empty on this architecture; the function only
 * satisfies the common loader interface.
 * NOTE(review): presumably AArch64 user images are loaded position-
 * independent so no fix-up is needed here — confirm against the loader.
 */
void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym)
{
}

View File

@ -0,0 +1,110 @@
/*
 * Copyright (c) 2006-2024 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date Author Notes
 * 2024-07-04 rcitach init ver.
 */
#include <rtthread.h>
#include <mmu.h>
#include <gtimer.h>
#include <lwp_user_mm.h>
#include "vdso.h"
#include "vdso_datapage.h"
#define DBG_TAG "vdso"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
/* Supported vDSO ABIs; only an AArch64 image is provided here. */
enum vdso_abi {
VDSO_ABI_AA64,
};
/* Page indices of the vvar (vDSO data) region; VVAR_NR_PAGES is the count. */
enum vvar_pages {
VVAR_DATA_PAGE_OFFSET,
VVAR_TIMENS_PAGE_OFFSET,
VVAR_NR_PAGES,
};
/* Describes one vDSO image: symbolic name, code range and page count. */
struct vdso_abi_info {
const char *name;
const char *vdso_code_start;
const char *vdso_code_end;
unsigned long vdso_pages;
};
/* vdso_pages is filled in at boot by validate_vdso_elf(). */
static struct vdso_abi_info vdso_info[] = {
[VDSO_ABI_AA64] = {
.name = "vdso_aarch64",
.vdso_code_start = __vdso_text_start,
.vdso_code_end = __vdso_text_end,
},
};
/* Kernel-side backing store for the vDSO data page; the union pads the
 * vdso_data array out to exactly one page so it can be mapped into
 * every user address space. */
static union {
struct vdso_data data[CS_BASES];
uint8_t page[ARCH_PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;
/* Set to -RT_ERROR by validate_vdso_elf() when the embedded image is bad;
 * arch_setup_additional_pages() refuses to map a bad image. */
int init_ret_flag = RT_EOK;
/**
 * @brief Map the vDSO data page(s) and code pages into an lwp's user space.
 *
 * The data pages are mapped first at a kernel-chosen address; the code
 * pages are mapped directly behind them and their base is recorded in
 * lwp->vdso_vbase.
 *
 * @param abi which vDSO image to map (only VDSO_ABI_AA64 here)
 * @param lwp target process, must not be NULL
 * @return RT_EOK on success, -RT_ERROR when any mapping fails
 *
 * FIX: the original returned positive RT_ERROR on failure (callers of
 * arch_setup_additional_pages() use negative codes), kept going after a
 * failed data mapping (mapping code at NULL + len), never checked the
 * code mapping, and did arithmetic on a void pointer.
 */
static int __setup_additional_pages(enum vdso_abi abi, struct rt_lwp *lwp)
{
    void *vdso_base;
    unsigned long vdso_data_len, vdso_text_len;

    RT_ASSERT(lwp != RT_NULL);

    vdso_data_len = VVAR_NR_PAGES * ARCH_PAGE_SIZE;
    vdso_text_len = vdso_info[abi].vdso_pages << ARCH_PAGE_SHIFT;

    /* map the shared data pages first */
    vdso_base = lwp_map_user_phy(lwp, RT_NULL, rt_kmem_v2p((void *)vdso_data), vdso_data_len, 0);
    if (vdso_base == RT_NULL)
    {
        return -RT_ERROR;
    }

    /* the code pages live directly behind the data pages */
    vdso_base = (char *)vdso_base + vdso_data_len;
    vdso_base = lwp_map_user_phy(lwp, vdso_base, rt_kmem_v2p((void *)vdso_info[abi].vdso_code_start), vdso_text_len, 0);
    if (vdso_base == RT_NULL)
    {
        return -RT_ERROR;
    }

    lwp->vdso_vbase = vdso_base;
    return RT_EOK;
}
/**
 * @brief Install the vDSO mappings into @lwp.
 *
 * Refuses early when the embedded vDSO image failed its boot-time
 * validation (init_ret_flag set by validate_vdso_elf()).
 *
 * @return result of the mapping, or -RT_ERROR when the image is invalid
 */
int arch_setup_additional_pages(struct rt_lwp *lwp)
{
    if (init_ret_flag != RT_EOK)
    {
        return -RT_ERROR;
    }
    return __setup_additional_pages(VDSO_ABI_AA64, lwp);
}
/* Seed the vDSO realtime offset from the default soft-RTC calendar time. */
static void __initdata(void)
{
struct tm time_vdso = SOFT_RTC_VDSOTIME_DEFAULT;
vdso_data->realtime_initdata = timegm(&time_vdso);
}
/*
 * Boot-time check that the embedded vDSO image starts with a valid ELF
 * header; on success also computes the number of code pages and seeds the
 * time data. Registered as an INIT component so it runs before any lwp
 * is created.
 */
static int validate_vdso_elf(void)
{
if (rt_memcmp(vdso_info[VDSO_ABI_AA64].vdso_code_start, ELF_HEAD, ELF_HEAD_LEN)) {
LOG_E("vDSO is not a valid ELF object!");
/* remember the failure so arch_setup_additional_pages() can refuse */
init_ret_flag = -RT_ERROR;
return -RT_ERROR;
}
/* code page count = (end - start) / page size */
vdso_info[VDSO_ABI_AA64].vdso_pages = (
vdso_info[VDSO_ABI_AA64].vdso_code_end -
vdso_info[VDSO_ABI_AA64].vdso_code_start) >>
ARCH_PAGE_SHIFT;
__initdata();
return RT_EOK;
}
INIT_COMPONENT_EXPORT(validate_vdso_elf);

View File

@ -0,0 +1,34 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-07-04 rcitach init ver.
*/
#include <rtthread.h>
#include <gtimer.h>
#include <ktime.h>
#include <time.h>
#include <vdso_datapage.h>
#include <vdso_data.h>
/*
 * Refresh the kernel-owned vDSO data page with the current time bases.
 * Writers bracket the update with rt_vdso_write_begin()/end() so user-mode
 * readers can detect a concurrent update and retry.
 */
void rt_vdso_update_glob_time(void)
{
struct vdso_data *vdata = get_k_vdso_data();
struct timespec *vdso_ts;
/* boot-time realtime offset captured once by __initdata() */
uint64_t initdata = vdata->realtime_initdata;
rt_vdso_write_begin(vdata);
/* CLOCK_REALTIME = boottime + initial realtime offset */
vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
rt_ktime_boottime_get_ns(vdso_ts);
vdso_ts->tv_sec = initdata + vdso_ts->tv_sec;
/* CLOCK_MONOTONIC = raw boottime */
vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC];
rt_ktime_boottime_get_ns(vdso_ts);
/* counter snapshot matching the timestamps above */
vdata->cycle_last = rt_hw_get_cntpct_val();
rt_vdso_write_end(vdata);
}

View File

@ -0,0 +1,263 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven first version
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
* 2023-10-16 Shell Support a new backtrace framework
* 2023-08-03 Shell Support of syscall restart (SA_RESTART)
*/
#include <armv8.h>
#include <rthw.h>
#include <rtthread.h>
#include <stdlib.h>
#include <string.h>
#include <lwp_internal.h>
#ifdef ARCH_MM_MMU
#define DBG_TAG "lwp.arch"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <lwp_arch.h>
#include <lwp_user_mm.h>
extern size_t MMUTable[];
/**
 * @brief Create the private user address space for a new lwp (AArch64).
 *
 * Allocates a fresh top-level page table and wraps it in an aspace
 * covering [USER_VADDR_START, USER_VADDR_TOP); also initializes the
 * heap end pointer.
 *
 * @return 0 on success, -RT_ENOMEM when the page table cannot be
 *         allocated, -RT_ERROR when aspace creation fails
 *
 * FIX: the original leaked the freshly created page table when
 * rt_aspace_create() failed; it is now released with rt_pages_free(),
 * matching the release path in arch_user_space_free().
 */
int arch_user_space_init(struct rt_lwp *lwp)
{
    size_t *mmu_table;

    mmu_table = rt_hw_mmu_pgtbl_create();
    if (!mmu_table)
    {
        return -RT_ENOMEM;
    }

    lwp->end_heap = USER_HEAP_VADDR;
    lwp->aspace = rt_aspace_create(
        (void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
    if (!lwp->aspace)
    {
        /* don't leak the page table on failure */
        rt_pages_free(mmu_table, 0);
        return -RT_ERROR;
    }

    return 0;
}
/* The kernel page table is not exposed through this hook on AArch64. */
void *arch_kernel_mmu_table_get(void)
{
    return NULL;
}
/*
 * Tear down the user address space of @lwp: delete the aspace first, then
 * free the top-level page table that backed it.
 */
void arch_user_space_free(struct rt_lwp *lwp)
{
if (lwp)
{
RT_ASSERT(lwp->aspace);
void *pgtbl = lwp->aspace->page_table;
rt_aspace_delete(lwp->aspace);
/* must be freed after aspace delete, pgtbl is required for unmap */
rt_pages_free(pgtbl, 0);
lwp->aspace = NULL;
}
else
{
LOG_W("%s: NULL lwp as parameter", __func__);
RT_ASSERT(0);
}
}
/*
 * Demand-grow the user stack for a faulting address.
 * Returns 1 when the containing page is (now) usable, 0 otherwise.
 */
int arch_expand_user_stack(void *addr)
{
int ret = 0;
size_t stack_addr = (size_t)addr;
/* round down to the page containing the faulting address */
stack_addr &= ~ARCH_PAGE_MASK;
if ((stack_addr >= (size_t)USER_STACK_VSTART) &&
(stack_addr < (size_t)USER_STACK_VEND))
{
void *map =
lwp_map_user(lwp_self(), (void *)stack_addr, ARCH_PAGE_SIZE, 0);
/* the map may fail because the page already exists; probe it */
if (map || lwp_user_accessable(addr, 1))
{
ret = 1;
}
}
return ret;
}
#endif
/*
 * Build the initial kernel context for a new user thread (clone path).
 * Copies the parent's syscall exception frame onto the new kernel stack,
 * points its user SP at @user_stack and forces x0 = 0 (the child's syscall
 * return value), then chains a regular thread context in front of it.
 * The resulting kernel SP is returned through @thread_sp.
 */
int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
void *user_stack, void **thread_sp)
{
struct rt_hw_exp_stack *syscall_frame;
struct rt_hw_exp_stack *thread_frame;
/* parent's saved syscall frame, captured at SVC entry */
struct rt_hw_exp_stack *ori_syscall = rt_thread_self()->user_ctx.ctx;
RT_ASSERT(ori_syscall != RT_NULL);
/* AAPCS64 requires 16-byte stack alignment */
new_thread_stack = (rt_ubase_t*)RT_ALIGN_DOWN((rt_ubase_t)new_thread_stack, 16);
syscall_frame = (void *)((long)new_thread_stack - sizeof(struct rt_hw_exp_stack));
memcpy(syscall_frame, ori_syscall, sizeof(*syscall_frame));
syscall_frame->sp_el0 = (long)user_stack;
/* child observes 0 as the clone() return value */
syscall_frame->x0 = 0;
thread_frame = (void *)rt_hw_stack_init(exit, RT_NULL, (void *)syscall_frame, RT_NULL);
*thread_sp = thread_frame;
return 0;
}
/* alignment of the saved exception frame (sic: "ALGIN" is a historic typo) */
#define ALGIN_BYTES (16)
/* the layout is part of ABI, dont change it */
struct signal_ucontext
{
rt_int64_t sigreturn;
lwp_sigset_t save_sigmask;
siginfo_t si;
rt_align(ALGIN_BYTES)
struct rt_hw_exp_stack frame;
};
/* compile-time guard: user-space ABI expects si at this fixed offset */
RT_STATIC_ASSERT(abi_offset_compatible, offsetof(struct signal_ucontext, si) == UCTX_ABI_OFFSET_TO_SI);
/* Return the saved exception frame embedded in a user signal context. */
void *arch_signal_ucontext_get_frame(struct signal_ucontext *uctx)
{
return &uctx->frame;
}
/* internal used only */
/*
 * Prepare the exception frame for possible signal delivery after a syscall:
 * x0 takes the syscall result, while the original first argument is parked
 * in x7 so a SA_RESTART restart can recover it (see
 * arch_signal_check_erestart()).
 */
void arch_syscall_prepare_signal(rt_base_t rc, struct rt_hw_exp_stack *exp_frame)
{
long x0 = exp_frame->x0;
exp_frame->x0 = rc;
exp_frame->x7 = x0;
return ;
}
void arch_syscall_restart(void *sp, void *ksp);
/*
 * Rewrite the syscall return code in @eframe: when it equals -expected,
 * replace it with -code (used to translate restart codes into errno).
 */
void arch_syscall_set_errno(void *eframe, int expected, int code)
{
struct rt_hw_exp_stack *exp_frame = eframe;
if (exp_frame->x0 == -expected)
exp_frame->x0 = -code;
return ;
}
/*
 * If the interrupted syscall returned -ERESTART, restore its original
 * first argument (parked in x7 by arch_syscall_prepare_signal()) and
 * re-issue the syscall via arch_syscall_restart().
 */
void arch_signal_check_erestart(void *eframe, void *ksp)
{
struct rt_hw_exp_stack *exp_frame = eframe;
long rc = exp_frame->x0;
long sys_id = exp_frame->x8;
(void)sys_id;
if (rc == -ERESTART)
{
LOG_D("%s(rc=%ld,sys_id=%ld,pid=%d)", __func__, rc, sys_id, lwp_self()->pid);
LOG_D("%s: restart rc = %ld", lwp_get_syscall_name(sys_id), rc);
/* recover the original x0 and replay the syscall */
exp_frame->x0 = exp_frame->x7;
arch_syscall_restart(eframe, ksp);
}
return ;
}
/* Post-sigreturn bookkeeping: currently just the SA_RESTART check. */
static void arch_signal_post_action(struct signal_ucontext *new_sp, rt_base_t kernel_sp)
{
arch_signal_check_erestart(&new_sp->frame, (void *)kernel_sp);
return ;
}
/*
 * sigreturn path: validate the user-provided ucontext, restore the saved
 * signal mask, handle syscall restart, and hand back the address just past
 * the saved exception frame (the asm side rewinds sp_el0 from it).
 */
void *arch_signal_ucontext_restore(rt_base_t user_sp, rt_base_t kernel_sp)
{
struct signal_ucontext *new_sp;
new_sp = (void *)user_sp;
if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
{
lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &new_sp->save_sigmask, RT_NULL);
arch_signal_post_action(new_sp, kernel_sp);
}
else
{
/* a corrupted/forged frame is fatal for the process */
LOG_I("User frame corrupted during signal handling\nexiting...");
sys_exit_group(EXIT_FAILURE);
}
return (char *)&new_sp->frame + sizeof(struct rt_hw_exp_stack);
}
/*
 * Push a signal_ucontext onto the user stack before entering a handler:
 * the siginfo, the interrupted exception frame, the previous signal mask
 * and a copy of the lwp_sigreturn trampoline (so the handler's return
 * lands in a sigreturn syscall). Returns the new (16-byte aligned) user SP.
 */
void *arch_signal_ucontext_save(rt_base_t user_sp, siginfo_t *psiginfo,
struct rt_hw_exp_stack *exp_frame,
lwp_sigset_t *save_sig_mask)
{
struct signal_ucontext *new_sp;
/* carve the context out of the user stack, 16-byte aligned */
new_sp = (void *)((user_sp - sizeof(struct signal_ucontext)) & ~0xf);
if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
{
/* push psiginfo */
if (psiginfo)
{
lwp_memcpy(&new_sp->si, psiginfo, sizeof(*psiginfo));
}
/* exp frame is already aligned as AAPCS64 required */
lwp_memcpy(&new_sp->frame, exp_frame, sizeof(*exp_frame));
/* copy the save_sig_mask */
lwp_memcpy(&new_sp->save_sigmask, save_sig_mask, sizeof(lwp_sigset_t));
/* copy lwp_sigreturn */
const size_t lwp_sigreturn_bytes = 8;
extern void lwp_sigreturn(void);
/* -> ensure that the sigreturn start at the outer most boundary */
lwp_memcpy(&new_sp->sigreturn, &lwp_sigreturn, lwp_sigreturn_bytes);
}
else
{
LOG_I("%s: User stack overflow", __func__);
sys_exit_group(EXIT_FAILURE);
}
return new_sp;
}
/*
 * Backtrace a user thread from its saved syscall/exception frame.
 * Only valid when the saved frame pointer lies inside the thread's kernel
 * stack; returns 0 on success, -1 otherwise.
 */
int arch_backtrace_uthread(rt_thread_t thread)
{
struct rt_hw_backtrace_frame frame;
struct rt_hw_exp_stack *stack;
if (thread && thread->lwp)
{
stack = thread->user_ctx.ctx;
/* sanity check: ctx must point into this thread's kernel stack */
if ((long)stack > (unsigned long)thread->stack_addr
&& (long)stack < (unsigned long)thread->stack_addr + thread->stack_size)
{
frame.pc = stack->pc;
frame.fp = stack->x29;  /* x29 is the AArch64 frame pointer */
lwp_backtrace_frame(thread, &frame);
return 0;
}
else
return -1;
}
return -1;
}

View File

@ -0,0 +1,76 @@
/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date Author Notes
 * 2021-05-18 Jesven first version
 */
#ifndef LWP_ARCH_H__
#define LWP_ARCH_H__
#include <rtconfig.h>
#ifdef ARCH_MM_MMU
/*
 * AArch64 user virtual address layout:
 *   [USER_VADDR_START, USER_VADDR_TOP)  whole user space
 *   heap grows from USER_HEAP_VADDR up to the stack region;
 *   stacks occupy [USER_STACK_VSTART, USER_STACK_VEND), with the
 *   argument area directly above the stacks.
 */
#define USER_VADDR_TOP 0x0001000000000000UL
#define USER_HEAP_VADDR (0x0000ffff40000000UL)
#define USER_HEAP_VEND USER_STACK_VSTART
#define USER_STACK_VSTART 0x0000ffff70000000UL
#define USER_STACK_VEND (USER_STACK_VSTART + 0x10000000)
#define USER_ARG_VADDR USER_STACK_VEND
#define LDSO_LOAD_VADDR 0x60000000UL
#define USER_VADDR_START 0x00200000UL
#define USER_LOAD_VADDR USER_VADDR_START
/* offset of siginfo inside struct signal_ucontext; part of the user ABI */
#define UCTX_ABI_OFFSET_TO_SI 16
#ifndef __ASSEMBLY__
#include <lwp.h>
#include <lwp_arch_comm.h>
#ifdef __cplusplus
extern "C" {
#endif
/* find first zero bit (implemented in arch asm/C) */
unsigned long rt_hw_ffz(unsigned long x);
/* Invalidate the entire instruction cache (inner shareable). */
rt_inline void icache_invalid_all(void)
{
__asm__ volatile ("ic ialluis\n\tisb sy":::"memory");
}
/**
 * @brief Save signal-related context to user stack
 *
 * @param user_sp the current sp of user
 * @param exp_frame exception frame to resume former execution
 * @param psiginfo pointer to the siginfo
 * @return void* the new user sp
 */
void *arch_signal_ucontext_save(rt_base_t user_sp, siginfo_t *psiginfo,
struct rt_hw_exp_stack *exp_frame,
lwp_sigset_t *save_sig_mask);
/**
 * @brief Restore the signal mask after return
 *
 * @param user_sp sp of user
 * @return void*
 */
void *arch_signal_ucontext_restore(rt_base_t user_sp, rt_base_t kernel_sp);
/* replay an interrupted syscall (SA_RESTART); implemented in lwp_gcc.S */
void arch_syscall_restart(void *sp, void *ksp);
#ifdef __cplusplus
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* ARCH_MM_MMU */
#endif /*LWP_ARCH_H__*/

View File

@ -0,0 +1,471 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven first version
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
* 2023-08-03 Shell Support of syscall restart (SA_RESTART)
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include <rtconfig.h>
#include <asm-generic.h>
#include <asm-fpu.h>
#include <vector_gcc.h>
#include <armv8.h>
#include <lwp_arch.h>
/*********************
* SPSR BIT *
*********************/
#define SPSR_Mode(v) ((v) << 0)
#define SPSR_A64 (0 << 4)
#define SPSR_RESEVRED_5 (0 << 5)
#define SPSR_FIQ_MASKED(v) ((v) << 6)
#define SPSR_IRQ_MASKED(v) ((v) << 7)
#define SPSR_SERROR_MASKED(v) ((v) << 8)
#define SPSR_D_MASKED(v) ((v) << 9)
#define SPSR_RESEVRED_10_19 (0 << 10)
#define SPSR_IL(v) ((v) << 20)
#define SPSR_SS(v) ((v) << 21)
#define SPSR_RESEVRED_22_27 (0 << 22)
#define SPSR_V(v) ((v) << 28)
#define SPSR_C(v) ((v) << 29)
#define SPSR_Z(v) ((v) << 30)
#define SPSR_N(v) ((v) << 31)
/**************************************************/
.text
/*
 * void arch_start_umode(args, text, ustack, kstack);
 * Drop from EL1 to EL0 and begin executing user code at `text` (x1)
 * with user stack `ustack` (x2); `kstack` (x3) becomes the EL1 stack.
 */
.global arch_start_umode
.type arch_start_umode, % function
arch_start_umode:
mov sp, x3
mov x4, #(SPSR_Mode(0) | SPSR_A64)  /* target state: EL0t, AArch64 */
msr daifset, #3                     /* mask IRQ/FIQ across the switch */
dsb sy
mrs x30, sp_el0
/* user stack top */
msr sp_el0, x2
mov x3, x2
msr spsr_el1, x4
msr elr_el1, x1                     /* user entry point */
eret
/*
 * void arch_crt_start_umode(args, text, ustack, kstack);
 * Like arch_start_umode, but first copies the 3-instruction
 * lwp_thread_return trampoline onto the user stack and points lr at it,
 * so returning from the user entry lands in an exit syscall.
 */
.global arch_crt_start_umode
.type arch_crt_start_umode, % function
arch_crt_start_umode:
sub x4, x2, #0x10                   /* reserve 16 bytes on the user stack */
adr x2, lwp_thread_return
ldr x5, [x2]
str x5, [x4]
ldr x5, [x2, #4]
str x5, [x4, #4]
ldr x5, [x2, #8]
str x5, [x4, #8]
/* clean D-cache for the copied code, then invalidate the I-cache */
mov x5, x4
dc cvau, x5
add x5, x5, #8
dc cvau, x5
dsb sy
ic ialluis
dsb sy
msr sp_el0, x4
mov sp, x3
mov x4, #(SPSR_Mode(0) | SPSR_A64)
msr daifset, #3
dsb sy
mrs x30, sp_el0                     /* lr = trampoline on user stack */
msr spsr_el1, x4
msr elr_el1, x1
eret
/* return the current user-mode stack pointer (sp_el0) */
.global arch_get_user_sp
arch_get_user_sp:
mrs x0, sp_el0
ret
/* child side of fork()/clone(): syscall return value is 0 */
.global arch_fork_exit
.global arch_clone_exit
arch_fork_exit:
arch_clone_exit:
mov x0, xzr
b arch_syscall_exit
/*
void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
*/
.global lwp_exec_user
lwp_exec_user:
mov sp, x1
mov x4, #(SPSR_Mode(0) | SPSR_A64)
ldr x3, =0x0000ffff80000000         /* NOTE(review): x3 appears unused after this load */
msr daifset, #3
msr spsr_el1, x4
msr elr_el1, x2
eret
/*
* void SVC_Handler(regs);
* since this routine reset the SP, we take it as a start point
*/
/*
 * void SVC_Handler(regs);
 * since this routine reset the SP, we take it as a start point
 */
START_POINT(SVC_Handler)
mov fp, xzr
mov lr, xzr
/* x0 is initial sp */
mov sp, x0
bl _SVC_Handler
/* jump explictly, make this code position independant */
b arch_syscall_exit
START_POINT_END(SVC_Handler)
TRACE_SYMBOL(_SVC_Handler)
#define FRAME_REG x19
/**
 * x0 -> frame_addr
 * Decode the syscall number from x8, look up the handler, reload the first
 * eight user arguments from the exception frame and call it. The magic
 * numbers 0xe000/0xf000 are the sigreturn and debug-return pseudo-syscalls
 * issued by lwp_sigreturn / lwp_debugreturn below.
 */
_SVC_Handler:
.local _SVC_Handler
stp fp, lr, [sp, -16]!
mov fp, sp
mov FRAME_REG, x0 /* save the value of frame address */
msr daifclr, #3 /* enable interrupt */
GET_THREAD_SELF x0
bl lwp_user_setting_save
ldp x8, x9, [FRAME_REG, #(CONTEXT_OFFSET_X8)]
and x0, x8, #0xf000
cmp x0, #0xe000
beq arch_signal_quit
cmp x0, #0xf000
beq ret_from_user
uxtb x0, w8
bl lwp_get_sys_api
cmp x0, xzr
mov x30, x0
/* a NULL handler means an unknown syscall number; bail out */
beq arch_syscall_exit
ldp x0, x1, [FRAME_REG, #(CONTEXT_OFFSET_X0)]
ldp x2, x3, [FRAME_REG, #(CONTEXT_OFFSET_X2)]
ldp x4, x5, [FRAME_REG, #(CONTEXT_OFFSET_X4)]
ldp x6, x7, [FRAME_REG, #(CONTEXT_OFFSET_X6)]
blr x30
ldp fp, lr, [sp], 16
ret
/**
 * void arch_syscall_exit(long rc)
 */
arch_syscall_exit:
.global arch_syscall_exit
/**
 * backup former x0 which is required to restart syscall, then setup
 * syscall return value in stack frame
 */
mov x1, sp
bl arch_syscall_prepare_signal
/**
 * disable local irq so we don't messup with the spsr_el1 witch is not saved
 * for kernel space IRQ/EXCEPTION
 */
msr daifset, #3
b arch_ret_to_user
/* the sp is reset to the outer most level, irq and fiq are disabled */
/* the sp is reset to the outer most level, irq and fiq are disabled */
/*
 * Common exit path back to EL0: restores sp_el0/elr/spsr from the
 * exception frame, then (1) honors pending exit requests, (2) manages the
 * debugger single-step bit, (3) delivers pending signals, and finally
 * erets to user mode.
 */
START_POINT(arch_ret_to_user)
msr daifset, #3
ldr x2, [sp, #CONTEXT_OFFSET_SP_EL0]
msr sp_el0, x2
ldr x2, [sp, #CONTEXT_OFFSET_ELR_EL1]
msr elr_el1, x2
ldr x3, [sp, #CONTEXT_OFFSET_SPSR_EL1]
msr spsr_el1, x3
/* pre-action */
bl lwp_check_debug
bl lwp_check_exit_request
cbz w0, 1f
/* exit on event */
msr daifclr, #3
mov x0, xzr
b sys_exit
1: /* handling dbg */
/* check if dbg ops exist */
ldr x0, =rt_dbg_ops
ldr x0, [x0]
cbz x0, 3f
bl dbg_thread_in_debug
mov x1, #(1 << 21)              /* SPSR_EL1.SS (software step) bit */
mrs x2, spsr_el1
cbz w0, 2f
orr x2, x2, x1
msr spsr_el1, x2
b 3f
2: /* clear software step */
bic x2, x2, x1
msr spsr_el1, x2
3: /* handling signal */
/**
 * push updated spsr & elr to exception frame.
 * Note: these 2 maybe updated after handling dbg
 */
mrs x0, spsr_el1
str x0, [sp, #CONTEXT_OFFSET_SPSR_EL1]
mrs x1, elr_el1
str x1, [sp, #CONTEXT_OFFSET_ELR_EL1]
mov x0, sp
/* restore the thread execution environment */
msr daifclr, #3
bl lwp_thread_signal_catch
/* restore the exception-return exec-flow */
msr daifset, #3
/* check debug */
ldr x0, =rt_dbg_ops
ldr x0, [x0]
cmp x0, xzr
beq 1f
ldr x0, [sp, #CONTEXT_OFFSET_ELR_EL1]
bl dbg_attach_req
1:
RESTORE_IRQ_CONTEXT_NO_SPEL0
eret
START_POINT_END(arch_ret_to_user)
/*
 * If a debugger is attached and requests a suspend, inject the 2-instruction
 * lwp_debugreturn trampoline onto the user stack and eret into it with
 * single-step enabled; the debug-return syscall (0xf000) brings control back
 * to ret_from_user below. Otherwise a plain return.
 */
.global lwp_check_debug
lwp_check_debug:
ldr x0, =rt_dbg_ops
ldr x0, [x0]
cbnz x0, 1f
ret
1:
stp x29, x30, [sp, #-0x10]!
bl dbg_check_suspend
cbz w0, lwp_check_debug_quit
/* copy lwp_debugreturn onto the user stack */
mrs x2, sp_el0
sub x2, x2, #0x10
mov x3, x2
msr sp_el0, x2
ldr x0, =lwp_debugreturn
ldr w1, [x0]
str w1, [x2]
ldr w1, [x0, #4]
str w1, [x2, #4]
/* clean D-cache / invalidate I-cache for the injected code */
dc cvau, x2
add x2, x2, #4
dc cvau, x2
dsb sy
isb sy
ic ialluis
isb sy
/* stash the original elr/spsr; restored in ret_from_user */
mrs x0, elr_el1
mrs x1, spsr_el1
stp x0, x1, [sp, #-0x10]!
msr elr_el1, x3 /* lwp_debugreturn */
mov x1, #(SPSR_Mode(0) | SPSR_A64)
orr x1, x1, #(1 << 21)          /* enable single-step */
msr spsr_el1, x1
eret
ret_from_user:
/* sp_el0 += 16 for drop ins lwp_debugreturn */
mrs x0, sp_el0
add x0, x0, #0x10
msr sp_el0, x0
/* now is el1, sp is pos(empty) - sizeof(context) */
mov x0, sp
add x0, x0, #0x220
mov sp, x0
ldp x0, x1, [sp], #0x10 /* x1 is origin spsr_el1 */
msr elr_el1, x0 /* x0 is origin elr_el1 */
msr spsr_el1, x1
lwp_check_debug_quit:
ldp x29, x30, [sp], #0x10
ret
/*
 * void arch_syscall_restart(void *sp, void *ksp);
 * Rewind to the pre-syscall state (frames prepared by the C side) and
 * re-enter the exception vector so the syscall is replayed.
 */
.global arch_syscall_restart
arch_syscall_restart:
msr daifset, 3
mov sp, x1
/* drop exception frame in user stack */
msr sp_el0, x0
/* restore previous exception frame */
msr spsel, #0
RESTORE_IRQ_CONTEXT_NO_SPEL0
msr spsel, #1
b vector_exception
/*
 * sigreturn (pseudo-syscall 0xe000) entry: restore the pre-signal context
 * saved by arch_signal_ucontext_save() and resume the interrupted flow.
 */
arch_signal_quit:
/* drop current exception frame & sigreturn */
add sp, sp, #(CONTEXT_SIZE + 0x10)
mov x1, sp
mrs x0, sp_el0
bl arch_signal_ucontext_restore
add x0, x0, #-CONTEXT_SIZE
msr sp_el0, x0
/**
 * Note: Since we will reset spsr, but the reschedule will
 * corrupt the spsr, we diable irq for a short period here
 */
msr daifset, #3
/* restore previous exception frame */
msr spsel, #0
RESTORE_IRQ_CONTEXT_NO_SPEL0
msr spsel, #1
SAVE_IRQ_CONTEXT
b arch_ret_to_user
/**
* rt_noreturn
* void arch_thread_signal_enter(
* int signo, -> x0
* siginfo_t *psiginfo, -> x1
* void *exp_frame, -> x2
* void *entry_uaddr, -> x3
* lwp_sigset_t *save_sig_mask, -> x4
* )
*/
/*
 * Entry into a user-mode signal handler; never returns to the caller.
 * Saves the interrupted context on the user stack (C helper), flushes
 * caches for the copied sigreturn trampoline, then erets into the handler
 * with (signo, siginfo, uframe) per the lwp signal ABI.
 */
.global arch_thread_signal_enter
arch_thread_signal_enter:
mov x19, x0
mov x20, x2 /* exp_frame */
mov x21, x3
/**
 * move exception frame to user stack
 */
mrs x0, sp_el0
mov x3, x4
/* arch_signal_ucontext_save(user_sp, psiginfo, exp_frame, save_sig_mask); */
bl arch_signal_ucontext_save
mov x22, x0
/* get and saved pointer to uframe */
bl arch_signal_ucontext_get_frame
mov x2, x0
mov x0, x22
/* make the injected sigreturn trampoline visible to instruction fetch */
dc cvau, x0
dsb sy
ic ialluis
dsb sy
/**
 * Brief: Prepare the environment for signal handler
 */
/**
 * reset the cpsr
 * and drop exp frame on kernel stack, reset kernel sp
 *
 * Note: Since we will reset spsr, but the reschedule will
 * corrupt the spsr, we diable irq for a short period here
 */
msr daifset, #3
ldr x1, [x20, #CONTEXT_OFFSET_SPSR_EL1]
msr spsr_el1, x1
add sp, x20, #CONTEXT_SIZE
/** reset user sp */
msr sp_el0, x0
/** set the return address to the sigreturn */
mov x30, x0
/* a NULL handler entry falls back to sigreturn directly */
cbnz x21, 1f
mov x21, x30
1:
/** set the entry address of signal handler */
msr elr_el1, x21
/* siginfo is above the return address */
add x1, x30, UCTX_ABI_OFFSET_TO_SI
/* uframe is saved in x2 */
mov x0, x19
/**
 * handler(signo, psi, ucontext);
 *
 */
eret
/* 2-instruction trampoline: issue the debug-return pseudo-syscall */
lwp_debugreturn:
mov x8, 0xf000
svc #0
/* 2-instruction trampoline: issue the sigreturn pseudo-syscall */
.global lwp_sigreturn
lwp_sigreturn:
mov x8, #0xe000
svc #0
/* user-entry return trampoline: exit(0) via syscall #1 */
lwp_thread_return:
mov x0, xzr
mov x8, #0x01
svc #0
/* TLS accessors: user-mode thread pointer lives in tpidr_el0 */
.globl arch_get_tidr
arch_get_tidr:
mrs x0, tpidr_el0
ret
.global arch_set_thread_area
arch_set_thread_area:
.globl arch_set_tidr
arch_set_tidr:
msr tpidr_el0, x0
ret

View File

@ -0,0 +1,121 @@
#include "mm_aspace.h"
#include <rtthread.h>
#include <stdint.h>
#include <string.h>
#include <lwp_elf.h>
#ifdef ARCH_MM_MMU
#include <mmu.h>
#include <page.h>
#endif
/* ELF32 symbol-table entry layout (matches Elf32_Sym). */
typedef struct
{
Elf32_Word st_name;
Elf32_Addr st_value;
Elf32_Word st_size;
unsigned char st_info;
unsigned char st_other;
Elf32_Half st_shndx;
} Elf32_sym;
#ifdef ARCH_MM_MMU
/*
 * Apply R_ARM_RELATIVE / R_ARM_ABS32 relocations and rebase the GOT of a
 * loaded ELF image. The image lives in another address space, so every
 * access goes through rt_hw_mmu_v2p() to get the physical address and is
 * then converted back to a kernel virtual address via PV_OFFSET.
 * Each .rel.dyn entry is 8 bytes: offset (v1) then info (v2), with the
 * relocation type in the low byte and the symbol index in the high bits.
 */
void arch_elf_reloc(rt_aspace_t aspace, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
{
size_t rel_off;
void* addr;
if (rel_dyn_size && !dynsym)
{
return;
}
for (rel_off = 0; rel_off < rel_dyn_size; rel_off += 8)
{
uint32_t v1, v2;
/*
memcpy(&v1, rel_dyn_start + rel_off, 4);
memcpy(&v2, rel_dyn_start + rel_off + 4, 4);
*/
/* read the relocation entry through its physical mapping */
addr = rt_hw_mmu_v2p(aspace, (void*)((char*)rel_dyn_start + rel_off));
addr = (void*)((char*)addr - PV_OFFSET);
memcpy(&v1, addr, 4);
addr = rt_hw_mmu_v2p(aspace, (void*)((char*)rel_dyn_start + rel_off + 4));
addr = (void*)((char*)addr - PV_OFFSET);
memcpy(&v2, addr, 4);
/* kernel-visible address of the location to patch */
addr = rt_hw_mmu_v2p(aspace, (void*)((char*)text_start + v1));
addr = (void*)((char*)addr - PV_OFFSET);
if ((v2 & 0xff) == R_ARM_RELATIVE)
{
// *(uint32_t*)(text_start + v1) += (uint32_t)text_start;
*(uint32_t*)addr += (uint32_t)text_start;
}
else if ((v2 & 0xff) == R_ARM_ABS32)
{
uint32_t t;
t = (v2 >> 8);  /* symbol index */
if (t) /* 0 is UDF */
{
// *(uint32_t*)(text_start + v1) = (uint32_t)(text_start + dynsym[t].st_value);
*(uint32_t*)addr = (uint32_t)((char*)text_start + dynsym[t].st_value);
}
}
}
/* modify got */
if (got_size)
{
uint32_t *got_item = (uint32_t*)got_start;
for (rel_off = 0; rel_off < got_size; rel_off += 4, got_item++)
{
//*got_item += (uint32_t)text_start;
addr = rt_hw_mmu_v2p(aspace, got_item);
addr = (void*)((char*)addr - PV_OFFSET);
*(uint32_t *)addr += (uint32_t)text_start;
}
}
}
#else
/*
 * No-MMU variant: same relocation logic as above, but the image is
 * directly addressable so entries are patched in place.
 */
void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
{
size_t rel_off;
if (rel_dyn_size && !dynsym)
{
return;
}
for (rel_off = 0; rel_off < rel_dyn_size; rel_off += 8)
{
uint32_t v1, v2;
memcpy(&v1, (void*)((char*)rel_dyn_start + rel_off), 4);
memcpy(&v2, (void*)((char*)rel_dyn_start + rel_off + 4), 4);
if ((v2 & 0xff) == R_ARM_RELATIVE)
{
*(uint32_t*)((char*)text_start + v1) += (uint32_t)text_start;
}
else if ((v2 & 0xff) == R_ARM_ABS32)
{
uint32_t t;
t = (v2 >> 8);  /* symbol index */
if (t) /* 0 is UDF */
{
*(uint32_t*)((char*)text_start + v1) = (uint32_t)((char*)text_start + dynsym[t].st_value);
}
}
}
/* modify got */
if (got_size)
{
uint32_t *got_item = (uint32_t*)got_start;
for (rel_off = 0; rel_off < got_size; rel_off += 4, got_item++)
{
*got_item += (uint32_t)text_start;
}
}
}
#endif

View File

@ -0,0 +1,254 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-10-28 Jesven first version
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
*/
#include <rthw.h>
#include <rtthread.h>
#include <stddef.h>
#include <stdlib.h>
#ifdef ARCH_MM_MMU
#define DBG_TAG "lwp.arch"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <lwp_arch.h>
#include <lwp_user_mm.h>
/**
 * @brief Create the private user address space for a new lwp (ARM Cortex-A).
 *
 * Allocates a fresh first-level page table and wraps it in an aspace
 * covering [USER_VADDR_START, USER_VADDR_TOP); also initializes the heap
 * end pointer.
 *
 * @return 0 on success, -RT_ENOMEM when the page table cannot be
 *         allocated, -RT_ERROR when aspace creation fails
 *
 * FIX: the original leaked the freshly created page table when
 * rt_aspace_create() failed; it is now released with
 * rt_hw_mmu_pgtbl_delete(), mirroring arch_user_space_free().
 */
int arch_user_space_init(struct rt_lwp *lwp)
{
    size_t *mmu_table;

    mmu_table = rt_hw_mmu_pgtbl_create();
    if (!mmu_table)
    {
        return -RT_ENOMEM;
    }

    lwp->end_heap = USER_HEAP_VADDR;
    lwp->aspace = rt_aspace_create((void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
    if (!lwp->aspace)
    {
        /* don't leak the page table on failure */
        rt_hw_mmu_pgtbl_delete(mmu_table);
        return -RT_ERROR;
    }

    return 0;
}
/* static varea backing the fixed kuser-helper mapping */
static struct rt_varea kuser_varea;
/*
 * Map the kuser helper page (vectors) into @aspace at a fixed address and
 * copy the helper code to the top of that page, then flush caches so user
 * mode can execute it.
 */
void arch_kuser_init(rt_aspace_t aspace, void *vectors)
{
int err;
const size_t kuser_size = 0x1000;
extern char __kuser_helper_start[];
extern char __kuser_helper_end[];
rt_base_t start = (rt_base_t)__kuser_helper_start;
rt_base_t end = (rt_base_t)__kuser_helper_end;
int kuser_sz = end - start;
err = rt_aspace_map_static(aspace, &kuser_varea, &vectors, kuser_size,
MMU_MAP_U_RO, MMF_MAP_FIXED | MMF_PREFETCH,
&rt_mm_dummy_mapper, 0);
if (err != 0)
while (1)
; // early failed
/* helpers occupy the top of the page, per the fixed kuser layout */
lwp_memcpy((void *)((char *)vectors + 0x1000 - kuser_sz), __kuser_helper_start, kuser_sz);
/*
* vectors + 0xfe0 = __kuser_get_tls
* vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
*/
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)vectors + 0x1000 - kuser_sz), kuser_sz);
rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, (void *)((char *)vectors + 0x1000 - kuser_sz), kuser_sz);
}
/*
 * Tear down the user address space of @lwp: delete the aspace first, then
 * free the page table that backed it.
 */
void arch_user_space_free(struct rt_lwp *lwp)
{
if (lwp)
{
RT_ASSERT(lwp->aspace);
void *pgtbl = lwp->aspace->page_table;
rt_aspace_delete(lwp->aspace);
/* must be freed after aspace delete, pgtbl is required for unmap */
rt_hw_mmu_pgtbl_delete(pgtbl);
lwp->aspace = RT_NULL;
}
else
{
LOG_W("%s: NULL lwp as parameter", __func__);
RT_ASSERT(0);
}
}
/*
 * Demand-grow the user stack for a faulting address.
 * Returns 1 when the containing page is (now) usable, 0 otherwise.
 */
int arch_expand_user_stack(void *addr)
{
    size_t page_base = (size_t)addr & ~ARCH_PAGE_MASK;

    /* only addresses inside the reserved stack window may be expanded */
    if (page_base < (size_t)USER_STACK_VSTART || page_base >= (size_t)USER_STACK_VEND)
    {
        return 0;
    }

    if (lwp_map_user(lwp_self(), (void *)page_base, ARCH_PAGE_SIZE, 0))
    {
        return 1;
    }

    /* mapping may fail because the page already exists; probe it instead */
    return lwp_user_accessable(addr, 1) ? 1 : 0;
}
/* stack alignment for the signal frame (sic: "ALGIN" is a historic typo) */
#define ALGIN_BYTES 8
/* size of the copied lwp_sigreturn trampoline, in bytes */
#define lwp_sigreturn_bytes 8
/* Register snapshot pushed for signal delivery (ARM 32-bit). */
struct signal_regs {
rt_base_t lr;
rt_base_t spsr;
rt_base_t r0_to_r12[13];
rt_base_t ip;
};
/* User-stack signal context; layout is part of the user ABI. */
struct signal_ucontext
{
rt_base_t sigreturn[lwp_sigreturn_bytes / sizeof(rt_base_t)];
lwp_sigset_t save_sigmask;
siginfo_t si;
rt_align(8)
struct signal_regs frame;
};
/*
 * sigreturn path: validate the user-provided context, restore the saved
 * signal mask and swap frame.ip/frame.lr so user code can recover its lr
 * while the kernel resumes at the saved return address.
 */
void *arch_signal_ucontext_restore(rt_base_t user_sp)
{
struct signal_ucontext *new_sp;
rt_base_t ip;
new_sp = (void *)user_sp;
if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
{
lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &new_sp->save_sigmask, RT_NULL);
ip = new_sp->frame.ip;
/* let user restore its lr from frame.ip */
new_sp->frame.ip = new_sp->frame.lr;
/* kernel will pick eip from frame.lr */
new_sp->frame.lr = ip;
}
else
{
/* a corrupted/forged frame is fatal for the process */
LOG_I("User frame corrupted during signal handling\nexiting...");
sys_exit_group(EXIT_FAILURE);
}
return (void *)&new_sp->frame;
}
/*
 * Push a signal_ucontext onto the user stack before entering a handler:
 * siginfo, the interrupted register snapshot (plus lr/spsr), the previous
 * signal mask and a copy of the lwp_sigreturn trampoline.
 * Returns the new user SP.
 */
void *arch_signal_ucontext_save(rt_base_t lr, siginfo_t *psiginfo,
struct signal_regs *exp_frame, rt_base_t user_sp,
lwp_sigset_t *save_sig_mask)
{
rt_base_t spsr;
struct signal_ucontext *new_sp;
new_sp = (void *)(user_sp - sizeof(struct signal_ucontext));
if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
{
/* push psiginfo */
if (psiginfo)
{
lwp_memcpy(&new_sp->si, psiginfo, sizeof(*psiginfo));
}
/* copy r0-r12 and ip from the exception frame */
lwp_memcpy(&new_sp->frame.r0_to_r12, exp_frame, sizeof(new_sp->frame.r0_to_r12) + sizeof(rt_base_t));
new_sp->frame.lr = lr;
__asm__ volatile("mrs %0, spsr":"=r"(spsr));
new_sp->frame.spsr = spsr;
/* copy the save_sig_mask */
lwp_memcpy(&new_sp->save_sigmask, save_sig_mask, sizeof(lwp_sigset_t));
/* copy lwp_sigreturn */
extern void lwp_sigreturn(void);
/* -> ensure that the sigreturn start at the outer most boundary */
lwp_memcpy(&new_sp->sigreturn, &lwp_sigreturn, lwp_sigreturn_bytes);
}
else
{
LOG_I("%s: User stack overflow", __func__);
sys_exit_group(EXIT_FAILURE);
}
return new_sp;
}
/* Syscall errno rewriting is not supported on this architecture; no-op. */
void arch_syscall_set_errno(void *eframe, int expected, int code)
{
    (void)eframe;
    (void)expected;
    (void)code;
}
/* Expose the kernel's page table (used when switching to kernel-only maps). */
void *arch_kernel_mmu_table_get(void)
{
return rt_kernel_space.page_table;
}
#ifdef LWP_ENABLE_ASID
#define MAX_ASID_BITS 8
#define MAX_ASID (1 << MAX_ASID_BITS)
/* Generation counter + per-generation allocation bitmap: when all ASIDs
 * are exhausted the generation is bumped, the bitmap reset, and the whole
 * TLB invalidated. */
static uint64_t global_generation = 1;
static char asid_valid_bitmap[MAX_ASID];
/*
 * Return a valid ASID for @lwp, allocating one if its cached ASID belongs
 * to an older generation. ASID 0 is reserved for the kernel.
 */
unsigned int arch_get_asid(struct rt_lwp *lwp)
{
if (lwp == RT_NULL)
{
// kernel
return 0;
}
/* fast path: ASID already valid for the current generation */
if (lwp->generation == global_generation)
{
return lwp->asid;
}
/* try to re-claim the previously held ASID in the new generation
 * NOTE(review): this path does not refresh lwp->generation, so the
 * re-claim check repeats on every call — confirm if intentional */
if (lwp->asid && !asid_valid_bitmap[lwp->asid])
{
asid_valid_bitmap[lwp->asid] = 1;
return lwp->asid;
}
/* linear scan for a free ASID (1..MAX_ASID-1) */
for (unsigned i = 1; i < MAX_ASID; i++)
{
if (asid_valid_bitmap[i] == 0)
{
asid_valid_bitmap[i] = 1;
lwp->generation = global_generation;
lwp->asid = i;
return lwp->asid;
}
}
/* all ASIDs used: start a new generation and flush the TLB */
global_generation++;
memset(asid_valid_bitmap, 0, MAX_ASID * sizeof(char));
asid_valid_bitmap[1] = 1;
lwp->generation = global_generation;
lwp->asid = 1;
/* TLBIALL + barriers: invalidate entire unified TLB */
asm volatile ("mcr p15, 0, r0, c8, c7, 0\ndsb\nisb" ::: "memory");
return lwp->asid;
}
#endif
#endif

View File

@ -0,0 +1,55 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef LWP_ARCH_H__
#define LWP_ARCH_H__
#include <lwp.h>
#include <lwp_arch_comm.h>
#ifdef ARCH_MM_MMU
/*
 * ARM 32-bit user virtual address layout:
 *   [USER_VADDR_START, USER_VADDR_TOP)  whole user space
 *   heap   [USER_HEAP_VADDR, USER_HEAP_VEND)
 *   stacks [USER_STACK_VSTART, USER_STACK_VEND)
 */
#define USER_VADDR_TOP 0xC0000000UL
#define USER_HEAP_VEND 0xB0000000UL
#define USER_HEAP_VADDR 0x80000000UL
#define USER_STACK_VSTART 0x70000000UL
#define USER_STACK_VEND USER_HEAP_VADDR
#define LDSO_LOAD_VADDR 0x60000000UL
#define USER_VADDR_START 0x00010000UL
#define USER_LOAD_VADDR USER_VADDR_START
#ifdef __cplusplus
extern "C" {
#endif
/* Find the first zero bit of x (index of the lowest clear bit). */
rt_inline unsigned long rt_hw_ffz(unsigned long x)
{
return __builtin_ffsl(~x) - 1;
}
/* Invalidate the entire instruction cache (ICIALLU) with barriers. */
rt_inline void icache_invalid_all(void)
{
__asm__ volatile ("mcr p15, 0, r0, c7, c5, 0\ndsb\nisb":::"memory");//iciallu
}
/* Allocate/fetch the TLB ASID for @lwp (see arch_get_asid in lwp_arch.c). */
unsigned int arch_get_asid(struct rt_lwp *lwp);
struct signal_regs;
/* Restore state saved by arch_signal_ucontext_save(); see lwp_arch.c. */
void *arch_signal_ucontext_restore(rt_base_t user_sp);
/* Build the signal frame on the user stack; see lwp_arch.c. */
void *arch_signal_ucontext_save(rt_base_t lr, siginfo_t *psiginfo,
struct signal_regs *exp_frame, rt_base_t user_sp,
lwp_sigset_t *save_sig_mask);
#ifdef __cplusplus
}
#endif
#endif
#endif /*LWP_ARCH_H__*/

View File

@ -0,0 +1,470 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-12-10 Jesven first version
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
*/
#include "rtconfig.h"
#include "asm-generic.h"
#define Mode_USR 0x10
#define Mode_FIQ 0x11
#define Mode_IRQ 0x12
#define Mode_SVC 0x13
#define Mode_MON 0x16
#define Mode_ABT 0x17
#define Mode_UDF 0x1B
#define Mode_SYS 0x1F
#define A_Bit 0x100
#define I_Bit 0x80 @; when I bit is set, IRQ is disabled
#define F_Bit 0x40 @; when F bit is set, FIQ is disabled
#define T_Bit 0x20
.cpu cortex-a9
.syntax unified
.text
/*
* void arch_start_umode(args, text, ustack, kstack);
*/
.global arch_start_umode
.type arch_start_umode, % function
arch_start_umode:
mrs r9, cpsr
bic r9, #0x1f
orr r9, #Mode_USR
cpsid i
msr spsr, r9
mov sp, r3
/* set user stack top */
cps #Mode_SYS
mov sp, r2
cps #Mode_SVC
mov r3, r2
/* set data address. */
movs pc, r1
/*
* void arch_crt_start_umode(args, text, ustack, kstack);
*/
.global arch_crt_start_umode
.type arch_crt_start_umode, % function
arch_crt_start_umode:
cps #Mode_SYS
sub sp, r2, #16
ldr r2, =lwp_thread_return
ldr r4, [r2]
str r4, [sp]
ldr r4, [r2, #4]
str r4, [sp, #4]
ldr r4, [r2, #8]
str r4, [sp, #8]
mov r4, sp
mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau
add r4, #4
mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau
add r4, #4
mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau
dsb
isb
mcr p15, 0, r4, c7, c5, 0 ;//iciallu
dsb
isb
mov lr, sp
cps #Mode_SVC
mrs r9, cpsr
bic r9, #0x1f
orr r9, #Mode_USR
cpsid i
msr spsr, r9
mov sp, r3
/* set data address. */
movs pc, r1
/*
void arch_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp);
*/
.global arch_set_thread_context
arch_set_thread_context:
sub r1, #(10 * 4 + 4 * 4) /* {r4 - r12, lr} , {r4, r5, spsr, u_pc} */
stmfd r1!, {r0}
mov r12, #0
stmfd r1!, {r12}
stmfd r1!, {r1 - r12}
stmfd r1!, {r12} /* new thread return value */
mrs r12, cpsr
orr r12, #(1 << 7) /* disable irq */
stmfd r1!, {r12} /* spsr */
mov r12, #0
stmfd r1!, {r12} /* now user lr is 0 */
stmfd r1!, {r2} /* user sp */
#ifdef RT_USING_FPU
stmfd r1!, {r12} /* not use fpu */
#endif
str r1, [r3]
mov pc, lr
.global arch_get_user_sp
arch_get_user_sp:
cps #Mode_SYS
mov r0, sp
cps #Mode_SVC
mov pc, lr
.global sys_fork
.global sys_vfork
.global arch_fork_exit
sys_fork:
sys_vfork:
push {r4 - r12, lr}
bl _sys_fork
arch_fork_exit:
pop {r4 - r12, lr}
b arch_syscall_exit
.global sys_clone
.global arch_clone_exit
sys_clone:
push {r4 - r12, lr}
bl _sys_clone
arch_clone_exit:
pop {r4 - r12, lr}
b arch_syscall_exit
/*
void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
*/
.global lwp_exec_user
lwp_exec_user:
cpsid i
mov sp, r1
mov lr, r2
mov r2, #Mode_USR
msr spsr_cxsf, r2
ldr r3, =0x80000000
b arch_ret_to_user
/*
* void SVC_Handler(void);
*/
/*
 * SWI/SVC entry: dispatch a trap from user mode.
 * The value in r7 selects the action:
 *   0xe000    -> signal return     (pushed by lwp_sigreturn)
 *   0xf000    -> debugger return   (pushed by lwp_debugreturn)
 *   otherwise -> regular syscall, index in the low byte, resolved
 *                through lwp_get_sys_api
 */
.global vector_swi
.type vector_swi, % function
START_POINT(vector_swi)
    push    {lr}                    /* save user pc */
    mrs     lr, spsr                /* save user cpsr */
    push    {r4, r5, lr}
    cpsie   i                       /* re-enable IRQ while handling */
    push    {r0 - r3, r12}          /* save volatile regs / syscall args */
    bl      rt_thread_self
    bl      lwp_user_setting_save
    and     r0, r7, #0xf000
    cmp     r0, #0xe000
    beq     arch_signal_quit        /* r7 == 0xe000: sigreturn path */
    cmp     r0, #0xf000
    beq     ret_from_user           /* r7 == 0xf000: debug return path */
    and     r0, r7, #0xff
    bl      lwp_get_sys_api
    cmp     r0, #0                  /* r0 = api */
    mov     lr, r0                  /* mov preserves the flags from cmp */
    pop     {r0 - r3, r12}          /* restore syscall arguments */
    beq     arch_syscall_exit       /* no handler registered: bail out */
    blx     lr                      /* call handler(args in r0-r3) */
START_POINT_END(vector_swi)
.global arch_syscall_exit
arch_syscall_exit:
cpsid i
pop {r4, r5, lr}
msr spsr_cxsf, lr
pop {lr}
.global arch_ret_to_user
arch_ret_to_user:
/* save all context for signal handler */
push {r0-r12, lr}
bl lwp_check_debug
bl lwp_check_exit_request
cmp r0, #0
beq 1f
mov r0, #0
b sys_exit
1:
mov r0, sp
/* r0 -> exp frame */
bl lwp_thread_signal_catch
ldr r0, =rt_dbg_ops
ldr r0, [r0]
cmp r0, #0
beq 2f
mov r0, lr
bl dbg_attach_req
2:
pop {r0-r12, lr}
movs pc, lr
#ifdef RT_USING_SMART
.global lwp_check_debug
lwp_check_debug:
ldr r0, =rt_dbg_ops
ldr r0, [r0]
cmp r0, #0
bne 1f
bx lr
1:
push {lr}
bl dbg_check_suspend
cmp r0, #0
beq lwp_check_debug_quit
cps #Mode_SYS
sub sp, #8
ldr r0, =lwp_debugreturn
ldr r1, [r0]
str r1, [sp]
ldr r1, [r0, #4]
str r1, [sp, #4]
mov r1, sp
mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
add r1, #4
mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
dsb
isb
mcr p15, 0, r0, c7, c5, 0 ;//iciallu
dsb
isb
mov r0, sp /* lwp_debugreturn */
cps #Mode_SVC
mrs r1, spsr
push {r1}
mov r1, #Mode_USR
msr spsr_cxsf, r1
movs pc, r0
ret_from_user:
cps #Mode_SYS
add sp, #8
cps #Mode_SVC
/*
pop {r0 - r3, r12}
pop {r4 - r6, lr}
*/
add sp, #(4*9)
pop {r4}
msr spsr_cxsf, r4
lwp_check_debug_quit:
pop {pc}
arch_signal_quit:
cpsid i
/* drop context of signal handler */
pop {r0 - r3, r12}
pop {r4, r5, lr}
pop {lr}
/* restore context */
cps #Mode_SYS
mov r0, sp
cps #Mode_SVC
bl arch_signal_ucontext_restore
/* lr <- *(&frame.ip) */
ldr lr, [r0]
cps #Mode_SYS
mov sp, r0
/* drop ip in the frame and restore cpsr */
pop {r0}
pop {r0}
msr spsr_cxsf, r0
pop {r0-r12, lr}
cps #Mode_SVC
b arch_ret_to_user
/**
* rt_noreturn
* void arch_thread_signal_enter(
* int signo, -> r0
* siginfo_t *psiginfo, -> r1
* void *exp_frame, -> r2
* void *entry_uaddr, -> r3
* lwp_sigset_t *save_sig_mask, -> ??
* )
*/
.global arch_thread_signal_enter
arch_thread_signal_enter:
mov r4, r0
mov r5, r3
mov r6, r2
cps #Mode_SYS
mov r0, lr
mov r3, sp
cps #Mode_SVC
bl arch_signal_ucontext_save
/* drop volatile frame {r0-r12, lr} */
add sp, r6, #14*4
/* reset user sp */
cps #Mode_SYS
mov sp, r0
mov lr, r0
cps #Mode_SVC
/* r1,r2 <- new_user_sp */
mov r1, r0
mov r2, r0
mcr p15, 0, r0, c7, c11, 1 ;//dc cmvau
add r0, #4
mcr p15, 0, r0, c7, c11, 1 ;//dc cmvau
dsb
isb
mcr p15, 0, r1, c7, c5, 0 ;//iciallu
dsb
isb
/* r0 <- signo */
mov r0, r4
/* r4 <- &sigreturn */
mov r4, r2
/* lr <- user_handler() */
mov lr, r5
cmp lr, #0
moveq lr, r4
/* r1 <- siginfo */
mov r1, r2
add r1, #8
/* handler(signo, siginfo, ucontext) */
movs pc, lr
lwp_debugreturn:
mov r7, #0xf000
svc #0
.global lwp_sigreturn
lwp_sigreturn:
mov r7, #0xe000
svc #0
lwp_thread_return:
mov r0, #0
mov r7, #0x01
svc #0
#endif
.global check_vfp
check_vfp:
#ifdef RT_USING_FPU
vmrs r0, fpexc
ubfx r0, r0, #30, #1
#else
mov r0, #0
#endif
mov pc, lr
.global get_vfp
get_vfp:
#ifdef RT_USING_FPU
vstmia r0!, {d0-d15}
vstmia r0!, {d16-d31}
vmrs r1, fpscr
str r1, [r0]
#endif
mov pc, lr
.globl arch_get_tidr
arch_get_tidr:
mrc p15, 0, r0, c13, c0, 3
bx lr
.global arch_set_thread_area
arch_set_thread_area:
.globl arch_set_tidr
arch_set_tidr:
mcr p15, 0, r0, c13, c0, 3
bx lr
/* kuser suppurt */
.macro kuser_pad, sym, size
.if (. - \sym) & 3
.rept 4 - (. - \sym) & 3
.byte 0
.endr
.endif
.rept (\size - (. - \sym)) / 4
.word 0xe7fddef1
.endr
.endm
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
__kuser_cmpxchg64: @ 0xffff0f60
stmfd sp!, {r4, r5, r6, lr}
ldmia r0, {r4, r5} @ load old val
ldmia r1, {r6, lr} @ load new val
1: ldmia r2, {r0, r1} @ load current val
eors r3, r0, r4 @ compare with oldval (1)
eorseq r3, r1, r5 @ compare with oldval (2)
2: stmiaeq r2, {r6, lr} @ store newval if eq
rsbs r0, r3, #0 @ set return val and C flag
ldmfd sp!, {r4, r5, r6, pc}
kuser_pad __kuser_cmpxchg64, 64
__kuser_memory_barrier: @ 0xffff0fa0
dmb
mov pc, lr
kuser_pad __kuser_memory_barrier, 32
__kuser_cmpxchg: @ 0xffff0fc0
1: ldr r3, [r2] @ load current val
subs r3, r3, r0 @ compare with oldval
2: streq r1, [r2] @ store newval if eq
rsbs r0, r3, #0 @ set return val and C flag
mov pc, lr
kuser_pad __kuser_cmpxchg, 32
__kuser_get_tls: @ 0xffff0fe0
mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
mov pc, lr
ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
kuser_pad __kuser_get_tls, 16
.rep 3
.word 0 @ 0xffff0ff0 software TLS value, then
.endr @ pad up to __kuser_helper_version
__kuser_helper_version: @ 0xffff0ffc
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
.globl __kuser_helper_end
__kuser_helper_end:

View File

@ -0,0 +1,11 @@
# RT-Thread building script for component
from building import *
cwd = GetCurrentDir()
src = Glob('*.c') + Glob('*.S')
CPPPATH = [cwd]
group = DefineGroup('lwp-riscv', src, depend = ['RT_USING_SMART'], CPPPATH = CPPPATH)
Return('group')

View File

@ -0,0 +1,357 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020-11-18 Jesven first version
* 2021-02-03 lizhirui port to riscv64
* 2021-02-06 lizhirui add thread filter
* 2021-02-19 lizhirui port to new version of rt-smart
* 2021-03-02 lizhirui add a auxillary function for interrupt
* 2021-03-04 lizhirui delete thread filter
* 2021-03-04 lizhirui modify for new version of rt-smart
* 2021-11-22 JasonHu add lwp_set_thread_context
* 2021-11-30 JasonHu add clone/fork support
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
* 2023-10-16 Shell Support a new backtrace framework
*/
#include <rthw.h>
#include <rtthread.h>
#include <stddef.h>
#ifdef ARCH_MM_MMU
#define DBG_TAG "lwp.arch"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <lwp_internal.h>
#include <lwp_arch.h>
#include <lwp_user_mm.h>
#include <page.h>
#include <cpuport.h>
#include <encoding.h>
#include <stack.h>
#include <cache.h>
extern rt_ubase_t MMUTable[];
void *lwp_copy_return_code_to_user_stack()
{
void lwp_thread_return();
void lwp_thread_return_end();
rt_thread_t tid = rt_thread_self();
if (tid->user_stack != RT_NULL)
{
rt_size_t size = (rt_size_t)lwp_thread_return_end - (rt_size_t)lwp_thread_return;
rt_size_t userstack = (rt_size_t)tid->user_stack + tid->user_stack_size - size;
lwp_memcpy((void *)userstack, lwp_thread_return, size);
return (void *)userstack;
}
return RT_NULL;
}
/**
 * Adjust a user stack pointer to skip the thread-return trampoline
 * reserved at the stack top.
 *
 * @param cursp current user stack pointer; 0 means "no stack"
 * @return adjusted stack pointer, or 0 when @cursp is 0
 */
rt_ubase_t lwp_fix_sp(rt_ubase_t cursp)
{
    void lwp_thread_return();
    void lwp_thread_return_end();
    rt_size_t reserved = (rt_size_t)lwp_thread_return_end - (rt_size_t)lwp_thread_return;

    return cursp ? (cursp - reserved) : 0;
}
rt_thread_t rt_thread_sp_to_thread(void *spmember_addr)
{
return (rt_thread_t)(((rt_ubase_t)spmember_addr) - (offsetof(struct rt_thread, sp)));
}
/* Return the (exclusive) top address of a thread's kernel stack. */
void *get_thread_kernel_stack_top(rt_thread_t thread)
{
    rt_size_t top = (rt_size_t)thread->stack_addr + (rt_size_t)thread->stack_size;
    return (void *)top;
}
/**
 * Fetch the user stack pointer of the current thread.
 *
 * The user sp was saved into the exception frame stored at the very top
 * of the thread's kernel stack when the trap from user mode was taken.
 *
 * @return the saved user stack pointer
 */
void *arch_get_user_sp(void)
{
    /* user sp saved in interrupt context */
    rt_thread_t self = rt_thread_self();
    rt_uint8_t *stack_top = (rt_uint8_t *)self->stack_addr + self->stack_size;
    struct rt_hw_stack_frame *frame = (struct rt_hw_stack_frame *)(stack_top - sizeof(struct rt_hw_stack_frame));

    return (void *)frame->user_sp_exc_stack;
}
/**
 * Create the user address space for a new light-weight process.
 *
 * Allocates a fresh top-level page table and wraps it in an aspace that
 * covers the whole user virtual address range.
 *
 * @param lwp process whose address space is being initialized
 * @return 0 on success; -RT_ENOMEM / -RT_ERROR on failure
 */
int arch_user_space_init(struct rt_lwp *lwp)
{
    rt_ubase_t *mmu_table;

    mmu_table = rt_hw_mmu_pgtbl_create();
    if (!mmu_table)
    {
        return -RT_ENOMEM;
    }

    lwp->end_heap = USER_HEAP_VADDR;
    lwp->aspace = rt_aspace_create(
        (void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
    if (!lwp->aspace)
    {
        /* fix: don't leak the page table when aspace creation fails */
        rt_hw_mmu_pgtbl_delete(mmu_table);
        return -RT_ERROR;
    }

    return 0;
}
/* Expose the kernel's top-level MMU translation table. */
void *arch_kernel_mmu_table_get(void)
{
    return (void *)MMUTable;
}
/**
 * Tear down a process's user address space.
 *
 * Order matters: the aspace is deleted first because unmapping inside
 * rt_aspace_delete() still needs to walk the page table; only then is
 * the page table itself released.
 *
 * @param lwp process being destroyed; passing NULL is a hard error
 */
void arch_user_space_free(struct rt_lwp *lwp)
{
    if (lwp)
    {
        RT_ASSERT(lwp->aspace);
        void *pgtbl = lwp->aspace->page_table;
        rt_aspace_delete(lwp->aspace);

        /* must be freed after aspace delete, pgtbl is required for unmap */
        rt_hw_mmu_pgtbl_delete(pgtbl);
        lwp->aspace = RT_NULL;
    }
    else
    {
        LOG_W("%s: NULL lwp as parameter", __func__);
        RT_ASSERT(0);
    }
}
/* Thin wrappers forwarding to the generic lwp syscall implementations. */
long _sys_clone(void *arg[]);
long sys_clone(void *arg[])
{
    return _sys_clone(arg);
}

long _sys_fork(void);
long sys_fork(void)
{
    return _sys_fork();
}

long _sys_vfork(void);
long sys_vfork(void)
{
    /* NOTE(review): vfork is serviced by the fork implementation (the ARM
     * port does the same — sys_vfork branches to _sys_fork); _sys_vfork
     * is declared but never called here — confirm this is intentional. */
    return _sys_fork();
}
/**
* set exec context for fork/clone.
*/
int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
void *user_stack, void **thread_sp)
{
RT_ASSERT(exit != RT_NULL);
RT_ASSERT(user_stack != RT_NULL);
RT_ASSERT(new_thread_stack != RT_NULL);
RT_ASSERT(thread_sp != RT_NULL);
struct rt_hw_stack_frame *syscall_frame;
struct rt_hw_stack_frame *thread_frame;
rt_uint8_t *stk;
rt_uint8_t *syscall_stk;
stk = (rt_uint8_t *)new_thread_stack;
/* reserve syscall context, all the registers are copyed from parent */
stk -= CTX_REG_NR * REGBYTES;
syscall_stk = stk;
syscall_frame = (struct rt_hw_stack_frame *)stk;
/* modify user sp */
syscall_frame->user_sp_exc_stack = (rt_ubase_t)user_stack;
/* skip ecall */
syscall_frame->epc += 4;
/* child return value is 0 */
syscall_frame->a0 = 0;
syscall_frame->a1 = 0;
/* reset thread area */
rt_thread_t thread = rt_container_of((unsigned long)thread_sp, struct rt_thread, sp);
syscall_frame->tp = (rt_ubase_t)thread->thread_idr;
#ifdef ARCH_USING_NEW_CTX_SWITCH
extern void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus);
rt_ubase_t sstatus = read_csr(sstatus) | SSTATUS_SPP;
sstatus &= ~SSTATUS_SIE;
/* compatible to RESTORE_CONTEXT */
stk = (void *)_rt_hw_stack_init((rt_ubase_t *)stk, (rt_ubase_t)exit, sstatus);
#else
/* build temp thread context */
stk -= sizeof(struct rt_hw_stack_frame);
thread_frame = (struct rt_hw_stack_frame *)stk;
int i;
for (i = 0; i < sizeof(struct rt_hw_stack_frame) / sizeof(rt_ubase_t); i++)
{
((rt_ubase_t *)thread_frame)[i] = 0xdeadbeaf;
}
/* set pc for thread */
thread_frame->epc = (rt_ubase_t)exit;
/* set old exception mode as supervisor, because in kernel */
thread_frame->sstatus = read_csr(sstatus) | SSTATUS_SPP;
thread_frame->sstatus &= ~SSTATUS_SIE; /* must disable interrupt */
/* set stack as syscall stack */
thread_frame->user_sp_exc_stack = (rt_ubase_t)syscall_stk;
#endif /* ARCH_USING_NEW_CTX_SWITCH */
/* save new stack top */
*thread_sp = (void *)stk;
/**
* The stack for child thread:
*
* +------------------------+ --> kernel stack top
* | syscall stack |
* | |
* | @sp | --> `user_stack`
* | @epc | --> user ecall addr + 4 (skip ecall)
* | @a0&a1 | --> 0 (for child return 0)
* | |
* +------------------------+ --> temp thread stack top
* | temp thread stack | ^
* | | |
* | @sp | ---------/
* | @epc | --> `exit` (arch_clone_exit/arch_fork_exit)
* | |
* +------------------------+ --> thread sp
*/
return 0;
}
#define ALGIN_BYTES (16)
struct signal_ucontext
{
rt_int64_t sigreturn;
lwp_sigset_t save_sigmask;
siginfo_t si;
rt_align(16)
struct rt_hw_stack_frame frame;
};
/**
 * Undo the ucontext frame pushed by arch_signal_ucontext_save().
 *
 * Restores the signal mask that was active before the handler ran, then
 * hands back the interrupted register frame so the trap path can resume
 * from it.
 *
 * @param user_sp user stack pointer, pointing at the signal_ucontext
 * @return address of the saved rt_hw_stack_frame inside the ucontext;
 *         does not return when the frame is inaccessible (process exits)
 */
void *arch_signal_ucontext_restore(rt_base_t user_sp)
{
    struct signal_ucontext *new_sp;
    new_sp = (void *)user_sp;

    if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
    {
        /* put back the pre-handler signal mask */
        lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &new_sp->save_sigmask, RT_NULL);
    }
    else
    {
        LOG_I("User frame corrupted during signal handling\nexiting...");
        sys_exit_group(EXIT_FAILURE);
    }

    return (void *)&new_sp->frame;
}
/**
 * Build the signal ucontext frame on the user stack before dispatching
 * a signal handler.
 *
 * The frame layout is struct signal_ucontext: the sigreturn trampoline,
 * the saved signal mask, the siginfo and the interrupted register frame.
 *
 * @param signo         signal number being delivered
 * @param psiginfo      optional siginfo to copy into the frame (may be NULL)
 * @param exp_frame     exception frame of the interrupted user context
 * @param user_sp       user stack pointer to push the frame beneath
 * @param save_sig_mask signal mask to restore at sigreturn time
 * @return user-space address of the new frame; does not return on
 *         user-stack overflow (the process group exits)
 */
void *arch_signal_ucontext_save(int signo, siginfo_t *psiginfo,
        struct rt_hw_stack_frame *exp_frame, rt_base_t user_sp,
        lwp_sigset_t *save_sig_mask)
{
    struct signal_ucontext *new_sp;
    new_sp = (void *)(user_sp - sizeof(struct signal_ucontext));
    if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
    {
        /* push psiginfo */
        if (psiginfo)
        {
            lwp_memcpy(&new_sp->si, psiginfo, sizeof(*psiginfo));
        }

        lwp_memcpy(&new_sp->frame, exp_frame, sizeof(*exp_frame));

        /* copy the save_sig_mask */
        lwp_memcpy(&new_sp->save_sigmask, save_sig_mask, sizeof(lwp_sigset_t));

        /* copy lwp_sigreturn; sized by the field itself instead of a magic 8 */
        extern void lwp_sigreturn(void);
        /* -> ensure that the sigreturn start at the outer most boundary */
        lwp_memcpy(&new_sp->sigreturn, &lwp_sigreturn, sizeof(new_sp->sigreturn));

        /**
         * synchronize dcache & icache if target is
         * a Harvard Architecture machine, otherwise
         * do nothing
         */
        rt_hw_sync_cache_local(&new_sp->sigreturn, sizeof(new_sp->sigreturn));
    }
    else
    {
        LOG_I("%s: User stack overflow", __func__);
        sys_exit_group(EXIT_FAILURE);
    }

    return new_sp;
}
/**
 * Set the errno reported to user space in the exception frame.
 *
 * @param eframe   exception frame of the interrupted context (unused)
 * @param expected the return value the caller expected (unused)
 * @param code     the error code to report (unused)
 *
 * Not supported on this port; the call is a deliberate no-op.
 */
void arch_syscall_set_errno(void *eframe, int expected, int code)
{
    /* NO support: silence unused-parameter warnings */
    (void)eframe;
    (void)expected;
    (void)code;
}
/**
* void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
*/
void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
{
arch_start_umode(args, user_entry, (void *)USER_STACK_VEND, kernel_stack);
}
#endif /* ARCH_MM_MMU */
/**
 * Produce a backtrace for a user thread from its saved exception frame.
 *
 * @param thread target thread; must belong to an lwp
 * @return 0 on success; -1 when the thread has no lwp or its saved
 *         context pointer lies outside the thread's kernel stack
 */
int arch_backtrace_uthread(rt_thread_t thread)
{
    struct rt_hw_backtrace_frame frame;
    struct rt_hw_stack_frame *stack;

    if (!thread || !thread->lwp)
    {
        return -1;
    }

    stack = thread->user_ctx.ctx;
    /* sanity check: the saved frame must live inside the kernel stack.
     * (Unsigned comparisons throughout; the original mixed (long) and
     * (unsigned long) casts, which the usual conversions already made
     * unsigned — this just says so explicitly.) */
    if ((rt_ubase_t)stack > (rt_ubase_t)thread->stack_addr
        && (rt_ubase_t)stack < (rt_ubase_t)thread->stack_addr + thread->stack_size)
    {
        frame.pc = stack->epc;
        frame.fp = stack->s0_fp;
        lwp_backtrace_frame(thread, &frame);
        return 0;
    }

    return -1;
}

View File

@ -0,0 +1,82 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef LWP_ARCH_H__
#define LWP_ARCH_H__
#include <rthw.h>
#include <lwp.h>
#include <lwp_arch_comm.h>
#ifdef ARCH_MM_MMU
#ifdef ARCH_MM_MMU_32BIT_LIMIT
#define USER_HEAP_VADDR 0xF0000000UL
#define USER_HEAP_VEND 0xFE000000UL
#define USER_STACK_VSTART 0xE0000000UL
#define USER_STACK_VEND USER_HEAP_VADDR
#define USER_VADDR_START 0xC0000000UL
#define USER_VADDR_TOP 0xFF000000UL
#define USER_LOAD_VADDR 0xD0000000UL
#define LDSO_LOAD_VADDR USER_LOAD_VADDR
#elif defined(ARCH_REMAP_KERNEL)
#define USER_VADDR_START 0x00001000UL
#define USER_VADDR_TOP 0x003ffffff000UL
#define USER_STACK_VSTART 0x000270000000UL
#define USER_STACK_VEND (USER_HEAP_VADDR - (ARCH_PAGE_SIZE * 8)) /* start of ARGC ARGV ENVP. FIXME: space is ARG_MAX */
#define USER_HEAP_VADDR 0x000300000000UL
#define USER_HEAP_VEND USER_VADDR_TOP
#define USER_LOAD_VADDR 0x200000000
#define LDSO_LOAD_VADDR 0x200000000
#else
#define USER_HEAP_VADDR 0x300000000UL
#define USER_HEAP_VEND USER_STACK_VSTART
#define USER_STACK_VSTART 0x370000000UL
#define USER_STACK_VEND 0x400000000UL
#define USER_VADDR_START 0x200000000UL
#define USER_VADDR_TOP 0xfffffffffffff000UL
#define USER_LOAD_VADDR 0x200000000UL
#define LDSO_LOAD_VADDR 0x200000000UL
#endif
/* this attribution is cpu specified, and it should be defined in riscv_mmu.h */
#ifndef MMU_MAP_U_RWCB
#define MMU_MAP_U_RWCB 0
#endif
#ifndef MMU_MAP_U_RW
#define MMU_MAP_U_RW 0
#endif
#ifdef __cplusplus
extern "C" {
#endif
rt_inline unsigned long rt_hw_ffz(unsigned long x)
{
return __builtin_ffsl(~x) - 1;
}
rt_inline void icache_invalid_all(void)
{
rt_hw_cpu_icache_invalidate_all();
}
struct rt_hw_stack_frame;
void *arch_signal_ucontext_restore(rt_base_t user_sp);
void *arch_signal_ucontext_save(int signo, siginfo_t *psiginfo,
struct rt_hw_stack_frame *exp_frame, rt_base_t user_sp,
lwp_sigset_t *save_sig_mask);
#ifdef __cplusplus
}
#endif
#endif
#endif /*LWP_ARCH_H__*/

View File

@ -0,0 +1,303 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-12-10 Jesven first version
* 2021-02-03 lizhirui port to riscv64
* 2021-02-19 lizhirui port to new version of rt-smart
* 2022-11-08 Wangxiaoyao Cleanup codes;
* Support new context switch
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
*/
#include "rtconfig.h"
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif /* __ASSEMBLY__ */
#include "cpuport.h"
#include "encoding.h"
#include "stackframe.h"
#include "asm-generic.h"
.section .text.lwp
/*
* void arch_start_umode(args, text, ustack, kstack);
*/
.global arch_start_umode
.type arch_start_umode, % function
arch_start_umode:
// load kstack for user process
csrw sscratch, a3
li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt
csrc sstatus, t0
li t0, SSTATUS_SPIE // enable interrupt when return to user mode
csrs sstatus, t0
csrw sepc, a1
mv sp, a2
sret//enter user mode
/*
* void arch_crt_start_umode(args, text, ustack, kstack);
*/
.global arch_crt_start_umode
.type arch_crt_start_umode, % function
arch_crt_start_umode:
li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt
csrc sstatus, t0
li t0, SSTATUS_SPIE // enable interrupt when return to user mode
csrs sstatus, t0
csrw sepc, a1
mv s0, a0
mv s1, a1
mv s2, a2
mv s3, a3
mv a0, s2
call lwp_copy_return_code_to_user_stack
mv a0, s2
call lwp_fix_sp
mv sp, a0//user_sp
mv ra, a0//return address
mv a0, s0//args
csrw sscratch, s3
sret//enter user mode
/**
* Unify exit point from kernel mode to enter user space
* we handle following things here:
* 1. restoring user mode debug state (not support yet)
* 2. handling thread's exit request
* 3. handling POSIX signal
* 4. restoring user context
* 5. jump to user mode
*/
.global arch_ret_to_user
arch_ret_to_user:
// TODO: we don't support kernel gdb server in risc-v yet
// so we don't check debug state here and handle debugging bussiness
call lwp_check_exit_request
beqz a0, 1f
mv a0, x0
call sys_exit
1:
mv a0, sp
call lwp_thread_signal_catch
ret_to_user_exit:
RESTORE_ALL
// `RESTORE_ALL` also reset sp to user sp, and setup sscratch
sret
/**
* Restore user context from exception frame stroraged in ustack
* And handle pending signals;
*/
arch_signal_quit:
LOAD a0, FRAME_OFF_SP(sp)
call arch_signal_ucontext_restore
/* reset kernel sp to the stack */
addi sp, sp, CTX_REG_NR * REGBYTES
STORE sp, FRAME_OFF_SP(a0)
/* return value is user sp */
mv sp, a0
/* restore user sp before enter trap */
addi a0, sp, CTX_REG_NR * REGBYTES
csrw sscratch, a0
RESTORE_ALL
SAVE_ALL
j arch_ret_to_user
/**
* rt_noreturn
* void arch_thread_signal_enter(
* int signo, -> a0
* siginfo_t *psiginfo, -> a1
* void *exp_frame, -> a2
* void *entry_uaddr, -> a3
* lwp_sigset_t *save_sig_mask, -> a4
* )
*/
.global arch_thread_signal_enter
arch_thread_signal_enter:
mv s3, a2
mv s2, a0
mv s1, a3
LOAD t0, FRAME_OFF_SP(a2)
mv a3, t0
call arch_signal_ucontext_save
/** restore kernel sp */
addi sp, s3, CTX_REG_NR * REGBYTES
/**
* set regiter RA to user signal handler
* set sp to user sp & save kernel sp in sscratch
*/
mv ra, a0
csrw sscratch, sp
mv sp, a0
/**
* s1 is signal_handler,
* s1 = !s1 ? lwp_sigreturn : s1;
*/
bnez s1, 1f
mv s1, ra
1:
/* enter user mode and enable interrupt when return to user mode */
li t0, SSTATUS_SPP
csrc sstatus, t0
li t0, SSTATUS_SPIE
csrs sstatus, t0
/* sepc <- signal_handler */
csrw sepc, s1
/* a0 <- signal id */
mv a0, s2
/* a1 <- siginfo */
add a1, sp, 16
/* dummy a2 */
mv a2, a1
/* restore user GP */
LOAD gp, FRAME_OFF_GP(s3)
/**
* handler(signo, psi, ucontext);
*/
sret
.align 3
lwp_debugreturn:
li a7, 0xff
ecall
.align 3
.global lwp_sigreturn
lwp_sigreturn:
li a7, 0xfe
ecall
.align 3
lwp_sigreturn_end:
.align 3
.global lwp_thread_return
lwp_thread_return:
li a0, 0
li a7, 1
ecall
.align 3
.global lwp_thread_return_end
lwp_thread_return_end:
.globl arch_get_tidr
arch_get_tidr:
mv a0, tp
ret
.global arch_set_thread_area
arch_set_thread_area:
.globl arch_set_tidr
arch_set_tidr:
mv tp, a0
ret
.global arch_clone_exit
.global arch_fork_exit
arch_fork_exit:
arch_clone_exit:
j arch_syscall_exit
START_POINT(syscall_entry)
#ifndef ARCH_USING_NEW_CTX_SWITCH
//swap to thread kernel stack
csrr t0, sstatus
andi t0, t0, 0x100
beqz t0, __restore_sp_from_tcb
__restore_sp_from_sscratch: // from kernel
csrr t0, sscratch
j __move_stack_context
__restore_sp_from_tcb: // from user
jal rt_thread_self
jal get_thread_kernel_stack_top
mv t0, a0
__move_stack_context:
mv t1, sp//src
mv sp, t0//switch stack
addi sp, sp, -CTX_REG_NR * REGBYTES
//copy context
li s0, CTX_REG_NR//cnt
mv t2, sp//dst
copy_context_loop:
LOAD t0, 0(t1)
STORE t0, 0(t2)
addi s0, s0, -1
addi t1, t1, 8
addi t2, t2, 8
bnez s0, copy_context_loop
#endif /* ARCH_USING_NEW_CTX_SWITCH */
/* fetch SYSCALL ID */
LOAD a7, 17 * REGBYTES(sp)
addi a7, a7, -0xfe
beqz a7, arch_signal_quit
#ifdef ARCH_MM_MMU
/* save setting when syscall enter */
call rt_thread_self
call lwp_user_setting_save
#endif
mv a0, sp
OPEN_INTERRUPT
call syscall_handler
j arch_syscall_exit
START_POINT_END(syscall_entry)
.global arch_syscall_exit
arch_syscall_exit:
CLOSE_INTERRUPT
#if defined(ARCH_MM_MMU)
LOAD s0, FRAME_OFF_SSTATUS(sp)
andi s0, s0, 0x100
bnez s0, dont_ret_to_user
j arch_ret_to_user
#endif
dont_ret_to_user:
#ifdef ARCH_MM_MMU
/* restore setting when syscall exit */
call rt_thread_self
call lwp_user_setting_restore
/* after restore the reg `tp`, need modify context */
STORE tp, 4 * REGBYTES(sp)
#endif
//restore context
RESTORE_ALL
csrw sscratch, zero
sret

View File

@ -0,0 +1,109 @@
#include "mm_aspace.h"
#include <rtthread.h>
#include <stdint.h>
#include <string.h>
#include <elf.h>
#ifdef ARCH_MM_MMU
#include <mmu.h>
#include <page.h>
#endif
typedef struct
{
Elf64_Word st_name;
Elf64_Addr st_value;
Elf64_Word st_size;
unsigned char st_info;
unsigned char st_other;
Elf64_Half st_shndx;
} Elf64_sym;
#ifdef ARCH_MM_MMU
/**
 * Apply dynamic relocations to a freshly loaded ELF image.
 *
 * Walks the .rel.dyn table (8-byte entries: 4-byte offset, 4-byte info)
 * and patches R_ARM_RELATIVE / R_ARM_ABS32 targets, then rebases every
 * GOT entry. All writes go through the physical alias returned by
 * rt_hw_mmu_v2p() because the target aspace is not the current one.
 *
 * NOTE(review): the symbol type is Elf64_sym while the relocation types
 * (R_ARM_*) and the 4-byte patch width are 32-bit ARM — confirm this
 * pairing is intentional for this port.
 *
 * @param aspace        target address space of the loaded image
 * @param text_start    actual load base of the image
 * @param rel_dyn_start virtual address of the .rel.dyn table
 * @param rel_dyn_size  size of .rel.dyn in bytes
 * @param got_start     virtual address of the GOT
 * @param got_size      size of the GOT in bytes
 * @param dynsym        dynamic symbol table (required when rel_dyn_size != 0)
 */
void arch_elf_reloc(rt_aspace_t aspace, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf64_sym *dynsym)
{
    size_t rel_off;
    void* addr;

    if (rel_dyn_size && !dynsym)
    {
        return;
    }

    for (rel_off = 0; rel_off < rel_dyn_size; rel_off += 8)
    {
        uint32_t v1, v2;

        /* v1 = relocation offset, v2 = relocation info (type in low byte) */
        addr = rt_hw_mmu_v2p(aspace, (void *)(((rt_size_t)rel_dyn_start) + rel_off));
        memcpy(&v1, addr, 4);
        addr = rt_hw_mmu_v2p(aspace, (void *)(((rt_size_t)rel_dyn_start) + rel_off + 4));
        memcpy(&v2, addr, 4);

        /* physical alias of the patch target */
        addr = rt_hw_mmu_v2p(aspace, (void *)((rt_size_t)text_start + v1));
        if ((v2 & 0xff) == R_ARM_RELATIVE)
        {
            /* B + A: rebase by the actual load address */
            *(rt_size_t*)addr += (rt_size_t)text_start;
        }
        else if ((v2 & 0xff) == R_ARM_ABS32)
        {
            uint32_t t;
            t = (v2 >> 8);
            if (t) /* 0 is UDF */
            {
                /* S + A: symbol value relative to the load base */
                *(rt_size_t*)addr = (((rt_size_t)text_start) + dynsym[t].st_value);
            }
        }
    }

    /* modify got */
    if (got_size)
    {
        uint32_t *got_item = (uint32_t*)got_start;

        for (rel_off = 0; rel_off < got_size; rel_off += 4, got_item++)
        {
            addr = rt_hw_mmu_v2p(aspace, got_item);
            *(rt_size_t *)addr += (rt_size_t)text_start;
        }
    }
}
#else
void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf64_sym *dynsym)
{
size_t rel_off;
if (rel_dyn_size && !dynsym)
{
return;
}
for (rel_off = 0; rel_off < rel_dyn_size; rel_off += 8)
{
uint32_t v1, v2;
memcpy(&v1, ((rt_uint8_t *)rel_dyn_start) + rel_off, 4);
memcpy(&v2, ((rt_uint8_t *)rel_dyn_start) + rel_off + 4, 4);
if ((v2 & 0xff) == R_ARM_RELATIVE)
{
*(uint32_t*)(((rt_size_t)text_start) + v1) += (uint32_t)text_start;
}
else if ((v2 & 0xff) == R_ARM_ABS32)
{
uint32_t t;
t = (v2 >> 8);
if (t) /* 0 is UDF */
{
*(uint32_t*)(((rt_size_t)text_start) + v1) = (uint32_t)(((rt_size_t)text_start) + dynsym[t].st_value);
}
}
}
/* modify got */
if (got_size)
{
uint32_t *got_item = (uint32_t*)got_start;
for (rel_off = 0; rel_off < got_size; rel_off += 4, got_item++)
{
*got_item += (uint32_t)text_start;
}
}
}
#endif

View File

@ -0,0 +1,11 @@
# RT-Thread building script for component
from building import *
cwd = GetCurrentDir()
src = Glob('*.c') + Glob('*.S')
CPPPATH = [cwd]
group = DefineGroup('lwp-x86-i386', src, depend = ['RT_USING_SMART'], CPPPATH = CPPPATH)
Return('group')

View File

@ -0,0 +1,371 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-7-14 JasonHu first version
*/
#include <rthw.h>
#include <stddef.h>
#include <rtconfig.h>
#include <rtdbg.h>
#ifdef ARCH_MM_MMU
#include <stackframe.h>
#include <interrupt.h>
#include <segment.h>
#include <mmu.h>
#include <page.h>
#include <lwp_mm_area.h>
#include <lwp_user_mm.h>
#include <lwp_arch.h>
#ifdef RT_USING_SIGNALS
#include <lwp_signal.h>
#endif /* RT_USING_SIGNALS */
extern size_t g_mmu_table[];
int arch_expand_user_stack(void *addr)
{
int ret = 0;
size_t stack_addr = (size_t)addr;
stack_addr &= ~PAGE_OFFSET_MASK;
if ((stack_addr >= (size_t)USER_STACK_VSTART) && (stack_addr < (size_t)USER_STACK_VEND))
{
void *map = lwp_map_user(lwp_self(), (void *)stack_addr, PAGE_SIZE, RT_FALSE);
if (map || lwp_user_accessable(addr, 1))
{
ret = 1; /* map success */
}
else /* map failed, send signal SIGSEGV */
{
#ifdef RT_USING_SIGNALS
LOG_E("[fault] thread %s mapped addr %p failed!\n", rt_thread_self()->parent.name, addr);
lwp_thread_kill(rt_thread_self(), SIGSEGV);
ret = 1; /* return 1, will return back to intr, then check exit */
#endif
}
}
else /* not stack, send signal SIGSEGV */
{
#ifdef RT_USING_SIGNALS
LOG_E("[fault] thread %s access unmapped addr %p!\n", rt_thread_self()->parent.name, addr);
lwp_thread_kill(rt_thread_self(), SIGSEGV);
ret = 1; /* return 1, will return back to intr, then check exit */
#endif
}
return ret;
}
void *get_thread_kernel_stack_top(rt_thread_t thread)
{
return RT_NULL;
}
/**
* don't support this in i386, it's ok!
*/
void *arch_get_user_sp()
{
return RT_NULL;
}
int arch_user_space_init(struct rt_lwp *lwp)
{
rt_size_t *mmu_table;
mmu_table = (rt_size_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
if (!mmu_table)
{
return -1;
}
rt_memset(mmu_table, 0, ARCH_PAGE_SIZE);
lwp->end_heap = USER_HEAP_VADDR;
memcpy(mmu_table, g_mmu_table, ARCH_PAGE_SIZE / 4);
memset((rt_uint8_t *)mmu_table + ARCH_PAGE_SIZE / 4, 0, ARCH_PAGE_SIZE / 4 * 3);
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
if (rt_hw_mmu_map_init(&lwp->mmu_info, (void*)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table, PV_OFFSET) < 0)
{
rt_pages_free(mmu_table, 0);
return -1;
}
return 0;
}
/* Expose the kernel's global page directory. */
void *arch_kernel_mmu_table_get(void)
{
    return (void *)g_mmu_table;
}
/* Release the per-process page directory, if the process owns one. */
void arch_user_space_vtable_free(struct rt_lwp *lwp)
{
    if (!lwp || !lwp->mmu_info.vtable)
    {
        return;
    }

    rt_pages_free(lwp->mmu_info.vtable, 0);
    lwp->mmu_info.vtable = NULL;
}
void arch_set_thread_area(void *p)
{
rt_hw_seg_tls_set((rt_ubase_t) p);
rt_thread_t cur = rt_thread_self();
cur->thread_idr = p; /* update thread idr after first set */
}
void *arch_get_tidr(void)
{
rt_thread_t cur = rt_thread_self();
if (!cur->lwp) /* no lwp, don't get thread idr from tls seg */
return NULL;
return (void *)rt_hw_seg_tls_get(); /* get thread idr from tls seg */
}
void arch_set_tidr(void *p)
{
rt_thread_t cur = rt_thread_self();
if (!cur->lwp) /* no lwp, don't set thread idr to tls seg */
return;
rt_hw_seg_tls_set((rt_ubase_t) p); /* set tls seg addr as thread idr */
}
static void lwp_user_stack_init(rt_hw_stack_frame_t *frame)
{
frame->ds = frame->es = USER_DATA_SEL;
frame->cs = USER_CODE_SEL;
frame->ss = USER_STACK_SEL;
frame->gs = USER_TLS_SEL;
frame->fs = 0; /* unused */
frame->edi = frame->esi = \
frame->ebp = frame->esp_dummy = 0;
frame->eax = frame->ebx = \
frame->ecx = frame->edx = 0;
frame->error_code = 0;
frame->vec_no = 0;
frame->eflags = (EFLAGS_MBS | EFLAGS_IF_1 | EFLAGS_IOPL_3);
}
extern void lwp_switch_to_user(void *frame);
/**
* user entry, set frame.
* at the end of execute, we need enter user mode,
* in x86, we can set stack, arg, text entry in a stack frame,
* then pop then into register, final use iret to switch kernel mode to user mode.
*/
void arch_start_umode(void *args, const void *text, void *ustack, void *k_stack)
{
rt_uint8_t *stk = k_stack;
stk -= sizeof(struct rt_hw_stack_frame);
struct rt_hw_stack_frame *frame = (struct rt_hw_stack_frame *)stk;
lwp_user_stack_init(frame);
frame->esp = (rt_uint32_t)ustack - 32;
frame->ebx = (rt_uint32_t)args;
frame->eip = (rt_uint32_t)text;
lwp_switch_to_user(frame);
/* should never return */
}
void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
{
arch_start_umode(args, (const void *)user_entry, (void *)USER_STACK_VEND, kernel_stack);
}
extern void lwp_thread_return();
extern void lwp_thread_return_end();
static void *lwp_copy_return_code_to_user_stack(void *ustack)
{
size_t size = (size_t)lwp_thread_return_end - (size_t)lwp_thread_return;
void *retcode = (void *)((size_t)ustack - size);
memcpy(retcode, (void *)lwp_thread_return, size);
return retcode;
}
/**
 * Start a user thread created by sys_thread_create().
 *
 * Works like arch_start_umode(), but the thread must also exit cleanly when
 * its entry function returns:
 *     void func(void *arg) { ... }
 * To achieve that, the small exit trampoline (lwp_thread_return) is copied
 * onto the user stack and installed as the fake return address, so a plain
 * `ret` from func lands in the exit code instead of running away.
 */
void arch_crt_start_umode(void *args, const void *text, void *ustack, void *k_stack)
{
    RT_ASSERT(ustack != NULL);
    rt_uint8_t *stk;
    /* NOTE(review): k_stack is offset by one word before aligning down —
     * presumably to keep a reserved slot at the stack top; confirm against
     * the kernel-stack layout expected by the trap entry code. */
    stk = (rt_uint8_t *)((rt_uint8_t *)k_stack + sizeof(rt_ubase_t));
    stk = (rt_uint8_t *)RT_ALIGN_DOWN(((rt_ubase_t)stk), sizeof(rt_ubase_t));
    stk -= sizeof(struct rt_hw_stack_frame);
    struct rt_hw_stack_frame *frame = (struct rt_hw_stack_frame *)stk;
    lwp_user_stack_init(frame);
    /* make user thread stack */
    unsigned long *retcode = lwp_copy_return_code_to_user_stack(ustack); /* copy ret code */
    unsigned long *retstack = (unsigned long *)RT_ALIGN_DOWN(((rt_ubase_t)retcode), sizeof(rt_ubase_t));
    /**
     * x86 call stack layout (grows down):
     *
     * retcode here
     *
     * arg n
     * arg n - 1
     * ...
     * arg 2
     * arg 1
     * arg 0
     * eip (caller return addr, points to retcode)
     * esp
     */
    *(--retstack) = (unsigned long) args; /* arg */
    *(--retstack) = (unsigned long) retcode; /* ret eip */
    frame->esp = (rt_uint32_t)retstack;
    frame->eip = (rt_uint32_t)text;
    lwp_switch_to_user(frame);
    /* should never return */
}
rt_thread_t rt_thread_sp_to_thread(void *spmember_addr)
{
return (rt_thread_t)(((rt_ubase_t)spmember_addr) - (offsetof(struct rt_thread, sp)));
}
/**
 * Set the execution context of a child created by fork/clone.
 *
 * @param exit_addr        address the child jumps to on its first schedule
 *                         (the interrupt-exit path that drops into user mode)
 * @param new_thread_stack top of the child's kernel stack
 * @param user_stack       unused on x86
 * @param thread_sp        out: saved stack pointer to store in the child TCB
 */
void arch_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp)
{
    /**
     * thread kernel stack was set to tss.esp0, when interrupt/syscall occur,
     * the stack frame will store in kernel stack top, so we can get the stack
     * frame by kernel stack top.
     */
    rt_hw_stack_frame_t *frame = (rt_hw_stack_frame_t *)((rt_ubase_t)new_thread_stack - sizeof(rt_hw_stack_frame_t));
    frame->eax = 0; /* child return 0 */
    rt_hw_context_t *context = (rt_hw_context_t *) (((rt_uint32_t *)frame) - HW_CONTEXT_MEMBER_NR);
    context->eip = (void *)exit_addr; /* when thread started, jump to intr exit for enter user mode */
    context->ebp = context->ebx = context->esi = context->edi = 0;
    /**
     * set sp as the address of first member of rt_hw_context,
     * when scheduler call switch, pop stack from context stack.
     */
    *thread_sp = (void *)&context->ebp;
    /**
     * after set context, the stack looks like this:
     *
     *  -----------
     * stack frame| eax = 0
     *  -----------
     * context(only HW_CONTEXT_MEMBER_NR)| eip = rt_hw_intr_exit
     *  -----------
     * thread sp | to <- rt_hw_context_switch(from, to)
     *  -----------
     */
}
#ifdef RT_USING_SIGNALS
/* room reserved on the user stack for the signal-return trampoline */
#define SIGNAL_RET_CODE_SIZE 16
/* Frame pushed on the user stack while a signal handler runs. */
struct rt_signal_frame
{
    char *ret_addr; /* return addr when handler return */
    int signo; /* signal for user handler arg */
    rt_hw_stack_frame_t frame; /* save kernel signal stack */
    char ret_code[SIGNAL_RET_CODE_SIZE]; /* save return code */
};
typedef struct rt_signal_frame rt_signal_frame_t;
extern void lwp_signal_return();
extern void lwp_signal_return_end();
/**
 * Deliver a pending signal (if any) to the current user thread.
 *
 * Called with the trap frame of the interrupted user context; rewrites the
 * frame so the thread resumes in its signal handler. The original frame is
 * preserved in a rt_signal_frame on the user stack and restored by
 * lwp_signal_do_return().
 */
void lwp_try_do_signal(rt_hw_stack_frame_t *frame)
{
    if (!lwp_signal_check())
        return;
    /* 1. backup signal mask */
    int signal = lwp_signal_backup((void *) frame->esp, (void *) frame->eip, (void *) frame->eflags);
    /* 2. get signal handler */
    lwp_sighandler_t handler = lwp_sighandler_get(signal);
    if (handler == RT_NULL) /* no handler, ignore */
    {
        lwp_signal_restore();
        return;
    }
    rt_base_t level = rt_hw_interrupt_disable();
    /* 3. backup frame: carve an 8-byte-aligned rt_signal_frame below the
     * interrupted user esp */
    rt_signal_frame_t *sig_frame = (rt_signal_frame_t *)((frame->esp - sizeof(rt_signal_frame_t)) & -8UL);
    memcpy(&sig_frame->frame, frame, sizeof(rt_hw_stack_frame_t));
    sig_frame->signo = signal;
    /**
     * 4. copy user return code into user stack
     *
     * save current frame on user stack. the user stack like:
     *
     * ----------
     * user code stack
     * ----------+ -> esp before enter kernel
     * signal frame
     * ----------+ -> esp when handle signal handler
     * signal handler stack
     * ----------
     */
    size_t ret_code_size = (size_t)lwp_signal_return_end - (size_t)lwp_signal_return;
    memcpy(sig_frame->ret_code, (void *)lwp_signal_return, ret_code_size);
    sig_frame->ret_addr = sig_frame->ret_code;
    /* 5. jmp to user execute handler, update frame register info */
    lwp_user_stack_init(frame);
    frame->eip = (rt_uint32_t) handler;
    frame->esp = (rt_uint32_t) sig_frame;
    rt_hw_interrupt_enable(level);
}
/**
 * Restore the pre-signal user context after a signal handler returns
 * (entered via the special signal-return syscall issued by the trampoline).
 */
void lwp_signal_do_return(rt_hw_stack_frame_t *frame)
{
    /**
     * ASSUME: in x86, each stack push and pop element is 4 bytes, so
     * STACK_ELEM_SIZE = sizeof(int) => 4.
     * When the signal handler returns, the stack moves to the bottom of the
     * signal frame, but `ret` pops eip from esp, then {esp += STACK_ELEM_SIZE},
     * thus {esp = (signal frame) + STACK_ELEM_SIZE}.
     * So {(signal frame) = esp - STACK_ELEM_SIZE}.
     */
    rt_signal_frame_t *sig_frame = (rt_signal_frame_t *)(frame->esp - sizeof(rt_uint32_t));
    memcpy(frame, &sig_frame->frame, sizeof(rt_hw_stack_frame_t));
    /**
     * restore signal info, but don't use rt_user_context,
     * we use sig_frame to restore stack frame
     */
    lwp_signal_restore();
}
#endif /* RT_USING_SIGNALS */
#endif /* ARCH_MM_MMU */

View File

@ -0,0 +1,49 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-07-18 JasonHu first version
*/
#ifndef LWP_ARCH_H__
#define LWP_ARCH_H__
#include <lwp.h>
#include <lwp_arch_comm.h>
#include <stackframe.h>
#ifdef ARCH_MM_MMU
/* 32-bit x86 user address space layout */
#define USER_VADDR_TOP 0xFFFFF000UL /* top of the user-visible address range */
#define USER_HEAP_VEND 0xE0000000UL /* heap upper limit */
#define USER_HEAP_VADDR 0x90000000UL /* heap base */
#define USER_STACK_VSTART 0x80000000UL /* stack region start */
#define USER_STACK_VEND USER_HEAP_VADDR /* stack top == heap base (stack below heap) */
#define LDSO_LOAD_VADDR 0x70000000UL /* dynamic linker load address */
#define USER_VADDR_START 0x40000000UL /* lowest user virtual address */
#define USER_LOAD_VADDR USER_VADDR_START /* default program load address */
/* special syscall id used by the signal-return trampoline; note the
 * historical "SYSCAL" spelling is part of the public name and must stay */
#define SIGNAL_RETURN_SYSCAL_ID 0xe000
#ifdef __cplusplus
extern "C" {
#endif
rt_thread_t rt_thread_sp_to_thread(void *spmember_addr);
void lwp_signal_do_return(rt_hw_stack_frame_t *frame);
/* Find the index of the first (least-significant) zero bit in x.
 * Note: per GCC docs __builtin_ffsl(0) == 0, so when x has no zero bit the
 * result wraps to (unsigned long)-1. */
rt_inline unsigned long rt_hw_ffz(unsigned long x)
{
    unsigned long inverted = ~x;

    return __builtin_ffsl(inverted) - 1;
}
#ifdef __cplusplus
}
#endif
#endif /* ARCH_MM_MMU */
#endif /*LWP_ARCH_H__*/

View File

@ -0,0 +1,73 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-7-14 JasonHu first version
*/
#include "rtconfig.h"
.section .text.lwp
/*
 * void lwp_switch_to_user(frame);
 *
 * Point esp at the prepared trap frame, pop the saved registers and segment
 * selectors from it, then iret into ring 3.
 */
.global lwp_switch_to_user
lwp_switch_to_user:
    movl 0x4(%esp), %esp    // esp = frame (first argument)
    addl $4,%esp // skip intr no
    popal                   // restore general purpose registers
    popl %gs
    popl %fs
    popl %es
    popl %ds
    addl $4, %esp // skip error_code
    iret // enter to user mode
.extern arch_syscall_exit
/*
 * fork/vfork/clone entry points: forward to the C implementations
 * (_sys_fork/_sys_clone); the *_exit labels resume the child through the
 * common syscall exit path.
 */
.global sys_fork
.global sys_vfork
.global arch_fork_exit
sys_fork:
sys_vfork:
    jmp _sys_fork
arch_fork_exit:
    jmp arch_syscall_exit
.global sys_clone
.global arch_clone_exit
sys_clone:
    jmp _sys_clone
arch_clone_exit:
    jmp arch_syscall_exit
/**
 * rt thread return code: trampoline copied onto the user stack, executed
 * when a thread entry function returns; invokes sys_exit(0) via int 0x80.
 */
.align 4
.global lwp_thread_return
lwp_thread_return:
    movl $1, %eax // eax = 1, sys_exit
    movl $0, %ebx // exit code 0
    int $0x80
.align 4
.global lwp_thread_return_end
lwp_thread_return_end:
#ifdef RT_USING_SIGNALS
/**
 * signal return code: trampoline copied onto the user stack; issues the
 * special signal-return syscall (SIGNAL_RETURN_SYSCAL_ID) when a signal
 * handler returns.
 */
.align 4
.global lwp_signal_return
lwp_signal_return:
    movl $0xe000, %eax // special syscall id for return code
    int $0x80
.align 4
.global lwp_signal_return_end
lwp_signal_return_end:
#endif /* RT_USING_SIGNALS */

View File

@ -0,0 +1,41 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-07-28 JasonHu first version
*/
#include <rtthread.h>
#include <stdint.h>
#include <string.h>
#include <elf.h>
#ifdef ARCH_MM_MMU
#include <mmu.h>
#include <page.h>
#endif
/* ELF32 symbol table entry (local mirror of the standard Elf32_Sym layout;
 * NOTE(review): confirm it stays in sync with the toolchain's elf.h). */
typedef struct
{
    Elf32_Word st_name;     /* symbol name: string table offset */
    Elf32_Addr st_value;    /* symbol value (address) */
    Elf32_Word st_size;     /* symbol size in bytes */
    unsigned char st_info;  /* type and binding */
    unsigned char st_other; /* visibility */
    Elf32_Half st_shndx;    /* section header index */
} Elf32_sym;
#ifdef ARCH_MM_MMU
/* Dynamic-relocation hook for the ELF loader. Empty on this architecture:
 * no relocation is performed here.
 * NOTE(review): confirm whether x86 relocation support is intentionally
 * omitted or still pending. */
void arch_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
{
}
#else
/* No-MMU variant of the relocation hook; likewise a no-op. */
void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
{
}
#endif

View File

@ -0,0 +1,171 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-07-11 RT-Thread first version
*/
#ifndef __LIBC_MUSL_H__
#define __LIBC_MUSL_H__
/* from reboot.h */
#define RB_AUTOBOOT 0x01234567
#define RB_HALT_SYSTEM 0xcdef0123
#define RB_ENABLE_CAD 0x89abcdef
#define RB_DISABLE_CAD 0
#define RB_POWER_OFF 0x4321fedc
#define RB_SW_SUSPEND 0xd000fce2
#define RB_KEXEC 0x45584543
/* from internal/futex.h */
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
#define FUTEX_FD 2
#define FUTEX_REQUEUE 3
#define FUTEX_CMP_REQUEUE 4
#define FUTEX_WAKE_OP 5
#define FUTEX_LOCK_PI 6
#define FUTEX_UNLOCK_PI 7
#define FUTEX_TRYLOCK_PI 8
#define FUTEX_WAIT_BITSET 9
#define FUTEX_PRIVATE 128
#define FUTEX_CLOCK_REALTIME 256
#define FUTEX_WAITERS 0x80000000
#define FUTEX_OWNER_DIED 0x40000000
#define FUTEX_TID_MASK 0x3fffffff
/* Node of a thread's robust futex list (Linux/musl ABI, see set_robust_list(2)). */
struct robust_list
{
    struct robust_list *next;
};
/* Head of the robust futex list registered from user space. */
struct robust_list_head
{
    struct robust_list list;
    long futex_offset;                   /* offset from list entry to the futex word */
    struct robust_list *list_op_pending; /* entry being (un)locked when the thread died */
};
/* for pmutex op */
#define PMUTEX_INIT 0
#define PMUTEX_LOCK 1
#define PMUTEX_UNLOCK 2
#define PMUTEX_DESTROY 3
/* for sys/mman.h */
#define MAP_SHARED 0x01
#define MAP_PRIVATE 0x02
#define MAP_SHARED_VALIDATE 0x03
#define MAP_TYPE 0x0f
#define MAP_FIXED 0x10
#define MAP_ANON 0x20
#define MAP_ANONYMOUS MAP_ANON
#define MAP_NORESERVE 0x4000
#define MAP_GROWSDOWN 0x0100
#define MAP_DENYWRITE 0x0800
#define MAP_EXECUTABLE 0x1000
#define MAP_LOCKED 0x2000
#define MAP_POPULATE 0x8000
#define MAP_NONBLOCK 0x10000
#define MAP_STACK 0x20000
#define MAP_HUGETLB 0x40000
#define MAP_SYNC 0x80000
#define MAP_FIXED_NOREPLACE 0x100000
#define MAP_FILE 0
#define MAP_UNINITIALIZED 0x4000000
#define MAP_HUGE_SHIFT 26
#define MAP_HUGE_MASK 0x3f
#define MAP_HUGE_16KB (14 << 26)
#define MAP_HUGE_64KB (16 << 26)
#define MAP_HUGE_512KB (19 << 26)
#define MAP_HUGE_1MB (20 << 26)
#define MAP_HUGE_2MB (21 << 26)
#define MAP_HUGE_8MB (23 << 26)
#define MAP_HUGE_16MB (24 << 26)
#define MAP_HUGE_32MB (25 << 26)
#define MAP_HUGE_256MB (28 << 26)
#define MAP_HUGE_512MB (29 << 26)
#define MAP_HUGE_1GB (30 << 26)
#define MAP_HUGE_2GB (31 << 26)
#define MAP_HUGE_16GB (34U << 26)
#define PROT_NONE 0
#define PROT_READ 1
#define PROT_WRITE 2
#define PROT_EXEC 4
#define PROT_GROWSDOWN 0x01000000
#define PROT_GROWSUP 0x02000000
#define MS_ASYNC 1
#define MS_INVALIDATE 2
#define MS_SYNC 4
#define MCL_CURRENT 1
#define MCL_FUTURE 2
#define MCL_ONFAULT 4
#define POSIX_MADV_NORMAL 0
#define POSIX_MADV_RANDOM 1
#define POSIX_MADV_SEQUENTIAL 2
#define POSIX_MADV_WILLNEED 3
#define POSIX_MADV_DONTNEED 4
#define CLONE_VM 0x00000100
#define CLONE_FS 0x00000200
#define CLONE_FILES 0x00000400
#define CLONE_SIGHAND 0x00000800
#define CLONE_PTRACE 0x00002000
#define CLONE_VFORK 0x00004000
#define CLONE_PARENT 0x00008000
#define CLONE_THREAD 0x00010000
#define CLONE_NEWNS 0x00020000
#define CLONE_SYSVSEM 0x00040000
#define CLONE_SETTLS 0x00080000
#define CLONE_PARENT_SETTID 0x00100000
#define CLONE_CHILD_CLEARTID 0x00200000
#define CLONE_DETACHED 0x00400000
#define CLONE_UNTRACED 0x00800000
#define CLONE_CHILD_SETTID 0x01000000
#define CLONE_NEWCGROUP 0x02000000
#define CLONE_NEWUTS 0x04000000
#define CLONE_NEWIPC 0x08000000
#define CLONE_NEWUSER 0x10000000
#define CLONE_NEWPID 0x20000000
#define CLONE_NEWNET 0x40000000
#define CLONE_IO 0x80000000
/* arg[] -> flags
* stack
* new_tid
* tls
* set_clear_tid_address
* quit_func
* start_args
* */
#define SYS_CLONE_ARGS_NR 7
/* wait.h */
/* options */
#define WNOHANG 1
#define WUNTRACED 2
#define WSTOPPED 2
#define WEXITED 4
#define WCONTINUED 8
#define WNOWAIT 0x1000000
#define __WNOTHREAD 0x20000000
#define __WALL 0x40000000
#define __WCLONE 0x80000000
#endif /* __LIBC_MUSL_H__ */

View File

@ -0,0 +1,607 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-03-12 Bernard first version
* 2018-11-02 heyuanjie fix complie error in iar
* 2021-02-03 lizhirui add 64-bit arch support and riscv64 arch support
* 2021-08-26 linzhenxing add lwp_setcwd\lwp_getcwd
* 2023-02-20 wangxiaoyao inv icache before new app startup
* 2023-02-20 wangxiaoyao fix bug on foreground app switch
* 2023-10-16 Shell Support a new backtrace framework
* 2023-11-17 xqyjlj add process group and session support
* 2023-11-30 Shell add lwp_startup()
*/
#define DBG_TAG "lwp"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <rthw.h>
#include <rtthread.h>
#include <dfs_file.h>
#include <unistd.h>
#include <stdio.h> /* rename() */
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/statfs.h> /* statfs() */
#include <lwp_elf.h>
#ifndef RT_USING_DFS
#error "lwp need file system(RT_USING_DFS)"
#endif
#include "lwp_internal.h"
#include "lwp_arch.h"
#include "lwp_arch_comm.h"
#include "lwp_signal.h"
#include "lwp_dbg.h"
#include <terminal/terminal.h>
#ifdef ARCH_MM_MMU
#include <lwp_user_mm.h>
#endif /* end of ARCH_MM_MMU */
#ifndef O_DIRECTORY
#define O_DIRECTORY 0x200000
#endif
#ifndef O_BINARY
#define O_BINARY 0x10000
#endif
#ifdef DFS_USING_WORKDIR
extern char working_directory[];
#endif
/**
 * Initialize the LwP component: thread ids, process ids, IPC channels and
 * futexes, in that order; stops at the first failure.
 *
 * @return RT_EOK on success, or the first failing subsystem's error code.
 */
static int lwp_component_init(void)
{
    int rc;
    if ((rc = lwp_tid_init()) != RT_EOK)
    {
        /* fixed: the message previously blamed lwp_component_init() itself */
        LOG_E("%s: lwp_tid_init() failed", __func__);
    }
    else if ((rc = lwp_pid_init()) != RT_EOK)
    {
        LOG_E("%s: lwp_pid_init() failed", __func__);
    }
    else if ((rc = rt_channel_component_init()) != RT_EOK)
    {
        LOG_E("%s: rt_channel_component_init failed", __func__);
    }
    else if ((rc = lwp_futex_init()) != RT_EOK)
    {
        LOG_E("%s: lwp_futex_init() failed", __func__);
    }
    return rc;
}
INIT_COMPONENT_EXPORT(lwp_component_init);
/**
 * Set the current working directory.
 *
 * Paths of DFS_PATH_MAX or more characters are rejected. Inside a process
 * the per-process working directory is updated, otherwise the global
 * (kernel) working directory is used.
 *
 * @param buf NUL-terminated path.
 */
void lwp_setcwd(char *buf)
{
    struct rt_lwp *lwp = RT_NULL;
    if(strlen(buf) >= DFS_PATH_MAX)
    {
        rt_kprintf("buf too long!\n");
        return ;
    }
    lwp = (struct rt_lwp *)rt_thread_self()->lwp;
    if (lwp)
    {
        rt_strncpy(lwp->working_directory, buf, DFS_PATH_MAX - 1);
        /* strncpy-style copies do not NUL-terminate when the source fills
         * the limit; terminate explicitly so the path is always a string */
        lwp->working_directory[DFS_PATH_MAX - 1] = '\0';
    }
    else
    {
        rt_strncpy(working_directory, buf, DFS_PATH_MAX - 1);
        working_directory[DFS_PATH_MAX - 1] = '\0';
    }
    return ;
}
/**
 * Get the current working directory.
 *
 * Returns the process working directory when the caller belongs to a
 * process and that directory is absolute; otherwise falls back to the
 * global (kernel) working directory.
 */
char *lwp_getcwd(void)
{
    struct rt_lwp *lwp = RT_NULL;
    rt_thread_t self = rt_thread_self();

    if (self != RT_NULL)
    {
        lwp = (struct rt_lwp *)self->lwp;
    }

    /* no process context, or the process cwd is not an absolute path:
     * use the global working directory */
    if (lwp == RT_NULL || lwp->working_directory[0] != '/')
    {
        return &working_directory[0];
    }

    return &lwp->working_directory[0];
}
/**
* RT-Thread light-weight process
*/
void lwp_set_kernel_sp(uint32_t *sp)
{
rt_thread_self()->kernel_sp = (rt_uint32_t *)sp;
}
/**
 * Get the kernel-mode stack pointer of the current thread.
 *
 * With an MMU the thread's saved sp is returned directly; otherwise the
 * result depends on whether a context switch was requested from interrupt
 * context.
 */
uint32_t *lwp_get_kernel_sp(void)
{
#ifdef ARCH_MM_MMU
    return (uint32_t *)rt_thread_self()->sp;
#else
    uint32_t* kernel_sp;
    extern rt_uint32_t rt_interrupt_from_thread;
    extern rt_uint32_t rt_thread_switch_interrupt_flag;
    if (rt_thread_switch_interrupt_flag)
    {
        /* a switch is pending from interrupt context: take the kernel sp of
         * the thread being switched away from */
        kernel_sp = (uint32_t *)((rt_thread_t)rt_container_of(rt_interrupt_from_thread, struct rt_thread, sp))->kernel_sp;
    }
    else
    {
        kernel_sp = (uint32_t *)rt_thread_self()->kernel_sp;
    }
    return kernel_sp;
#endif
}
/* lwp-thread clean up routine: detaches per-thread signal state and drops
 * the thread's reference on its process. */
void lwp_cleanup(struct rt_thread *tid)
{
    struct rt_lwp *process;

    if (tid == NULL)
    {
        LOG_I("%s: invalid parameter tid == NULL", __func__);
        return;
    }

    LOG_D("cleanup thread: %s, stack_addr: 0x%x", tid->parent.name, tid->stack_addr);

    /**
     * Brief: lwp thread cleanup
     *
     * Note: Critical Section
     * - thread control block (RW. It's ensured that no one else can access
     *   tcb other than itself)
     */
    process = (struct rt_lwp *)tid->lwp;
    lwp_thread_signal_detach(&tid->signal);

    /* tty will be release in lwp_ref_dec() if ref is cleared */
    lwp_ref_dec(process);
}
/* Wire up stdin/stdout/stderr (fds 0-2) of a freshly created process to
 * /dev/console. Best-effort: on any failure the process simply starts
 * without standard descriptors. */
static void lwp_execve_setup_stdio(struct rt_lwp *lwp)
{
    struct dfs_fdtable *lwp_fdt;
    struct dfs_file *cons_file;
    int cons_fd;
    lwp_fdt = &lwp->fdt;
    /* open console */
    cons_fd = open("/dev/console", O_RDWR);
    if (cons_fd < 0)
    {
        LOG_E("%s: Cannot open console tty", __func__);
        return ;
    }
    LOG_D("%s: open console as fd %d", __func__, cons_fd);
    /* init 4 fds */
    lwp_fdt->fds = rt_calloc(4, sizeof(void *));
    if (lwp_fdt->fds)
    {
        cons_file = fd_get(cons_fd);
        lwp_fdt->maxfd = 4;
        /* fds 0/1/2 of the child all reference the console file */
        fdt_fd_associate_file(lwp_fdt, 0, cons_file);
        fdt_fd_associate_file(lwp_fdt, 1, cons_file);
        fdt_fd_associate_file(lwp_fdt, 2, cons_file);
    }
    /* drop the temporary descriptor in the caller's table; presumably the
     * associations above hold their own references — TODO confirm */
    close(cons_fd);
    return;
}
/* First code executed by a new process' main thread: installs the cleanup
 * hook, optionally plants a debug trap at the entry point, then drops into
 * user mode and never returns. */
static void _lwp_thread_entry(void *parameter)
{
    rt_thread_t tid;
    struct rt_lwp *lwp;
    tid = rt_thread_self();
    lwp = (struct rt_lwp *)tid->lwp;
    tid->cleanup = lwp_cleanup;
    tid->user_stack = RT_NULL;
    if (lwp->debug)
    {
        /* replace the first instruction with the debug trap so the debugger
         * gains control before any user code runs; the original word is kept
         * in bak_first_inst */
        lwp->bak_first_inst = *(uint32_t *)lwp->text_entry;
        *(uint32_t *)lwp->text_entry = dbg_get_ins();
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, sizeof(uint32_t));
        icache_invalid_all();
    }
    /**
     * without ASID support, it will be a special case when trying to run application
     * and exit multiple times and a same page frame allocated to it bound to
     * different text segment. Then we are in a situation where icache contains
     * out-of-dated data and must be handle by the running core itself.
     * with ASID support, this should be a rare case that ASID & page frame both
     * identical to previous running application.
     *
     * For a new application loaded into memory, icache are seen as empty. And there
     * should be nothing in the icache entry to match. So this icache invalidation
     * operation should have barely influence.
     */
    rt_hw_icache_invalidate_all();
#ifdef ARCH_MM_MMU
    arch_start_umode(lwp->args, lwp->text_entry, (void *)USER_STACK_VEND, (char *)tid->stack_addr + tid->stack_size);
#else
    arch_start_umode(lwp->args, lwp->text_entry, lwp->data_entry, (void *)((uint32_t)lwp->data_entry + lwp->data_size));
#endif /* ARCH_MM_MMU */
}
/* Return the process of the calling thread, or RT_NULL when the caller is
 * a pure kernel thread (or no thread context is available). */
struct rt_lwp *lwp_self(void)
{
    rt_thread_t self = rt_thread_self();

    return self ? (struct rt_lwp *)self->lwp : RT_NULL;
}
/* Link `child` at the head of `parent`'s children list and take the cross
 * references the two processes hold on each other. */
rt_err_t lwp_children_register(struct rt_lwp *parent, struct rt_lwp *child)
{
    LWP_LOCK(parent);
    /* push onto the singly-linked sibling list */
    child->sibling = parent->first_child;
    parent->first_child = child;
    child->parent = parent;
    LWP_UNLOCK(parent);

    LOG_D("%s(parent=%p, child=%p)", __func__, parent, child);

    /* parent holds reference to child */
    lwp_ref_inc(parent);
    /* child holds reference to parent */
    lwp_ref_inc(child);
    return 0;
}
/* Remove `child` from `parent`'s children list and drop the references
 * taken by lwp_children_register(). The child MUST be on the list. */
rt_err_t lwp_children_unregister(struct rt_lwp *parent, struct rt_lwp *child)
{
    struct rt_lwp **it;

    LWP_LOCK(parent);
    /* walk the sibling list until the slot pointing at `child` is found */
    for (it = &parent->first_child; *it != child; it = &(*it)->sibling)
    {
        RT_ASSERT(*it != RT_NULL);
    }
    *it = child->sibling;
    child->parent = RT_NULL;
    LWP_UNLOCK(parent);

    LOG_D("%s(parent=%p, child=%p)", __func__, parent, child);

    lwp_ref_dec(child);
    lwp_ref_dec(parent);
    return 0;
}
/**
 * Copy kernel-supplied argc/argv/envp into the new process' user space.
 *
 * @return the user-space aux vector, or RT_NULL on failure.
 *
 * NOTE(review): when argc > 0 the last argv entry is moved into tail_argv
 * and argv[argc - 1] is overwritten with NULL in the CALLER's array without
 * being restored — confirm callers do not reuse argv afterwards.
 */
struct process_aux *argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
    struct lwp_args_info ai;
    rt_err_t error;
    struct process_aux *ua;
    const char **tail_argv[2] = {0};
    error = lwp_args_init(&ai);
    if (error)
    {
        return RT_NULL;
    }
    if (argc > 0)
    {
        /* split off the final argument so the argv array handed to
         * lwp_args_put() is NULL-terminated, then append the tail */
        tail_argv[0] = (void *)argv[argc - 1];
        argv[argc - 1] = NULL;
        lwp_args_put(&ai, (void *)argv, LWP_ARGS_TYPE_KARG);
        lwp_args_put(&ai, (void *)tail_argv, LWP_ARGS_TYPE_KARG);
    }
    lwp_args_put(&ai, (void *)envp, LWP_ARGS_TYPE_KENVP);
    ua = lwp_argscopy(lwp, &ai);
    lwp_args_detach(&ai);
    return ua;
}
/**
 * Load and start a new user process from an executable file.
 *
 * @param filename path of the executable (must exist and be executable)
 * @param debug    non-zero to start the process under debugger control
 * @param argc     number of entries in argv
 * @param argv     argument vector copied into the process
 * @param envp     environment copied into the process
 *
 * @return pid of the new process on success, or a negative errno-style
 *         code (-EINVAL/-EACCES/-ENOMEM/-RT_ERROR) on failure.
 */
pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
{
    int result;
    struct rt_lwp *lwp;
    char *thread_name;
    struct process_aux *aux;
    int tid = 0;
    if (filename == RT_NULL)
    {
        return -EINVAL;
    }
    if (access(filename, X_OK) != 0)
    {
        return -EACCES;
    }
    lwp = lwp_create(LWP_CREATE_FLAG_ALLOC_PID | LWP_CREATE_FLAG_NOTRACE_EXEC);
    if (lwp == RT_NULL)
    {
        LOG_E("lwp struct out of memory!\n");
        return -ENOMEM;
    }
    LOG_D("lwp malloc : %p, size: %d!", lwp, sizeof(struct rt_lwp));
    if ((tid = lwp_tid_get()) == 0)
    {
        lwp_ref_dec(lwp);
        return -ENOMEM;
    }
#ifdef ARCH_MM_MMU
    if (lwp_user_space_init(lwp, 0) != 0)
    {
        lwp_tid_put(tid);
        lwp_ref_dec(lwp);
        return -ENOMEM;
    }
#endif
    /* copy arguments/environment into the new address space */
    if ((aux = argscopy(lwp, argc, argv, envp)) == RT_NULL)
    {
        lwp_tid_put(tid);
        lwp_ref_dec(lwp);
        return -ENOMEM;
    }
    /* load the executable image */
    result = lwp_load(filename, lwp, RT_NULL, 0, aux);
    if (result == RT_EOK)
    {
        rt_thread_t thread = RT_NULL;
        rt_uint32_t priority = 25, tick = 200;
        lwp_execve_setup_stdio(lwp);
        /* obtain the base name */
        thread_name = strrchr(filename, '/');
        thread_name = thread_name ? thread_name + 1 : filename;
#ifndef ARCH_MM_MMU
        /* without MMU, priority/tick may be overridden by the app header */
        struct lwp_app_head *app_head = lwp->text_entry;
        if (app_head->priority)
        {
            priority = app_head->priority;
        }
        if (app_head->tick)
        {
            tick = app_head->tick;
        }
#endif /* not defined ARCH_MM_MMU */
        thread = rt_thread_create(thread_name, _lwp_thread_entry, RT_NULL,
                LWP_TASK_STACK_SIZE, priority, tick);
        if (thread != RT_NULL)
        {
            struct rt_lwp *self_lwp;
            rt_session_t session;
            rt_processgroup_t group;
            thread->tid = tid;
            lwp_tid_set_thread(tid, thread);
            LOG_D("lwp kernel => (0x%08x, 0x%08x)\n", (rt_size_t)thread->stack_addr,
                    (rt_size_t)thread->stack_addr + thread->stack_size);
            self_lwp = lwp_self();
            /* when create init, self_lwp == null */
            if (self_lwp == RT_NULL && lwp_to_pid(lwp) != 1)
            {
                self_lwp = lwp_from_pid_and_lock(1);
            }
            if (self_lwp)
            {
                /* lwp add to children link */
                lwp_children_register(self_lwp, lwp);
            }
            /* place the new process in a fresh process group; a process
             * without a parent also becomes a new session leader */
            session = RT_NULL;
            group = RT_NULL;
            group = lwp_pgrp_create(lwp);
            if (group)
            {
                lwp_pgrp_insert(group, lwp);
                if (self_lwp == RT_NULL)
                {
                    session = lwp_session_create(lwp);
                    lwp_session_insert(session, group);
                }
                else
                {
                    session = lwp_session_find(lwp_sid_get_byprocess(self_lwp));
                    lwp_session_insert(session, group);
                }
            }
            thread->lwp = lwp;
#ifndef ARCH_MM_MMU
            struct lwp_app_head *app_head = (struct lwp_app_head*)lwp->text_entry;
            thread->user_stack = app_head->stack_offset ?
                              (void *)(app_head->stack_offset -
                                       app_head->data_offset +
                                       (uint32_t)lwp->data_entry) : RT_NULL;
            thread->user_stack_size = app_head->stack_size;
            /* init data area */
            rt_memset(lwp->data_entry, 0, lwp->data_size);
            /* init user stack */
            rt_memset(thread->user_stack, '#', thread->user_stack_size);
#endif /* not defined ARCH_MM_MMU */
            rt_list_insert_after(&lwp->t_grp, &thread->sibling);
            lwp->did_exec = RT_TRUE;
            if (debug && rt_dbg_ops)
            {
                /* bind debugged processes to CPU 0 */
                lwp->debug = debug;
                rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void*)0);
            }
            rt_thread_startup(thread);
            return lwp_to_pid(lwp);
        }
    }
    /* failure: release the tid and the creation reference */
    lwp_tid_put(tid);
    lwp_ref_dec(lwp);
    return -RT_ERROR;
}
#ifdef RT_USING_MUSLLIBC
extern char **__environ;
#else
char **__environ = 0;
#endif
/* Shell-facing exec: publish the OS name into the environment, then start
 * the program with the inherited environment. */
pid_t exec(char *filename, int debug, int argc, char **argv)
{
    char **envp = __environ;

    setenv("OS", "RT-Thread", 1);
    return lwp_execve(filename, debug, argc, argv, envp);
}
#ifdef ARCH_MM_MMU
/* Save per-thread user-mode settings (currently the TLS/thread-id register)
 * into the thread control block. */
void lwp_user_setting_save(rt_thread_t thread)
{
    if (!thread)
    {
        return;
    }
    thread->thread_idr = arch_get_tidr();
}
/**
 * Restore per-thread user-mode settings after a context switch: the TLS
 * register, the debug process id, and single-step state.
 */
void lwp_user_setting_restore(rt_thread_t thread)
{
    if (!thread)
    {
        return;
    }
#if !defined(ARCH_RISCV64)
    /* tidr will be set in RESTORE_ALL in risc-v */
    arch_set_tidr(thread->thread_idr);
#endif
    if (rt_dbg_ops)
    {
        struct rt_lwp *l = (struct rt_lwp *)thread->lwp;
        if (l != 0)
        {
            rt_hw_set_process_id((size_t)l->pid);
        }
        else
        {
            rt_hw_set_process_id(0);
        }
        if (l && l->debug)
        {
            uint32_t step_type = 0;
            step_type = dbg_step_type();
            /* step_type: presumably 1 = step only the stepping thread,
             * 2 = step any thread — TODO confirm against dbg_step_type() */
            if ((step_type == 2) || (thread->step_exec && (step_type == 1)))
            {
                dbg_activate_step();
            }
            else
            {
                dbg_deactivate_step();
            }
        }
    }
}
#endif /* ARCH_MM_MMU */
/* Record the user-mode exception context pointer of the current thread. */
void lwp_uthread_ctx_save(void *ctx)
{
    rt_thread_self()->user_ctx.ctx = ctx;
}
/* Clear the user-mode exception context pointer of the current thread. */
void lwp_uthread_ctx_restore(void)
{
    rt_thread_self()->user_ctx.ctx = RT_NULL;
}
/**
 * Dump a user-thread backtrace as raw PC values for offline symbolization
 * with addr2line.
 *
 * @return RT_EOK when a trace was printed, -RT_ERROR otherwise.
 */
rt_err_t lwp_backtrace_frame(rt_thread_t uthread, struct rt_hw_backtrace_frame *frame)
{
    char **argv;
    rt_lwp_t lwp;
    long depth;

    if (!uthread || !uthread->lwp || !rt_scheduler_is_available())
    {
        return -RT_ERROR;
    }
    lwp = uthread->lwp;

    /* tell the user which ELF to feed into addr2line */
    argv = lwp_get_command_line_args(lwp);
    if (argv)
    {
        rt_kprintf("please use: addr2line -e %s -a -f\n", argv[0]);
        lwp_free_command_line_args(argv);
    }
    else
    {
        rt_kprintf("please use: addr2line -e %s -a -f\n", lwp->cmd);
    }

    /* print PCs until the unwinder stops or the depth limit is reached */
    for (depth = 0; depth < RT_BACKTRACE_LEVEL_MAX_NR; depth++)
    {
        rt_kprintf(" 0x%lx", frame->pc);
        if (rt_hw_backtrace_frame_unwind(uthread, frame))
        {
            break;
        }
    }
    rt_kprintf("\n");
    return RT_EOK;
}

View File

@ -0,0 +1,426 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-06-29 heyuanjie first version
* 2019-10-12 Jesven Add MMU and userspace support
* 2020-10-08 Bernard Architecture and code cleanup
* 2021-08-26 linzhenxing add lwp_setcwd\lwp_getcwd
* 2023-11-17 xqyjlj add process group and session support
* 2023-12-02 Shell Add macro to create lwp status and
* fix dead lock problem on pgrp
*/
/*
* RT-Thread light-weight process
*/
#ifndef __LWP_H__
#define __LWP_H__
#include <stdint.h>
#include <rthw.h>
#include <rtthread.h>
#include <dfs.h>
#include "lwp_arch.h"
#include "lwp_pid.h"
#include "lwp_ipc.h"
#include "lwp_signal.h"
#include "lwp_syscall.h"
#include "lwp_avl.h"
#include "lwp_args.h"
#include "mm_aspace.h"
#ifdef RT_USING_MUSLLIBC
#include "libc_musl.h"
#endif /* RT_USING_MUSLLIBC */
#ifdef ARCH_MM_MMU
#include "lwp_shm.h"
#include <locale.h>
#include "mmu.h"
#include "page.h"
#else
#include "lwp_mpu.h"
#endif /* ARCH_MM_MMU */
#ifdef RT_USING_MUSLLIBC
#include <locale.h>
#endif /* RT_USING_MUSLLIBC */
#ifdef __cplusplus
extern "C" {
#endif
#define LWP_MAGIC 0x5A
#define LWP_TYPE_FIX_ADDR 0x01
#define LWP_TYPE_DYN_ADDR 0x02
#define LWP_ARG_MAX 8
/* Per-process memory-object context used by the user-space mapper. */
struct rt_lwp_objs
{
    rt_aspace_t source;        /* address space the objects belong to */
    struct rt_mem_obj mem_obj; /* memory-object operations */
};
/* signalfd notification registration: callback plus its wait queue. */
struct rt_lwp_notify
{
    void (*notify)(rt_wqueue_t *signalfd_queue, int signo);
    rt_wqueue_t *signalfd_queue;
    rt_slist_t list_node; /* node in the process' signalfd_notify_head list */
};
struct lwp_tty;
#ifdef RT_USING_MUSLLIBC
#define LWP_COREDUMP_FLAG 0x80
#define LWP_CREATE_STAT_EXIT(exit_code) (((exit_code)&0xff) << 8)
#define LWP_CREATE_STAT_SIGNALED(signo, coredump) (((signo) & 0x7f) | (coredump ? LWP_COREDUMP_FLAG : 0))
#define LWP_CREATE_STAT_STOPPED(signo) (LWP_CREATE_STAT_EXIT(signo) | 0x7f)
#define LWP_CREATE_STAT_CONTINUED (0xffff)
#else
#error "No compatible lwp set status provided for this libc"
#endif
typedef struct rt_lwp *rt_lwp_t;
typedef struct rt_session *rt_session_t;
typedef struct rt_processgroup *rt_processgroup_t;
/* POSIX session: a collection of process groups, optionally attached to a
 * controlling terminal. */
struct rt_session {
    struct rt_object object;
    rt_lwp_t leader;        /* session leader process */
    rt_list_t processgroup; /* list of member process groups */
    pid_t sid;              /* session id */
    pid_t foreground_pgid;  /* foreground process group id */
    struct rt_mutex mutex;
    struct lwp_tty *ctty;   /* controlling terminal, if any */
};
/* POSIX process group: a set of processes within one session. */
struct rt_processgroup {
    struct rt_object object;
    rt_lwp_t leader;            /* process group leader */
    rt_list_t process;          /* list of member processes */
    rt_list_t pgrp_list_node;   /* node in the owning session's group list */
    pid_t pgid;                 /* process group id */
    pid_t sid;                  /* id of the owning session */
    struct rt_session *session; /* owning session */
    struct rt_mutex mutex;
    rt_atomic_t ref;            /* reference count */
    /* flags on process group */
    unsigned int is_orphaned:1;
};
/* Process control block of a light-weight process. */
struct rt_lwp
{
#ifdef ARCH_MM_MMU
    size_t end_heap;    /* current heap end (program break) */
    rt_aspace_t aspace; /* user address space */
#else
#ifdef ARCH_MM_MPU
    struct rt_mpu_info mpu_info;
#endif /* ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
#ifdef RT_USING_SMP
    int bind_cpu;
#endif
    uint8_t lwp_type;
    uint8_t reserv[3];
    /* flags */
    unsigned int terminated:1;
    unsigned int background:1;
    unsigned int term_ctrlterm:1; /* have control terminal? */
    unsigned int did_exec:1; /* Whether exec has been performed */
    unsigned int jobctl_stopped:1; /* job control: current proc is stopped */
    unsigned int wait_reap_stp:1; /* job control: has wait event for parent */
    unsigned int sig_protected:1; /* signal: protected proc cannot be killed or stopped */
    struct rt_lwp *parent; /* parent process */
    struct rt_lwp *first_child; /* first child process */
    struct rt_lwp *sibling; /* sibling(child) process */
    struct rt_wqueue waitpid_waiters; /* waiters blocked in waitpid() */
    lwp_status_t lwp_status; /* exit/stop status (see LWP_CREATE_STAT_*) */
    void *text_entry;
    uint32_t text_size;
    void *data_entry;
    uint32_t data_size;
    rt_atomic_t ref; /* reference count */
    void *args; /* argument block for the new process */
    uint32_t args_length;
    pid_t pid;
    pid_t sid; /* session ID */
    pid_t pgid; /* process group ID */
    struct rt_processgroup *pgrp;
    rt_list_t pgrp_node; /* process group node */
    rt_list_t t_grp; /* thread group */
    rt_list_t timer; /* POSIX timer object binding to a process */
    struct dfs_fdtable fdt; /* file descriptor table */
    char cmd[RT_NAME_MAX]; /* short command name */
    char *exe_file; /* process file path */
    /* POSIX signal */
    struct lwp_signal signal;
    struct lwp_avl_struct *object_root;
    struct rt_mutex object_mutex;
    struct rt_user_context user_ctx;
    struct rt_wqueue wait_queue; /* for console */
    struct tty_struct *tty; /* NULL if no tty */
    struct lwp_avl_struct *address_search_head; /* for addressed object fast search */
    char working_directory[DFS_PATH_MAX];
    int debug; /* non-zero when traced by the debugger */
    rt_uint32_t bak_first_inst; /* backup of first instruction */
    struct rt_mutex lwp_lock; /* protects the mutable fields above (LWP_LOCK) */
    rt_slist_t signalfd_notify_head; /* registered signalfd notifications */
#ifdef LWP_ENABLE_ASID
    uint64_t generation;
    unsigned int asid;
#endif
    struct rusage rt_rusage; /* resource usage accounting */
#ifdef RT_USING_VDSO
    void *vdso_vbase;
#endif
};
struct rt_lwp *lwp_self(void);
rt_err_t lwp_children_register(struct rt_lwp *parent, struct rt_lwp *child);
rt_err_t lwp_children_unregister(struct rt_lwp *parent, struct rt_lwp *child);
/* Progress states of an exit request delivered to a process/thread. */
enum lwp_exit_request_type
{
    LWP_EXIT_REQUEST_NONE = 0,  /* no exit requested */
    LWP_EXIT_REQUEST_TRIGGERED, /* exit requested, not yet acted upon */
    LWP_EXIT_REQUEST_IN_PROCESS, /* exit handling underway */
};
struct termios *get_old_termios(void);
void lwp_setcwd(char *buf);
char *lwp_getcwd(void);
int lwp_check_exit_request(void);
void lwp_terminate(struct rt_lwp *lwp);
int lwp_tid_init(void);
int lwp_tid_get(void);
void lwp_tid_put(int tid);
/**
* @brief Automatically get a thread and increase a reference count
*
* @param tid queried thread ID
* @return rt_thread_t
*/
rt_thread_t lwp_tid_get_thread_and_inc_ref(int tid);
rt_thread_t lwp_tid_get_thread_raw(int tid);
/**
* @brief Decrease a reference count
*
* @param thread target thread
*/
void lwp_tid_dec_ref(rt_thread_t thread);
void lwp_tid_set_thread(int tid, rt_thread_t thread);
int lwp_execve(char *filename, int debug, int argc, char **argv, char **envp);
int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux);
void lwp_user_obj_free(struct rt_lwp *lwp);
/*create by lwp_setsid.c*/
int setsid(void);
#ifdef ARCH_MM_MMU
void lwp_aspace_switch(struct rt_thread *thread);
#endif
void lwp_user_setting_save(rt_thread_t thread);
void lwp_user_setting_restore(rt_thread_t thread);
void lwp_uthread_ctx_save(void *ctx);
void lwp_uthread_ctx_restore(void);
int lwp_setaffinity(int tid, int cpu);
pid_t exec(char *filename, int debug, int argc, char **argv);
/* ctime lwp API */
int timer_list_free(rt_list_t *timer_list);
rt_err_t lwp_futex_init(void);
rt_err_t lwp_futex(struct rt_lwp *lwp, int *uaddr, int op, int val,
const struct timespec *timeout, int *uaddr2, int val3);
/* processgroup api */
/* Return the pgid of a process group, or 0 for RT_NULL. */
rt_inline pid_t lwp_pgid_get_bypgrp(rt_processgroup_t group)
{
    if (group == RT_NULL)
    {
        return 0;
    }
    return group->pgid;
}
/* Return the process-group id recorded in a process, or 0 for RT_NULL. */
rt_inline pid_t lwp_pgid_get_byprocess(rt_lwp_t process)
{
    if (process == RT_NULL)
    {
        return 0;
    }
    return process->pgid;
}
rt_processgroup_t lwp_pgrp_find(pid_t pgid);
void lwp_pgrp_dec_ref(rt_processgroup_t pgrp);
rt_processgroup_t lwp_pgrp_find_and_inc_ref(pid_t pgid);
rt_processgroup_t lwp_pgrp_create(rt_lwp_t leader);
int lwp_pgrp_delete(rt_processgroup_t group);
/**
* Note: all the pgrp with process operation must be called in the context where
* process lock is taken. This is protect us from a possible dead lock condition
*
* The order is mandatory in the case:
* PGRP_LOCK(pgrp);
* LWP_LOCK(p);
* ... bussiness logic
* LWP_UNLOCK(p);
* PGRP_UNLOCK(pgrp);
*/
int lwp_pgrp_insert(rt_processgroup_t group, rt_lwp_t process);
int lwp_pgrp_remove(rt_processgroup_t group, rt_lwp_t process);
int lwp_pgrp_move(rt_processgroup_t group, rt_lwp_t process);
int lwp_pgrp_update_children_info(rt_processgroup_t group, pid_t sid, pid_t pgid);
/* session api */
/* Return the session id of a session object, or 0 for RT_NULL. */
rt_inline pid_t lwp_sid_get_bysession(rt_session_t session)
{
    if (session == RT_NULL)
    {
        return 0;
    }
    return session->sid;
}
/* Return the session id recorded in a process group, or 0 for RT_NULL. */
rt_inline pid_t lwp_sid_get_bypgrp(rt_processgroup_t group)
{
    if (group == RT_NULL)
    {
        return 0;
    }
    return group->sid;
}
/* Return the session id recorded in a process, or 0 for RT_NULL. */
rt_inline pid_t lwp_sid_get_byprocess(rt_lwp_t process)
{
    if (process == RT_NULL)
    {
        return 0;
    }
    return process->sid;
}
rt_session_t lwp_session_find(pid_t sid);
rt_session_t lwp_session_create(struct rt_lwp *leader);
int lwp_session_delete(rt_session_t session);
/**
* Note: all the session operation must be called in the context where
* process lock is taken. This is protect us from a possible dead lock condition
*
* The order is mandatory in the case:
* PGRP_LOCK(pgrp);
* LWP_LOCK(p);
* ... bussiness logic
* LWP_UNLOCK(p);
* PGRP_UNLOCK(pgrp);
*/
int lwp_session_insert(rt_session_t session, rt_processgroup_t group);
int lwp_session_remove(rt_session_t session, rt_processgroup_t group);
int lwp_session_move(rt_session_t session, rt_processgroup_t group);
int lwp_session_update_children_info(rt_session_t session, pid_t sid);
int lwp_session_set_foreground(rt_session_t session, pid_t pgid);
/* complete the job-control related business on process exit */
void lwp_jobctrl_on_exit(struct rt_lwp *lwp);
sysret_t lwp_teardown(struct rt_lwp *lwp, void (*cb)(void));
#ifdef __cplusplus
}
#endif
#ifndef AUX_ARRAY_ITEMS_NR
#define AUX_ARRAY_ITEMS_NR 32
#endif
/* aux key */
#define AT_NULL 0
#define AT_IGNORE 1
#define AT_EXECFD 2
#define AT_PHDR 3
#define AT_PHENT 4
#define AT_PHNUM 5
#define AT_PAGESZ 6
#define AT_BASE 7
#define AT_FLAGS 8
#define AT_ENTRY 9
#define AT_NOTELF 10
#define AT_UID 11
#define AT_EUID 12
#define AT_GID 13
#define AT_EGID 14
#define AT_CLKTCK 17
#define AT_PLATFORM 15
#define AT_HWCAP 16
#define AT_FPUCW 18
#define AT_DCACHEBSIZE 19
#define AT_ICACHEBSIZE 20
#define AT_UCACHEBSIZE 21
#define AT_IGNOREPPC 22
#define AT_SECURE 23
#define AT_BASE_PLATFORM 24
#define AT_RANDOM 25
#define AT_HWCAP2 26
#define AT_EXECFN 31
#define AT_SYSINFO_EHDR 33
/* A single ELF auxiliary vector entry: an AT_* key (see defines above) and
 * its associated value. */
struct process_aux_item
{
    size_t key;
    size_t value;
};
/* Fixed-size auxiliary vector block placed on a new process's user stack. */
struct process_aux
{
    struct process_aux_item item[AUX_ARRAY_ITEMS_NR];
};
/* Debugger backend operations, installed via dbg_register() and dispatched
 * through the dbg_* wrapper functions. */
struct dbg_ops_t
{
    int (*dbg)(int argc, char **argv);          /* shell `dbg` command entry */
    uint32_t (*arch_get_ins)(void);             /* fetch instruction at the debug point */
    void (*arch_activate_step)(void);           /* enable single-stepping */
    void (*arch_deactivate_step)(void);         /* disable single-stepping */
    int (*check_debug_event)(struct rt_hw_exp_stack *regs, unsigned long esr);
    rt_channel_t (*gdb_get_server_channel)(void);
    int (*gdb_get_step_type)(void);
    void (*lwp_check_debug_attach_req)(void *pc);
    int (*lwp_check_debug_suspend)(void);
};
extern struct dbg_ops_t *rt_dbg_ops;
int dbg_thread_in_debug(void);
void dbg_register(struct dbg_ops_t *dbg_ops);
uint32_t dbg_get_ins(void);
void dbg_activate_step(void);
void dbg_deactivate_step(void);
int dbg_check_event(struct rt_hw_exp_stack *regs, unsigned long arg);
rt_channel_t gdb_server_channel(void);
int dbg_step_type(void);
void dbg_attach_req(void *pc);
int dbg_check_suspend(void);
void rt_hw_set_process_id(int pid);
void lwp_futex_exit_robust_list(rt_thread_t thread);
/* backtrace service */
rt_err_t lwp_backtrace_frame(rt_thread_t uthread, struct rt_hw_backtrace_frame *frame);
#endif

View File

@ -0,0 +1,70 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-09-30 RT-Thread the general porting API for lwp
* 2023-07-18 Shell New signal arch API arch_thread_signal_enter
*/
#ifndef __LWP_ARCH_COMM__
#define __LWP_ARCH_COMM__
#include <mm_aspace.h>
#include <rtthread.h>
#include <mmu.h>
/**
* APIs that must port to all architectures
*/
/* syscall handlers */
void arch_clone_exit(void);
void arch_fork_exit(void);
void arch_syscall_exit(void);
void arch_ret_to_user(void);
/* ELF relocation */
#ifdef ARCH_MM_MMU
struct rt_lwp;
void arch_elf_reloc(rt_aspace_t aspace, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, void *dynsym);
#else
void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, void *dynsym);
#endif
/* User entry. enter user program code for the first time */
void arch_crt_start_umode(void *args, const void *text, void *ustack, void *user_stack);
void arch_start_umode(void *args, const void *text, void *ustack, void *k_stack);
/* lwp create and setup */
int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
void *user_stack, void **thread_sp);
void *arch_get_user_sp(void);
/* user space setup and control */
int arch_user_space_init(struct rt_lwp *lwp);
void arch_user_space_free(struct rt_lwp *lwp);
void *arch_kernel_mmu_table_get(void);
void arch_kuser_init(rt_aspace_t aspace, void *vectors);
int arch_expand_user_stack(void *addr);
/* thread id register */
void arch_set_thread_area(void *p);
void* arch_get_tidr(void);
void arch_set_tidr(void *p);
/** entry point of architecture signal handling */
rt_noreturn void arch_thread_signal_enter(int signo, siginfo_t *psiginfo,
void *exp_frame, void *entry_uaddr,
lwp_sigset_t *save_sig_mask);
void arch_signal_check_erestart(void *eframe, void *ksp);
void arch_syscall_set_errno(void *eframe, int expected, int code);
int arch_backtrace_uthread(rt_thread_t thread);
#endif /* __LWP_ARCH_COMM__ */

View File

@ -0,0 +1,782 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-01-12 Shell separate argv, envp, aux processing to lwp_args.c
* Bugs fix for script arguments processing.
* support args larger than 4k
*/
#include "lwp_args.h"
#include "lwp_internal.h"
#include "mm_page.h"
static void _strvec_init(struct lwp_string_vector *sv)
{
#define DEFAUTL_ARGV_BUFLEN 4
    /* Start with a small pointer vector; _strvec_append() doubles it on
     * demand. Caller must check sv->strvec for allocation failure. */
    sv->string_count = 0;
    sv->strvec_buflen = DEFAUTL_ARGV_BUFLEN;
    sv->strvec = rt_malloc(DEFAUTL_ARGV_BUFLEN * sizeof(char *));
}
static void _strvec_detach(struct lwp_string_vector *sv)
{
if (sv->strvec)
{
rt_free(sv->strvec);
}
}
static rt_err_t _strvec_append(struct lwp_string_vector *sv, const char *string)
{
if (sv->string_count == sv->strvec_buflen)
{
void *newptr;
newptr = rt_realloc(sv->strvec, sv->strvec_buflen * 2 * sizeof(char *));
if (!newptr)
return -RT_ENOMEM;
sv->strvec = newptr;
sv->strvec_buflen *= 2;
}
sv->strvec[sv->string_count++] = string;
return RT_EOK;
}
static rt_err_t args_append(struct lwp_args_info *ai, const char *str_addr,
size_t str_len, enum lwp_args_type atype)
{
rt_err_t error;
char *str_bufaddr;
if (ai->strings_length + str_len + 1 > ai->str_buf_size)
{
/* reallocate buffer for this */
void *newptr;
newptr = rt_realloc(ai->str_buf, ai->str_buf_size * 2);
if (!newptr)
return -RT_ENOMEM;
ai->str_buf = newptr;
ai->str_buf_size *= 2;
}
/* append new string to string buffer and update strings_length */
str_bufaddr = &ai->str_buf[ai->strings_length];
if (atype == LWP_ARGS_TYPE_KARG || atype == LWP_ARGS_TYPE_KENVP)
{
strcpy(str_bufaddr, str_addr);
ai->strings_length += str_len + 1;
}
else
{
lwp_get_from_user(str_bufaddr, (void *)str_addr, str_len);
ai->strings_length += str_len;
ai->str_buf[ai->strings_length++] = '\0';
}
/* append new argument or environment */
switch (atype)
{
case LWP_ARGS_TYPE_ARG:
case LWP_ARGS_TYPE_KARG:
error = _strvec_append(&ai->argv, str_bufaddr);
if (!error && ai->argv.string_count == 1)
{
ai->argv0_strlen = str_len;
}
break;
case LWP_ARGS_TYPE_ENVP:
case LWP_ARGS_TYPE_KENVP:
error = _strvec_append(&ai->envp, str_bufaddr);
break;
default:
break;
}
return error;
}
/**
* @brief Override arguments 0 for script interpreter.
*
* Manual: interpreter will be invoked with the following arguments:
* {interpreter [optional-arg] pathname arg...}
* where pathname is the pathname of the file specified as the first
* argument of execve(), and arg... is the series of words pointed
* to by the argv argument of execve(), starting at argv[1]. Note
* that there is no way to get the argv[0] that was passed to the
* execve() call.
*/
/* Replace argv[0] of `ai` by the interpreter vector in `ow_ai`, producing
 * {interpreter, [optional-arg], pathname, argv[1]...} per the execve manual
 * note above. Rebuilds both the argv vector and the packed string buffer. */
static rt_err_t _args_override_argv0(struct lwp_args_info *ai, struct lwp_args_info *ow_ai)
{
    rt_err_t error = 0;
    int i, new_argc, new_strbuf_size, ai_bytes_tobe_copied;
    char **new_argv, *new_strbuf, *base;
    rt_base_t off;
    if (ow_ai == 0 || ow_ai->argv.string_count == 0)
    {
        return -RT_EINVAL;
    }
    /* for new argument vector */
    new_argc = ai->argv.string_count - 1 + ow_ai->argv.string_count;
    new_argv = rt_malloc(new_argc * sizeof(char *));
    if (!new_argv)
    {
        return -RT_ENOMEM;
    }
    /* for new string buffer */
    ai_bytes_tobe_copied = ai->strings_length - (ai->argv0_strlen + 1);
    new_strbuf_size = ai_bytes_tobe_copied + ow_ai->strings_length;
    new_strbuf = rt_malloc(new_strbuf_size);
    if (!new_strbuf) /* fix: tested new_argv here, missing a new_strbuf failure */
    {
        rt_free(new_argv);
        return -RT_ENOMEM;
    }
    base = new_strbuf;
    off = base - ow_ai->str_buf;
    /* copy overriding argument strings and argv */
    memcpy(base, ow_ai->str_buf, ow_ai->strings_length);
    for (i = 0; i < ow_ai->argv.string_count; i++)
    {
        /* base + ow_ai->argv.strvec[i] - ow_ai->str_buf */
        new_argv[i] = (char *)ow_ai->argv.strvec[i] + off;
    }
    base += ow_ai->strings_length;
    off = base - (ai->str_buf + ai->argv0_strlen + 1);
    /* copy old argument strings starting from argv[1] and setup new_argv */
    memcpy(base, ai->str_buf + ai->argv0_strlen + 1, ai_bytes_tobe_copied);
    for (size_t j = 1; j < ai->argv.string_count; i++, j++)
    {
        /* base + ai->argv->strvec[j] - ai->str_buf */
        new_argv[i] = (char *)ai->argv.strvec[j] + off;
    }
    /* setup envp for ai (environment strings were copied along with the
     * argv[1..] region above, so the same offset applies) */
    for (i = 0; i < ai->envp.string_count; i++)
    {
        /* base + ai->envp->strvec[i] - ai->str_buf */
        ai->envp.strvec[i] += off;
    }
    /* release the buffers being replaced (fix: they leaked before) */
    rt_free(ai->str_buf);
    rt_free(ai->argv.strvec);
    /* replace strings buffer and argv buffer */
    ai->str_buf = new_strbuf;
    ai->strings_length = new_strbuf_size;
    ai->str_buf_size = new_strbuf_size;
    ai->argv.string_count = new_argc;
    ai->argv.strvec = (void *)new_argv;
    ai->argv.strvec_buflen = new_argc;
    ai->argv0_strlen = ow_ai->argv0_strlen;
    return error;
}
/* argv[0] is always the first string of the packed string buffer. */
const char *lwp_args_get_argv_0(struct lwp_args_info *ai)
{
    return ai->str_buf;
}
static rt_err_t args_init(struct lwp_args_info *ai, size_t str_buf_size)
{
void *str_buf;
str_buf = rt_malloc(str_buf_size);
if (!str_buf)
return -RT_ENOMEM;
memset(ai, 0, sizeof(*ai));
_strvec_init(&ai->argv);
if (!ai->argv.strvec)
{
rt_free(str_buf);
return -RT_ENOMEM;
}
_strvec_init(&ai->envp);
if (!ai->envp.strvec)
{
rt_free(str_buf);
_strvec_detach(&ai->argv);
return -RT_ENOMEM;
}
ai->str_buf_size = str_buf_size;
ai->str_buf = str_buf;
return RT_EOK;
}
#define STR_BUF_DEFAULT_SIZE 2048
/* Initialize an args object with the default string buffer capacity. */
rt_err_t lwp_args_init(struct lwp_args_info *ai)
{
    return args_init(ai, STR_BUF_DEFAULT_SIZE);
}
/* Release every buffer owned by the args object (both vectors and the
 * packed string pool). */
void lwp_args_detach(struct lwp_args_info *ai)
{
    rt_free(ai->str_buf);
    _strvec_detach(&ai->envp);
    _strvec_detach(&ai->argv);
}
#ifdef ARCH_MM_MMU
/**
 * @brief Lay out argc/argv/envp/auxv plus the packed strings on the user
 *        stack region of a new process, in the order:
 *        argc, argv[0..argc-1], NULL, envp[0..envc-1], NULL, auxv pairs,
 *        NULL, string data.
 *
 * @param lwp target process
 * @param ai  args collected by lwp_args_put()/lwp_args_load_script()
 * @return user-space address of the aux vector, RT_NULL when the mapping at
 *         the stack top cannot be created
 *
 * NOTE(review): on success the ai->argv/ai->envp pointer entries are rebased
 * to user-space addresses, so `ai` must not be appended to afterwards —
 * confirm with callers.
 */
struct process_aux *lwp_argscopy(struct rt_lwp *lwp, struct lwp_args_info *ai)
{
    int size = sizeof(rt_base_t) * 4; /* store argc, argv_NULL, envp_NULL, aux_NULL */
    char *str_ua;
    const char **args_ua;
    const char **iter;
    rt_base_t off;
    struct process_aux_item pa_item;
    struct process_aux *aux_ua;
    size_t prot = PROT_READ | PROT_WRITE;
    size_t flags = MAP_FIXED | MAP_PRIVATE;
    rt_base_t argc = ai->argv.string_count;
    rt_base_t envc = ai->envp.string_count;
    /**
     * counts the bytes to storage the args
     */
    size += argc * sizeof(char *) + envc * sizeof(char *)
        + ai->strings_length + sizeof(struct process_aux);
    args_ua = lwp_mmap2(lwp, (void *)(USER_STACK_VEND), size, prot, flags, -1, 0);
    if (args_ua == RT_NULL)
    {
        return RT_NULL;
    }
    /**
     * @brief Put data from args info to user space
     * argc, argv[], NULL, envp[], NULL, aux[], NULL, strings
     */
    iter = args_ua;
    /* argc */
    lwp_data_put(lwp, iter++, &argc, sizeof(char *));
    /* strings start after every pointer slot: argc slot + argv entries +
     * NULL + envp entries + NULL + auxv key/value pairs + NULL */
    str_ua = (char *)((rt_ubase_t)args_ua +
        (1 + argc + 1 + envc + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(rt_base_t));
    lwp_data_put(lwp, str_ua, ai->str_buf, ai->strings_length);
    /* argv: rebase every kernel string pointer into the user-space copy */
    off = str_ua - ai->str_buf;
    for (size_t i = 0; i < argc; i++)
    {
        /* str_ua + ai->argv.strvec[i] - ai->str_buf */
        ai->argv.strvec[i] += off;
    }
    lwp_data_put(lwp, iter, ai->argv.strvec, sizeof(char *) * ai->argv.string_count);
    iter += ai->argv.string_count;
    /* NULL */
    lwp_data_set(lwp, iter++, 0, sizeof(char *));
    /* envp: same rebasing as argv */
    for (size_t i = 0; i < envc; i++)
    {
        /* str_ua + ai->envp.strvec[i] - ai->str_buf */
        ai->envp.strvec[i] += off;
    }
    lwp_data_put(lwp, iter, ai->envp.strvec, sizeof(char *) * ai->envp.string_count);
    iter += ai->envp.string_count;
    /* NULL */
    lwp_data_set(lwp, iter++, 0, sizeof(char *));
    /* aux: only AT_EXECFN (pointing at argv[0]'s string) is seeded here;
     * presumably the ELF loader fills the remaining entries — verify */
    aux_ua = (struct process_aux *)iter;
    pa_item.key = AT_EXECFN;
    pa_item.value = (size_t)str_ua;
    lwp_data_put(lwp, iter, &pa_item, sizeof(pa_item));
    iter += AUX_ARRAY_ITEMS_NR * 2;
    /* NULL */
    lwp_data_set(lwp, iter++, 0, sizeof(char *));
    lwp->args = args_ua;
    return aux_ua;
}
#else
/**
 * @brief Non-MMU variant: build the argc/argv/envp (and aux, under
 *        ARCH_MM_MMU) block in one kernel heap allocation and attach it to
 *        the lwp as lwp->args.
 *
 * @return pointer to the aux area inside the block, RT_NULL on allocation
 *         failure
 */
static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
#ifdef ARCH_MM_MMU
    int size = sizeof(int) * 5; /* store argc, argv, envp, aux, NULL */
    struct process_aux *aux;
#else
    int size = sizeof(int) * 4; /* store argc, argv, envp, NULL */
#endif /* ARCH_MM_MMU */
    int *args;
    char *str;
    char **new_argve;
    int i;
    int len;
    /* account for every argv string (incl. NUL) and its pointer slot */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(int) * argc);
    /* count envp entries into `i` and add their bytes + pointer slots */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(int);
            i++;
        }
    }
#ifdef ARCH_MM_MMU
    /* for aux */
    size += sizeof(struct process_aux);
    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }
    /* argc, argv[], 0, envp[], 0 */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(int));
#else
    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }
    str = (char*)((int)args + (argc + 2 + i + 1) * sizeof(int));
#endif /* ARCH_MM_MMU */
    /* pointer slots start right after argc */
    new_argve = (char **)&args[1];
    args[0] = argc;
    /* copy argv strings into the tail and record their addresses */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        lwp_memcpy(str, argv[i], len);
        str += len;
    }
    new_argve[i] = 0;
    i++;
    new_argve[i] = 0;
    /* copy envp strings after the argv NULL terminator */
    if (envp)
    {
        int j;
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            lwp_memcpy(str, envp[j], len);
            str += len;
            i++;
        }
        new_argve[i] = 0;
    }
#ifdef ARCH_MM_MMU
    /* aux: seed AT_EXECFN with argv[0]'s address; rest left for the loader */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (uint32_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;
    lwp->args = args;
    return aux;
#else
    lwp->args = args;
    lwp->args_length = size;
    return (struct process_aux *)(new_argve + i);
#endif /* ARCH_MM_MMU */
}
#endif
/**
 * @brief Append a NULL-terminated string vector to the args object.
 *
 * @param args      destination args object
 * @param strv_addr vector of string pointers; a user-space address for
 *                  LWP_ARGS_TYPE_ARG/ENVP, kernel-space for the K* types
 * @param atype     how the strings are fetched and which vector they go to
 * @return 0 on success, -EFAULT on a bad user pointer, or the error from
 *         args_append()
 */
rt_err_t lwp_args_put(struct lwp_args_info *args, const char **strv_addr, enum lwp_args_type atype)
{
    rt_err_t error;
    int iter = 0;
    int len;
    const char *arg_ptr;
    while (1)
    {
        if (atype == LWP_ARGS_TYPE_ARG || atype == LWP_ARGS_TYPE_ENVP)
        {
            /* fetch the next user-space pointer, then validate the string */
            len = lwp_get_from_user(&arg_ptr, strv_addr + iter++, sizeof(char *));
            if (len != sizeof(char *))
            {
                return -EFAULT;
            }
            if (arg_ptr == NULL)
            {
                break;
            }
            /* NOTE(review): assumed to return the length excluding the NUL
             * (args_append() terminates explicitly) — confirm */
            len = lwp_user_strlen(arg_ptr);
            if (len < 0)
            {
                return -EFAULT;
            }
        }
        else
        {
            /* kernel-space vector: read it directly */
            arg_ptr = strv_addr[iter++];
            if (arg_ptr == NULL)
            {
                break;
            }
            len = strlen(arg_ptr);
        }
        error = args_append(args, arg_ptr, len, atype);
        if (error)
        {
            return error;
        }
    }
    return 0;
}
/**
* @brief Put argument vector to args object
*/
rt_err_t lwp_args_put_argv(struct lwp_args_info *args, const char **argv_uaddr)
{
    /* Thin wrapper: copy a user-space, NULL-terminated argv into `args`. */
    return lwp_args_put(args, argv_uaddr, LWP_ARGS_TYPE_ARG);
}
/**
* @brief Put argument vector to args object
*/
rt_err_t lwp_args_put_envp(struct lwp_args_info *args, const char **envp_uaddr)
{
    /* Thin wrapper: copy a user-space, NULL-terminated envp into `args`. */
    return lwp_args_put(args, envp_uaddr, LWP_ARGS_TYPE_ENVP);
}
/**
* read words until reach nextline or EOF.
* words copied into buffer is never truncated.
*/
#define READFILE_STAT_EOF_REACHED 0
#define READFILE_STAT_NEXTLINE_REACHED 0
#define READFILE_STAT_TRUNCATED 1
#define READFILE_STAT_CAN_READMORE(stat) (stat)
/**
 * @brief Read a chunk of the shebang line into `buffer`, stopping at a
 *        newline or EOF, never leaving a word cut in half at the chunk end.
 *
 * When the buffer fills without a newline, the trailing partial word is
 * pushed back (lseek) so the next call re-reads it whole.
 *
 * @param p_readlen out: number of valid bytes placed in `buffer`
 * @return READFILE_STAT_TRUNCATED when more data remains on this line,
 *         otherwise READFILE_STAT_EOF_REACHED/NEXTLINE_REACHED (both 0:
 *         "stop reading")
 */
static int _readfile(int fd, size_t maxbytes, char *buffer, int *p_readlen)
{
    int readlen;
    int stat;
    char *nlp;
    readlen = read(fd, buffer, maxbytes - 1);
    if (readlen <= 0)
    {
        /* eof, failed */
        stat = READFILE_STAT_EOF_REACHED;
        buffer[0] = '\0';
    }
    else
    {
        /* fix: read() does not NUL-terminate; terminate before strchr()
         * so the scan cannot run past the valid bytes */
        buffer[readlen] = '\0';
        if ((nlp = strchr(buffer, '\n')) == NULL)
        {
            if (readlen == maxbytes - 1)
            {
                /* buffer filled with no newline: unread the trailing word */
                int tailing_wordlen = 0;
                char *cp = buffer + readlen - 1;
                /* fix: also bound the backward scan at the buffer start; a
                 * blank-less chunk used to scan before `buffer` */
                for (; cp >= buffer && *cp && *cp != ' ' && *cp != '\t'; cp--, tailing_wordlen++)
                    ;
                if (tailing_wordlen)
                {
                    lseek(fd, -tailing_wordlen, SEEK_CUR);
                    readlen -= tailing_wordlen;
                    stat = READFILE_STAT_TRUNCATED;
                }
                else
                {
                    stat = READFILE_STAT_EOF_REACHED;
                }
            }
            else
            {
                stat = READFILE_STAT_EOF_REACHED;
            }
        }
        else
        {
            /* newline found: only the bytes before it belong to this line */
            stat = READFILE_STAT_NEXTLINE_REACHED;
            readlen = nlp - buffer;
        }
        buffer[readlen] = '\0';
    }
    if (p_readlen)
        *p_readlen = readlen;
    return stat;
}
/* Skip leading blanks (spaces and tabs) and return the first non-blank
 * character position (may point at the terminating NUL). */
static char *_find_word(char *cp)
{
    while (*cp == ' ' || *cp == '\t')
        cp++;
    return cp;
}
/* NUL-terminate the word starting at `cp` (overwriting the blanks after it)
 * and return the start of the next word, or the position of the terminating
 * NUL when no further word exists. */
static char *_seperate_and_get_nextword(char *cp)
{
    /* advance to the end of the current word */
    while (*cp && *cp != ' ' && *cp != '\t')
        cp++;
    /* clear the separating blanks so the word becomes a C string */
    while (*cp == ' ' || *cp == '\t')
        *cp++ = '\0';
    return cp;
}
#define INTERP_BUF_SIZE 128
/**
 * @brief Handle a "#!" interpreter script: parse the shebang word(s) and
 *        rewrite `ai` so that argv becomes
 *        {interpreter, [optional-args...], filename, old argv[1]...}.
 *
 * @param ai       args already collected for the execve() call; modified in
 *                 place on success
 * @param filename path of the script being executed
 * @return 0 on success; negative when the file is not a script or any step
 *         fails (in which case `ai` is left unchanged)
 */
rt_err_t lwp_args_load_script(struct lwp_args_info *ai, const char *filename)
{
    rt_err_t error = -1;
    int fd = -RT_ERROR;
    int len;
    int rf_stat;
    char interp[INTERP_BUF_SIZE];
    char *cp, *nextword;
    char script_magic[2];
    /* zero-init so the cleanup path is safe even before args_init() runs */
    struct lwp_args_info ow_ai = {0};
    fd = open(filename, O_BINARY | O_RDONLY, 0);
    if (fd < 0)
    {
        goto quit;
    }
    /**
     * verify an interpreter script by matching script file magic
     * eg: #!/bin/sh
     */
    len = read(fd, script_magic, sizeof(script_magic));
    if (len != 2 || memcmp(script_magic, "#!", sizeof(script_magic)))
    {
        goto quit;
    }
    /* setup a new args struct to save script arguments */
    if (args_init(&ow_ai, INTERP_BUF_SIZE))
    {
        goto quit;
    }
    while (1)
    {
        /* read file to buffer (avoid any truncated words in buffer) */
        rf_stat = _readfile(fd, INTERP_BUF_SIZE, interp, &len);
        if (len <= 0)
        {
            goto quit;
        }
        /* find first word until reaching nil */
        cp = _find_word(interp);
        if (*cp == '\0')
        {
            /* only blanks in this chunk: read on if the line continues */
            if (READFILE_STAT_CAN_READMORE(rf_stat))
                continue;
            else
                break;
        }
        /* split the chunk into words; each becomes an interpreter argument */
        do
        {
            nextword = _seperate_and_get_nextword(cp);
            args_append(&ow_ai, cp, strlen(cp), LWP_ARGS_TYPE_KARG);
            cp = nextword;
        }
        while (*cp);
        if (READFILE_STAT_CAN_READMORE(rf_stat))
            continue;
        else
            break;
    }
    if (ow_ai.argv.string_count == 0)
    {
        goto quit; /* No interpreter name found */
    }
    /* the script path itself becomes the interpreter's last argument,
     * replacing the original argv[0] (see execve(2) manual note) */
    args_append(&ow_ai, filename, strlen(filename), LWP_ARGS_TYPE_KARG);
    error = _args_override_argv0(ai, &ow_ai);
    if (error)
    {
        goto quit;
    }
quit:
    /* NOTE(review): relies on rt_free(NULL) being a no-op when args_init()
     * never ran — confirm */
    lwp_args_detach(&ow_ai);
    if (fd >= 0)
    {
        close(fd);
    }
    return error;
}
/**
 * @brief Rebuild a kernel-space copy of a process's argv[] from the
 *        user-space args block laid out by lwp_argscopy().
 *
 * @return NULL-terminated heap vector (free with
 *         lwp_free_command_line_args()), RT_NULL on any failure
 */
char **lwp_get_command_line_args(struct rt_lwp *lwp)
{
    size_t argc = 0;
    char **argv = NULL;
    int ret;
    size_t i;
    int len; /* fix: was size_t, making the `len >= 0` error check always true */
    if (lwp)
    {
        /* first slot of the args block is argc */
        ret = lwp_data_get(lwp, &argc, lwp->args, sizeof(argc));
        if (ret == 0)
        {
            return RT_NULL;
        }
        argv = (char**)rt_calloc((argc + 1), sizeof(char*));
        if (argv)
        {
            for (i = 0; i < argc; i++)
            {
                char *argvp = NULL;
                /* argv pointers start at slot 1, right after argc */
                ret = lwp_data_get(lwp, &argvp, &((char **)lwp->args)[1 + i], sizeof(argvp));
                if (ret == 0)
                {
                    goto error_exit;
                }
                /* NOTE(review): assumes lwp_user_strlen_ext() returns a
                 * negative value on fault — confirm against its definition */
                len = lwp_user_strlen_ext(lwp, argvp);
                if (len >= 0)
                {
                    argv[i] = (char*)rt_malloc(len + 1);
                    if (argv[i] == NULL) /* fix: allocation was not checked */
                    {
                        goto error_exit;
                    }
                    ret = lwp_data_get(lwp, argv[i], argvp, len);
                    if (ret != len)
                    {
                        goto error_exit;
                    }
                    argv[i][len] = '\0';
                }
                else
                {
                    goto error_exit;
                }
            }
            argv[argc] = NULL;
        }
    }
    return argv;
error_exit:
    lwp_free_command_line_args(argv);
    return RT_NULL;
}
void lwp_print_envp(struct rt_lwp *lwp)
{
rt_size_t envp_counts;
char **kenvp_array = lwp_get_envp(lwp, &envp_counts);
if (kenvp_array)
{
rt_kprintf("envp_counts: %d\n", envp_counts);
for (size_t i = 0; i < envp_counts; i++)
{
rt_kprintf("envp[%d]: %s\n", i, kenvp_array[i]);
}
}
lwp_free_command_line_args(kenvp_array);
return ;
}
/**
 * @brief Rebuild a kernel-space copy of a process's environment vector from
 *        the user-space args block (envp pointers follow argc + argv + NULL).
 *
 * @param penvp_counts optional out: number of environment entries
 * @return NULL-terminated heap vector (free with
 *         lwp_free_command_line_args()), RT_NULL on failure
 */
char** lwp_get_envp(struct rt_lwp *lwp, rt_size_t *penvp_counts)
{
    int ret, len;
    rt_base_t argc;
    char **p_kenvp = RT_NULL;
    char *envp, **p_envp;
    size_t envp_counts = 0;
    if (lwp)
    {
        ret = lwp_data_get(lwp, &argc, lwp->args, sizeof(argc));
        if (ret == 0)
        {
            return RT_NULL;
        }
        /* envp starts after the argc slot, argc argv slots and the NULL */
        p_envp = (char **)lwp->args + 1 + argc + 1;
        /* counts envp */
        while (lwp_data_get(lwp, &envp, p_envp, sizeof(void *)) == sizeof(void *)
               && envp != NULL)
        {
            p_envp++;
            envp_counts++;
        }
        p_kenvp = (char **)rt_malloc((envp_counts + 1) * sizeof(char *));
        if (p_kenvp)
        {
            /* copy env from envp array */
            p_envp = (char **)lwp->args + 1 + argc + 1;
            for (size_t i = 0; i < envp_counts; i++)
            {
                ret = lwp_data_get(lwp, &envp, &p_envp[i], sizeof(char *));
                if (ret != sizeof(char **))
                {
                    lwp_free_command_line_args(p_kenvp);
                    return RT_NULL;
                }
                len = lwp_user_strlen_ext(lwp, envp);
                if (len > 0)
                {
                    p_kenvp[i] = (char*)rt_malloc(len + 1);
                    ret = lwp_data_get(lwp, p_kenvp[i], envp, len + 1);
                    if (ret != len + 1)
                    {
                        lwp_free_command_line_args(p_kenvp);
                        return RT_NULL;
                    }
                }
                else
                {
                    /* NOTE(review): an empty env string yields a NULL entry
                     * here; lwp_free_command_line_args() stops at the first
                     * NULL, so later entries would leak — verify */
                    p_kenvp[i] = NULL;
                }
            }
            if (penvp_counts)
                *penvp_counts = envp_counts;
            p_kenvp[envp_counts] = NULL;
        }
    }
    return p_kenvp;
}
void lwp_free_command_line_args(char** argv)
{
size_t i;
if (argv)
{
for (i = 0; argv[i]; i++)
{
rt_free(argv[i]);
}
rt_free(argv);
}
}

View File

@ -0,0 +1,58 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-01-12 Shell separate argv, envp, aux processing from execve(2).
* Bugs fix for script arguments processing.
*/
#ifndef __LWP_ARGV_H__
#define __LWP_ARGV_H__
#include <rtthread.h>
struct rt_lwp;
/* Origin and destination of a string handed to the args machinery:
 * ARG/ENVP come from user-space addresses, KARG/KENVP from kernel space. */
enum lwp_args_type {
    LWP_ARGS_TYPE_ARG,      /* user-space argument string */
    LWP_ARGS_TYPE_KARG,     /* kernel-space argument string */
    LWP_ARGS_TYPE_ENVP,     /* user-space environment string */
    LWP_ARGS_TYPE_KENVP,    /* kernel-space environment string */
    LWP_ARGS_TYPE_NULLPTR
};
/* Growable vector of string pointers (the bytes live elsewhere, in
 * lwp_args_info::str_buf). */
struct lwp_string_vector
{
    const char **strvec;        /* pointer array, grown by doubling */
    rt_uint32_t strvec_buflen;  /* capacity of strvec, in entries */
    rt_uint32_t string_count;   /* number of entries in use */
};
/* All execve() argument/environment state: the packed string pool plus the
 * argv and envp pointer vectors that index into it. */
struct lwp_args_info
{
    int argv0_strlen;       /* length of argv[0], excluding its NUL */
    int strings_length;     /* bytes of str_buf currently in use */
    int str_buf_size;       /* capacity of str_buf */
    char *str_buf;          /* packed, NUL-separated string storage */
    struct lwp_string_vector argv;
    struct lwp_string_vector envp;
};
rt_err_t lwp_args_init(struct lwp_args_info *ai);
void lwp_args_detach(struct lwp_args_info *ai);
struct process_aux *lwp_argscopy(struct rt_lwp *lwp, struct lwp_args_info *args_info);;
rt_err_t lwp_args_put(struct lwp_args_info *args, const char **strv_addr, enum lwp_args_type atype);
rt_err_t lwp_args_put_argv(struct lwp_args_info *args, const char **argv_uaddr);
rt_err_t lwp_args_put_envp(struct lwp_args_info *args, const char **envp_uaddr);
rt_err_t lwp_args_load_script(struct lwp_args_info *args, const char *filename);
const char *lwp_args_get_argv_0(struct lwp_args_info *ai);
char** lwp_get_envp(struct rt_lwp *lwp, rt_size_t *penvp_counts);
void lwp_print_envp(struct rt_lwp *lwp);
char** lwp_get_command_line_args(struct rt_lwp *lwp);
void lwp_free_command_line_args(char** argv);
#endif /* __LWP_ARGV_H__ */

View File

@ -0,0 +1,227 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-10-12 Jesven first version
*/
#include <rtthread.h>
#include <lwp_avl.h>
/**
 * @brief Walk back up a recorded search path and restore the AVL invariant
 *        at each visited node with single or double rotations.
 *
 * @param nodeplaces_ptr one past the last recorded parent-link slot; the
 *                       function walks it downward `count` times
 * @param count          number of slots on the path stack
 *
 * Stops early once a node's height is unchanged, since ancestors can no
 * longer be out of balance.
 */
static void lwp_avl_rebalance(struct lwp_avl_struct ***nodeplaces_ptr, int count)
{
    for (; count > 0; count--)
    {
        struct lwp_avl_struct **nodeplace = *--nodeplaces_ptr;
        struct lwp_avl_struct *node = *nodeplace;
        struct lwp_avl_struct *nodeleft = node->avl_left;
        struct lwp_avl_struct *noderight = node->avl_right;
        int heightleft = heightof(nodeleft);
        int heightright = heightof(noderight);
        if (heightright + 1 < heightleft)
        {
            /* left subtree is two levels taller */
            struct lwp_avl_struct *nodeleftleft = nodeleft->avl_left;
            struct lwp_avl_struct *nodeleftright = nodeleft->avl_right;
            int heightleftright = heightof(nodeleftright);
            if (heightof(nodeleftleft) >= heightleftright)
            {
                /* single rotation to the right */
                node->avl_left = nodeleftright;
                nodeleft->avl_right = node;
                nodeleft->avl_height = 1 + (node->avl_height = 1 + heightleftright);
                *nodeplace = nodeleft;
            }
            else
            {
                /* double (left-right) rotation */
                nodeleft->avl_right = nodeleftright->avl_left;
                node->avl_left = nodeleftright->avl_right;
                nodeleftright->avl_left = nodeleft;
                nodeleftright->avl_right = node;
                nodeleft->avl_height = node->avl_height = heightleftright;
                nodeleftright->avl_height = heightleft;
                *nodeplace = nodeleftright;
            }
        }
        else if (heightleft + 1 < heightright)
        {
            /* right subtree is two levels taller (mirror case) */
            struct lwp_avl_struct *noderightright = noderight->avl_right;
            struct lwp_avl_struct *noderightleft = noderight->avl_left;
            int heightrightleft = heightof(noderightleft);
            if (heightof(noderightright) >= heightrightleft)
            {
                /* single rotation to the left */
                node->avl_right = noderightleft;
                noderight->avl_left = node;
                noderight->avl_height = 1 + (node->avl_height = 1 + heightrightleft);
                *nodeplace = noderight;
            }
            else
            {
                /* double (right-left) rotation */
                noderight->avl_left = noderightleft->avl_right;
                node->avl_right = noderightleft->avl_left;
                noderightleft->avl_right = noderight;
                noderightleft->avl_left = node;
                noderight->avl_height = node->avl_height = heightrightleft;
                noderightleft->avl_height = heightright;
                *nodeplace = noderightleft;
            }
        }
        else
        {
            /* balanced: recompute the height and stop early if unchanged */
            int height = (heightleft < heightright ? heightright : heightleft) + 1;
            if (height == node->avl_height)
                break;
            node->avl_height = height;
        }
    }
}
/**
 * @brief Remove `node_to_delete` from the AVL tree rooted at *ptree and
 *        rebalance. Does nothing when the key is not found. The node's
 *        memory is not freed; the caller owns it.
 */
void lwp_avl_remove(struct lwp_avl_struct *node_to_delete, struct lwp_avl_struct **ptree)
{
    avl_key_t key = node_to_delete->avl_key;
    struct lwp_avl_struct **nodeplace = ptree;
    /* parent-link slots visited on the way down; replayed by rebalance() */
    struct lwp_avl_struct **stack[avl_maxheight];
    uint32_t stack_count = 0;
    struct lwp_avl_struct ***stack_ptr = &stack[0]; /* = &stack[stackcount] */
    struct lwp_avl_struct **nodeplace_to_delete;
    /* standard BST descent, recording the path */
    for (;;)
    {
        struct lwp_avl_struct *node = *nodeplace;
        if (node == AVL_EMPTY)
        {
            /* key not present: nothing to remove */
            return;
        }
        *stack_ptr++ = nodeplace;
        stack_count++;
        if (key == node->avl_key)
            break;
        if (key < node->avl_key)
            nodeplace = &node->avl_left;
        else
            nodeplace = &node->avl_right;
    }
    nodeplace_to_delete = nodeplace;
    if (node_to_delete->avl_left == AVL_EMPTY)
    {
        /* no left child: splice the right subtree into the parent link */
        *nodeplace_to_delete = node_to_delete->avl_right;
        stack_ptr--;
        stack_count--;
    }
    else
    {
        /* replace the node by its in-order predecessor: the rightmost node
         * of the left subtree (inner `nodeplace` shadows the outer one) */
        struct lwp_avl_struct ***stack_ptr_to_delete = stack_ptr;
        struct lwp_avl_struct **nodeplace = &node_to_delete->avl_left;
        struct lwp_avl_struct *node;
        for (;;)
        {
            node = *nodeplace;
            if (node->avl_right == AVL_EMPTY)
                break;
            *stack_ptr++ = nodeplace;
            stack_count++;
            nodeplace = &node->avl_right;
        }
        /* unlink the predecessor and move it into the deleted node's slot */
        *nodeplace = node->avl_left;
        node->avl_left = node_to_delete->avl_left;
        node->avl_right = node_to_delete->avl_right;
        node->avl_height = node_to_delete->avl_height;
        *nodeplace_to_delete = node;
        /* the recorded slot for the deleted node now refers to the
         * replacement's left link */
        *stack_ptr_to_delete = &node->avl_left;
    }
    lwp_avl_rebalance(stack_ptr, stack_count);
}
/**
 * @brief Insert `new_node` (with avl_key already set) into the AVL tree
 *        rooted at *ptree and rebalance. Duplicate keys are inserted to the
 *        right of existing equal keys.
 */
void lwp_avl_insert(struct lwp_avl_struct *new_node, struct lwp_avl_struct **ptree)
{
    avl_key_t key = new_node->avl_key;
    struct lwp_avl_struct **nodeplace = ptree;
    /* parent-link slots visited on the way down; replayed by rebalance() */
    struct lwp_avl_struct **stack[avl_maxheight];
    int stack_count = 0;
    struct lwp_avl_struct ***stack_ptr = &stack[0]; /* = &stack[stackcount] */
    for (;;)
    {
        struct lwp_avl_struct *node = *nodeplace;
        if (node == AVL_EMPTY)
            break;
        *stack_ptr++ = nodeplace;
        stack_count++;
        if (key < node->avl_key)
            nodeplace = &node->avl_left;
        else
            nodeplace = &node->avl_right;
    }
    /* attach as a fresh leaf, then restore balance along the path */
    new_node->avl_left = AVL_EMPTY;
    new_node->avl_right = AVL_EMPTY;
    new_node->avl_height = 1;
    *nodeplace = new_node;
    lwp_avl_rebalance(stack_ptr, stack_count);
}
/* Standard BST lookup: return the node whose avl_key equals `key`, or a
 * null pointer when the key is absent. */
struct lwp_avl_struct *lwp_avl_find(avl_key_t key, struct lwp_avl_struct *ptree)
{
    while (ptree != AVL_EMPTY)
    {
        if (key == ptree->avl_key)
        {
            return ptree;
        }
        ptree = (key < ptree->avl_key) ? ptree->avl_left : ptree->avl_right;
    }
    return (struct lwp_avl_struct *)0;
}
/* In-order traversal: invoke `fun(node, arg)` on every node until it returns
 * non-zero, and propagate that value. Returns 0 when the whole tree was
 * visited. */
int lwp_avl_traversal(struct lwp_avl_struct *ptree, int (*fun)(struct lwp_avl_struct *, void *), void *arg)
{
    int rc = 0;

    if (!ptree)
    {
        return 0;
    }
    if (ptree->avl_left)
    {
        rc = lwp_avl_traversal(ptree->avl_left, fun, arg);
    }
    if (rc == 0)
    {
        rc = (*fun)(ptree, arg);
    }
    if (rc == 0 && ptree->avl_right)
    {
        rc = lwp_avl_traversal(ptree->avl_right, fun, arg);
    }
    return rc;
}
/* Leftmost (smallest-key) node of the tree, or null for an empty tree. */
rt_weak struct lwp_avl_struct* lwp_map_find_first(struct lwp_avl_struct* ptree)
{
    if (ptree == AVL_EMPTY)
    {
        return (struct lwp_avl_struct *)0;
    }
    while (ptree->avl_left)
    {
        ptree = ptree->avl_left;
    }
    return ptree;
}

View File

@ -0,0 +1,46 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-10-12 Jesven first version
*/
#ifndef LWP_AVL_H__
#define LWP_AVL_H__
#include <rtthread.h>
#include <string.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
#define avl_key_t size_t
#define AVL_EMPTY (struct lwp_avl_struct *)0
#define avl_maxheight 32
#define heightof(tree) ((tree) == AVL_EMPTY ? 0 : (tree)->avl_height)
/* One AVL tree node; embed or allocate it and set avl_key before insert. */
struct lwp_avl_struct
{
    struct lwp_avl_struct *avl_left;
    struct lwp_avl_struct *avl_right;
    int avl_height;     /* subtree height; 1 for a leaf (see heightof()) */
    avl_key_t avl_key;  /* ordering key */
    void *data;         /* user payload, opaque to the tree code */
};
void lwp_avl_remove(struct lwp_avl_struct * node_to_delete, struct lwp_avl_struct ** ptree);
void lwp_avl_insert (struct lwp_avl_struct * new_node, struct lwp_avl_struct ** ptree);
struct lwp_avl_struct* lwp_avl_find(avl_key_t key, struct lwp_avl_struct* ptree);
int lwp_avl_traversal(struct lwp_avl_struct* ptree, int (*fun)(struct lwp_avl_struct*, void *), void *arg);
struct lwp_avl_struct* lwp_map_find_first(struct lwp_avl_struct* ptree);
#ifdef __cplusplus
}
#endif
#endif /* LWP_AVL_H__ */

View File

@ -0,0 +1,119 @@
#include <rtthread.h>
#include <rthw.h>
#include <lwp.h>
/* Whether the current thread belongs to a process that is being debugged. */
int dbg_thread_in_debug(void)
{
    struct rt_lwp *lwp = lwp_self();
    return (lwp && lwp->debug) ? 1 : 0;
}
struct dbg_ops_t *rt_dbg_ops = RT_NULL;
RTM_EXPORT(rt_dbg_ops);
/* Install the debugger backend used by all dbg_* dispatch helpers. */
void dbg_register(struct dbg_ops_t *dbg_ops)
{
    rt_dbg_ops = dbg_ops;
}
RTM_EXPORT(dbg_register);
/* Shell `dbg` command: forward to the backend, or report that debugging is
 * not enabled. */
static int dbg(int argc, char **argv)
{
    if (!rt_dbg_ops)
    {
        rt_kprintf("Error: DBG command is not enabled!\n");
        return -1;
    }
    return rt_dbg_ops->dbg(argc, argv);
}
MSH_CMD_EXPORT(dbg, dbg);
/* Instruction at the debug point via the backend; 0 when none is set. */
uint32_t dbg_get_ins(void)
{
    if (!rt_dbg_ops)
    {
        return 0;
    }
    return rt_dbg_ops->arch_get_ins();
}
void dbg_activate_step(void)
{
if (rt_dbg_ops)
{
rt_dbg_ops->arch_activate_step();
}
}
void dbg_deactivate_step(void)
{
if (rt_dbg_ops)
{
rt_dbg_ops->arch_deactivate_step();
}
}
/* Forward a debug exception to the backend; 0 means "not handled". */
int dbg_check_event(struct rt_hw_exp_stack *regs, unsigned long esr)
{
    return rt_dbg_ops ? rt_dbg_ops->check_debug_event(regs, esr) : 0;
}
/* GDB server IPC channel from the backend; RT_NULL when none is set. */
rt_channel_t gdb_server_channel(void)
{
    return rt_dbg_ops ? rt_dbg_ops->gdb_get_server_channel() : RT_NULL;
}
/* Current step type reported by the backend; 0 when none is set. */
int dbg_step_type(void)
{
    return rt_dbg_ops ? rt_dbg_ops->gdb_get_step_type() : 0;
}
void dbg_attach_req(void *pc)
{
if (rt_dbg_ops)
{
rt_dbg_ops->lwp_check_debug_attach_req(pc);
}
}
/* Ask the backend whether the thread should suspend for the debugger;
 * 0 when no backend is registered. */
int dbg_check_suspend(void)
{
    return rt_dbg_ops ? rt_dbg_ops->lwp_check_debug_suspend() : 0;
}

View File

@ -0,0 +1,29 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-07-11 RT-Thread first version
*/
#ifndef __LWP_DBG_H__
#define __LWP_DBG_H__
#include <rtthread.h>
#include <rthw.h>
#include <lwp.h>
int dbg_thread_in_debug(void);
void dbg_register(struct dbg_ops_t *dbg_ops);
uint32_t dbg_get_ins(void);
void dbg_activate_step(void);
void dbg_deactivate_step(void);
int dbg_check_event(struct rt_hw_exp_stack *regs, unsigned long esr);
rt_channel_t gdb_server_channel(void);
int dbg_step_type(void);
void dbg_attach_req(void *pc);
int dbg_check_suspend(void);
#endif /* __LWP_DBG_H__ */

View File

@ -0,0 +1,826 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-08-23 zhangsz first version
*/
#include <rtthread.h>
#ifdef RT_USING_LDSO
#include <dfs_file.h>
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <lwp_elf.h>
#include "lwp.h"
#include "lwp_arch.h"
#ifdef ARCH_MM_MMU
#include <lwp_user_mm.h>
#endif
#ifdef RT_USING_VDSO
#include <vdso.h>
#endif
#define DBG_TAG "load.elf"
#ifdef ELF_DEBUG_ENABLE
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_INFO
#endif
#include <rtdbg.h>
#define ELF_INVALID_FD -1
#define ELF_PHDR_NUM_MAX 128
#define FILE_LENGTH_MAX 0xC0000000
#define MEM_SIZE_MAX 0xC0000000
#define ELF_PATH_MAX 256
#define FLF_PATH_MIN 1
#define ELF_PAGESTART(_v) ((_v) & ~(rt_ubase_t)(ARCH_PAGE_SIZE - 1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ARCH_PAGE_SIZE - 1))
#define ELF_PAGEALIGN(_v) (((_v) + ARCH_PAGE_SIZE - 1) & ~(ARCH_PAGE_SIZE - 1))
#define ELF_EXEC_LOAD_ADDR USER_VADDR_START
#define ELF_INTERP_LOAD_ADDR LDSO_LOAD_VADDR
#define ELF_AUX_ENT(aux, id, val) \
do \
{ \
rt_base_t a = id; \
lwp_data_put(lwp, aux++, &a, sizeof(rt_ubase_t)); \
a = val; \
lwp_data_put(lwp, aux++, &a, sizeof(rt_ubase_t)); \
} while (0)
typedef struct
{
int fd;
char *filename;
rt_size_t file_len;
Elf_Ehdr ehdr;
Elf_Phdr *phdr;
rt_ubase_t map_size;
} elf_info_t;
typedef struct
{
struct rt_lwp *lwp;
struct process_aux *aux;
elf_info_t exec_info;
elf_info_t interp_info;
rt_ubase_t load_addr;
rt_ubase_t e_entry;
rt_ubase_t interp_base;
} elf_load_info_t;
/**
 * @brief Debug helper: hex-dump `len` bytes of the target process's user
 *        memory at `va`. Compiles to a no-op unless ELF_DEBUG_DUMP is set.
 */
static void elf_user_dump(struct rt_lwp *lwp, void *va, size_t len)
{
#ifdef ELF_DEBUG_DUMP
    uint8_t *k_va;
    int ret;
    size_t alloc_len;

    if (len < 16)
        len = 16;
    /* fix: allocate up to the next 16-byte multiple (zero padded below) so
     * the 16-bytes-per-line loop cannot read past the buffer when len is
     * not a multiple of 16 */
    alloc_len = (len + 15) & ~(size_t)15;

    rt_kprintf("\r\n");
    rt_kprintf("%s : user va : %p, len : 0x%x(%d)\n", __func__, va, len, len);
    k_va = rt_malloc(alloc_len);
    if (k_va == RT_NULL)
    {
        rt_kprintf("%s : malloc failed\n", __func__);
        return;
    }
    rt_memset(k_va, 0, alloc_len);
    ret = lwp_data_get(lwp, k_va, va, len);
    if (ret != len)
    {
        rt_kprintf("%s : lwp_get_from_user failed, ret = %d\n", __func__, ret);
        rt_free(k_va); /* fix: the buffer leaked on this error path */
        return;
    }
    rt_kprintf("%s : k_va : %p\n", __func__, k_va);
    for (size_t i = 0; i < len; i += 16)
    {
        rt_kprintf(" %02x %02x %02x %02x %02x %02x %02x %02x ", k_va[i], k_va[i+1], k_va[i+2], k_va[i+3],
            k_va[i+4], k_va[i+5], k_va[i+6], k_va[i+7]);
        rt_kprintf(" %02x %02x %02x %02x %02x %02x %02x %02x \n", k_va[i+8], k_va[i+9], k_va[i+10], k_va[i+11],
            k_va[i+12], k_va[i+13], k_va[i+14], k_va[i+15]);
    }
    rt_kprintf("\r\n");
    rt_free(k_va);
#endif
}
/* Return a page-aligned load-address offset; pseudo-random (derived from
 * the current tick) when ELF_LOAD_RANDOMIZE is enabled, zero otherwise. */
rt_ubase_t elf_random_offset(void)
{
#ifdef ELF_LOAD_RANDOMIZE
    rt_ubase_t page_count = rt_tick_get() % 65535;
    return page_count * ARCH_PAGE_SIZE;
#else
    /* randomization disabled: fixed base, offset zero */
    return ELF_PAGEALIGN(0);
#endif
}
/**
 * Map a file region into the target lwp's address space at load_addr.
 *
 * @return the mapped address on success, RT_NULL when the mapping failed
 *         or landed somewhere other than load_addr
 */
static void *file_mmap(struct rt_lwp *lwp, int fd, rt_ubase_t load_addr,
    rt_ubase_t map_size, size_t prot, size_t flags, rt_ubase_t offset)
{
    void *mapped = lwp_mmap2(lwp, (void *)load_addr, map_size, prot, flags,
                             fd, offset >> ARCH_PAGE_SHIFT);

    if ((mapped == RT_NULL) || (mapped != (void *)load_addr))
    {
        LOG_E("%s : lwp map user failed!", __func__);
        return RT_NULL;
    }

    LOG_D(" %s : map va = %p load_addr : %p size : 0x%x", __func__, mapped, load_addr, map_size);
    return mapped;
}
/* Open the ELF image read-only in binary mode; returns the fd or a
 * negative value on failure (already logged). */
static int elf_file_open(const char *filename)
{
    int fd = open(filename, O_BINARY | O_RDONLY, 0);

    if (fd < 0)
    {
        LOG_E("%s : elf file [%s] open failed!", __func__, filename);
    }
    return fd;
}
/* Thin wrapper over close(); returns its result. */
static int elf_file_close(int fd)
{
    int rc = close(fd);
    return rc;
}
/**
 * Query the size of the ELF file via stat().
 *
 * @param filename path to the image
 * @param file_len out: file size in bytes
 * @return RT_EOK on success, -RT_ERROR when stat() fails
 */
static int elf_file_length(char *filename, rt_size_t *file_len)
{
    struct stat sb = { 0 };

    if (stat(filename, &sb) != 0)
    {
        LOG_E("%s : error", __func__);
        return -RT_ERROR;
    }
    *file_len = (rt_size_t)sb.st_size;
    return RT_EOK;
}
/**
 * Read exactly 'size' bytes from 'offset' into 'buffer'.
 * A short read or failed seek is treated as an error.
 *
 * @return RT_EOK on success (including size == 0), -RT_ERROR otherwise
 */
static int elf_file_read(rt_int32_t fd, rt_uint8_t *buffer, size_t size, off_t offset)
{
    /* a zero-length request is trivially satisfied */
    if (size == 0)
    {
        return RT_EOK;
    }

    if (lseek(fd, offset, SEEK_SET) != offset)
    {
        LOG_E("%s : seek file offset: 0x%x failed", __func__, offset);
        return -RT_ERROR;
    }
    if (read(fd, buffer, size) != (ssize_t)size)
    {
        LOG_E("%s : read from offset: 0x%x error", __func__, offset);
        return -RT_ERROR;
    }
    return RT_EOK;
}
/**
 * Sanity-check an ELF file header before any further parsing.
 *
 * @param ehdr     header already read from the file
 * @param file_len total file length, used to bound e_phoff
 * @return RT_EOK when the header looks loadable, -RT_ERROR otherwise
 */
static rt_int32_t elf_check_ehdr(const Elf_Ehdr *ehdr, rt_uint32_t file_len)
{
    /* magic bytes must identify an ELF image ("\177ELF") */
    if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
    {
        LOG_E("%s : e_ident error", __func__);
        return -RT_ERROR;
    }
    /* only plain executables and shared objects (PIE / interpreter) load */
    if ((ehdr->e_type != ET_EXEC) && (ehdr->e_type != ET_DYN))
    {
        LOG_E("%s : e_type error", __func__);
        return -RT_ERROR;
    }
    if (ehdr->e_machine == EM_NONE)
    {
        LOG_E("%s : e_machine is EM_NONE", __func__);
        return -RT_ERROR;
    }
    /* bound the program-header count and table offset */
    if (ehdr->e_phnum > ELF_PHDR_NUM_MAX)
    {
        LOG_E("%s : e_phnum error", __func__);
        return -RT_ERROR;
    }
    if (ehdr->e_phoff > file_len)
    {
        LOG_E("%s : e_phoff error", __func__);
        return -RT_ERROR;
    }
    LOG_D("%s : e_entry : 0x%x", __func__, ehdr->e_entry);
    return RT_EOK;
}
static int elf_check_phdr(const Elf_Phdr *phdr)
{
if (phdr->p_filesz > FILE_LENGTH_MAX)
{
LOG_E("%s : phdr p_filesz 0x%x error", __func__, phdr->p_filesz);
return -RT_ERROR;
}
if (phdr->p_offset > FILE_LENGTH_MAX)
{
LOG_E("%s : phdr p_offset 0x%x error", __func__, phdr->p_offset);
return -RT_ERROR;
}
if (phdr->p_memsz > MEM_SIZE_MAX)
{
LOG_E("%s[%d], phdr p_memsz 0x%x error", __func__, phdr->p_memsz);
return -RT_ERROR;
}
LOG_D("%s : phdr p_vaddr : 0x%x", __func__, phdr->p_vaddr);
return RT_EOK;
}
/**
 * Open the ELF file and read + validate its file header into
 * elf_info->ehdr.
 *
 * On success elf_info->fd remains open for subsequent reads.  Error paths
 * after the open leave the fd stored in elf_info so that
 * elf_load_deinit() can close it later.
 *
 * @return RT_EOK on success, a negative error code otherwise
 */
static int elf_load_ehdr(elf_info_t *elf_info)
{
    int ret;
    ret = elf_file_open(elf_info->filename);
    if (ret < 0)
    {
        LOG_E("%s : elf_file_open %s failed", __func__, elf_info->filename);
        return ret;
    }
    elf_info->fd = ret;

    ret = elf_file_length(elf_info->filename, &elf_info->file_len);
    if (ret != RT_EOK)
    {
        return -RT_ERROR;
    }
    /* the file header lives at offset 0 */
    ret = elf_file_read(elf_info->fd, (rt_uint8_t *)&elf_info->ehdr, sizeof(Elf_Ehdr), 0);
    if (ret != RT_EOK)
    {
        LOG_E("%s : elf_file_read failed, ret : %d", __func__, ret);
        return -RT_ERROR;
    }
    ret = elf_check_ehdr(&elf_info->ehdr, elf_info->file_len);
    if (ret != RT_EOK)
    {
        LOG_E("%s : elf_check_ehdr failed, ret : %d", __func__, ret);
        return -RT_ERROR;
    }
    return RT_EOK;
}
/**
 * Allocate and read the program-header table described by elf_info->ehdr.
 * On success elf_info->phdr owns a heap buffer (freed by
 * elf_load_deinit()).
 *
 * @return RT_EOK on success, -RT_ERROR / -RT_ENOMEM otherwise
 */
static int elf_load_phdr(elf_info_t *elf_info)
{
    Elf_Ehdr *ehdr = &elf_info->ehdr;
    uint32_t size;
    int ret;

    if (ehdr->e_phnum < 1)
    {
        return -RT_ERROR;
    }
    /* each entry must be the size this kernel was built for */
    if (ehdr->e_phentsize != sizeof(Elf_Phdr))
    {
        return -RT_ERROR;
    }
    size = sizeof(Elf_Phdr) * ehdr->e_phnum;
    /* the whole table must lie inside the file */
    if ((ehdr->e_phoff + size) > elf_info->file_len)
    {
        return -RT_ERROR;
    }

    elf_info->phdr = rt_malloc(size);
    if (elf_info->phdr == RT_NULL)
    {
        LOG_E("%s : alloc phdr failed", __func__);
        return -RT_ENOMEM;
    }
    ret = elf_file_read(elf_info->fd, (rt_uint8_t *)elf_info->phdr, size, ehdr->e_phoff);
    if (ret != RT_EOK)
    {
        rt_free(elf_info->phdr);
        elf_info->phdr = RT_NULL;
        LOG_E("%s : elf_file_read failed, ret = %d", __func__, ret);
        return -RT_ERROR;
    }
    return RT_EOK;
}
/**
 * Locate the PT_INTERP segment of the executable, read the interpreter
 * path it names, then open and parse the interpreter's own ELF and
 * program headers into load_info->interp_info.
 *
 * A static executable has no PT_INTERP; the loop then falls through and
 * RT_EOK is returned with interp_info untouched (fd stays
 * ELF_INVALID_FD).  Allocated resources on error paths are released later
 * by elf_load_deinit().
 *
 * @return RT_EOK on success (or no interpreter), negative error otherwise
 */
static int elf_load_interp(elf_load_info_t *load_info)
{
    Elf_Phdr *phdr = load_info->exec_info.phdr;
    int ret;
    int i;
    for (i = 0; i < load_info->exec_info.ehdr.e_phnum; ++i, ++phdr)
    {
        if (phdr->p_type != PT_INTERP)
        {
            continue;
        }
        if (elf_check_phdr(phdr) != RT_EOK)
        {
            return -RT_ERROR;
        }
        /* interpreter path must be a sane length ... */
        if ((phdr->p_filesz > ELF_PATH_MAX) || (phdr->p_filesz < FLF_PATH_MIN))
        {
            LOG_E("%s : phdr p_filesz error", __func__, phdr->p_filesz);
            return -RT_ERROR;
        }
        /* ... and must lie entirely inside the file */
        if (phdr->p_offset + phdr->p_filesz > load_info->exec_info.file_len)
        {
            LOG_E("%s : phdr p_offset error", __func__, phdr->p_offset);
            return -RT_ERROR;
        }
        load_info->interp_info.filename = rt_malloc(phdr->p_filesz);
        if (load_info->interp_info.filename == RT_NULL)
        {
            LOG_E("%s : alloc elf interpreter failed", __func__);
            return -RT_ENOMEM;
        }
        ret = elf_file_read(load_info->exec_info.fd, (rt_uint8_t *)load_info->interp_info.filename,
            phdr->p_filesz, phdr->p_offset);
        if (ret != RT_EOK)
        {
            LOG_E("%s : elf_file_read failed, ret = %d", __func__, ret);
            ret = -RT_ERROR;
            goto error_exit;
        }
        /* the PT_INTERP payload must be a NUL-terminated string */
        if (load_info->interp_info.filename[phdr->p_filesz - 1] != '\0')
        {
            LOG_E("%s : elf interpreter is invalid", __func__);
            ret = -RT_ERROR;
            goto error_exit;
        }
        LOG_D("%s : elf interpreter : %s", __func__, load_info->interp_info.filename);
        /* parse the interpreter image itself */
        ret = elf_load_ehdr(&load_info->interp_info);
        if (ret != RT_EOK)
        {
            LOG_E("%s : elf_load_ehdr failed, ret = %d", __func__, ret);
            goto error_exit;
        }
        ret = elf_load_phdr(&load_info->interp_info);
        if (ret != RT_EOK)
        {
            LOG_E("%s : elf_load_phdr failed, ret = %d", __func__, ret);
            goto error_exit;
        }
        break;
    }
    return RT_EOK;
error_exit:
    /* interp_info.filename (and any fd/phdr) is freed by elf_load_deinit() */
    return ret;
}
/**
 * Compute the total virtual-address span covered by all PT_LOAD segments
 * and store it in elf_info->map_size.
 *
 * @return 0 on success, -1 when the image has no PT_LOAD segment
 */
static int total_mapping_size(elf_info_t *elf_info)
{
    int idx;
    int lo = -1; /* index of the first PT_LOAD segment */
    int hi = -1; /* index of the last PT_LOAD segment */

    for (idx = 0; idx < elf_info->ehdr.e_phnum; idx++)
    {
        if (elf_info->phdr[idx].p_type != PT_LOAD)
        {
            continue;
        }
        if (lo == -1)
        {
            lo = idx;
        }
        hi = idx;
    }
    if (lo == -1)
    {
        return -1;
    }

    /* span = end of the last segment minus the page-aligned start of the
     * first one */
    elf_info->map_size = elf_info->phdr[hi].p_vaddr + elf_info->phdr[hi].p_memsz -
            ELF_PAGESTART(elf_info->phdr[lo].p_vaddr);
    return 0;
}
/**
 * Map one PT_LOAD segment at the page containing 'addr'.  When map_size
 * is nonzero it overrides the size derived from the segment (used for the
 * first segment so the image's whole span is reserved at once).
 *
 * @return the mapped virtual address, 0 on failure
 */
static rt_ubase_t elf_map(struct rt_lwp *lwp, const Elf_Phdr *elf_phdr, int fd, rt_ubase_t addr, size_t prot, size_t flags, rt_ubase_t map_size)
{
    rt_ubase_t length = map_size;
    rt_ubase_t file_off;

    addr = ELF_PAGESTART(addr);
    /* pull the file offset back by p_vaddr's in-page offset so both sides
     * of the mapping stay page aligned */
    file_off = elf_phdr->p_offset - ELF_PAGEOFFSET(elf_phdr->p_vaddr);

    if (length == 0)
    {
        length = elf_phdr->p_memsz + ELF_PAGEOFFSET(elf_phdr->p_vaddr);
        if (length == 0)
        {
            /* empty segment: nothing to map, report the aligned address */
            return addr;
        }
    }
    return (rt_ubase_t)file_mmap(lwp, fd, addr, length, prot, flags, file_off);
}
/* Zero-fill the .bss tail (memsz beyond filesz) of a writable segment.
 * 'fd' and 'phdr' are currently unused; kept for interface symmetry. */
static int elf_zero_bss(struct rt_lwp *lwp, int fd, const Elf_Phdr *phdr, rt_ubase_t bss_start,
        rt_ubase_t bss_end)
{
    rt_size_t span = bss_end - bss_start;

    lwp_data_set(lwp, (void *)bss_start, 0, span);
    return RT_EOK;
}
/**
 * Map every PT_LOAD segment of one image into the target lwp.
 *
 * @param load_info    overall load request (provides the target lwp)
 * @param elf_info     image (executable or interpreter) being mapped
 * @param elfload_addr out: user VA the first segment got mapped at
 * @param map_size     span to reserve with the first mapping (0 = per-segment)
 * @param load_base    in/out: relocation base; for ET_DYN starting at 0 it
 *                     is filled with the base chosen by the first mapping
 * @return RT_EOK on success, negative error otherwise
 */
static int elf_file_mmap(elf_load_info_t *load_info, elf_info_t *elf_info, rt_ubase_t *elfload_addr,
        rt_uint32_t map_size, rt_ubase_t *load_base)
{
    int ret, i;
    rt_ubase_t map_va, bss_start, bss_end;
    Elf_Ehdr *ehdr = &elf_info->ehdr;
    Elf_Phdr *phdr = elf_info->phdr;
    const Elf_Phdr *tmp_phdr = phdr;
    int fd = elf_info->fd;
    rt_ubase_t load_addr;
    size_t prot = PROT_READ | PROT_WRITE;
    size_t flags = MAP_FIXED | MAP_PRIVATE;
    for (i = 0; i < ehdr->e_phnum; ++i, ++tmp_phdr)
    {
        if (tmp_phdr->p_type != PT_LOAD)
        {
            continue;
        }
        if (ehdr->e_type == ET_EXEC)
        {
            /* fixed-address executables get the strict size checks;
             * ET_DYN segments were bounded via total_mapping_size() */
            if (elf_check_phdr(tmp_phdr) != RT_EOK)
            {
                LOG_E("%s : elf_check_phdr failed", __func__);
                return -RT_ERROR;
            }
        }
        load_addr = tmp_phdr->p_vaddr + *load_base;
        LOG_D("%s : p_vaddr : 0x%x, load_addr : 0x%x", __func__, tmp_phdr->p_vaddr, load_addr);
        /* no usable hint at all: let the kernel pick a free region */
        if ((tmp_phdr->p_vaddr == 0) && (*load_base == 0))
        {
            flags &= ~MAP_FIXED;
        }
        map_va = elf_map(load_info->lwp, tmp_phdr, fd, load_addr, prot, flags, map_size);
        if (!map_va)
        {
            LOG_E("%s : elf_map failed", __func__);
            return -ENOMEM;
        }
        /* only the first segment reserves the whole-image span */
        map_size = 0;
        elf_user_dump(load_info->lwp, (void *)load_addr, 64);
        /* zero the .bss tail of writable segments (memsz > filesz) */
        if ((tmp_phdr->p_memsz > tmp_phdr->p_filesz) && (tmp_phdr->p_flags & PF_W))
        {
            bss_start = load_addr + tmp_phdr->p_filesz;
            bss_end = load_addr + tmp_phdr->p_memsz;
            ret = elf_zero_bss(load_info->lwp, fd, tmp_phdr, bss_start, bss_end);
            if (ret)
            {
                LOG_E("%s : elf_zero_bss error", __func__);
                return ret;
            }
        }
        /* remember where the first segment landed */
        if (*elfload_addr == 0)
        {
            *elfload_addr = map_va + ELF_PAGEOFFSET(tmp_phdr->p_vaddr);
            LOG_D("%s elf_load_addr : %p, vAddr : %p, load_base : %p, map_va : %p", __func__,
                *elfload_addr, tmp_phdr->p_vaddr, *load_base, map_va);
        }
        /* a PIE with no base yet: relocate the rest relative to this map */
        if ((*load_base == 0) && (ehdr->e_type == ET_DYN))
        {
            *load_base = map_va;
        }
    }
    return RT_EOK;
}
/**
 * Map the dynamic linker at a (possibly randomized) base above
 * ELF_INTERP_LOAD_ADDR; *interp_base receives where it actually landed.
 */
static int load_elf_interp(elf_load_info_t *load_info, rt_ubase_t *interp_base)
{
    elf_info_t *interp = &load_info->interp_info;
    rt_ubase_t base = ELF_INTERP_LOAD_ADDR + elf_random_offset();

    if (total_mapping_size(interp))
    {
        LOG_E("%s : total_mapping_size failed", __func__);
        return -RT_ERROR;
    }
    LOG_D("%s : total_mapping_size 0x%x", __func__, interp->map_size);
    return elf_file_mmap(load_info, interp, interp_base, interp->map_size, &base);
}
/**
 * Fill the process auxiliary vector (AT_* entries) in user memory, map
 * the page backing AT_RANDOM, and (optionally) set up the vDSO page.
 *
 * @return 0 on success, negative error otherwise
 */
static int elf_aux_fill(elf_load_info_t *load_info)
{
    uint8_t *random;
    struct process_aux *aux = load_info->aux;
    elf_addr_t *aux_info;
    uint32_t random_value = rt_tick_get();
    size_t prot = PROT_READ | PROT_WRITE;
    size_t flags = MAP_FIXED | MAP_PRIVATE;
    rt_lwp_t lwp = load_info->lwp;
    void *va;
    if (!aux)
    {
        LOG_E("%s : aux is null", __func__);
        return -1;
    }
    aux_info = (elf_addr_t *)aux->item;
    ELF_AUX_ENT(aux_info, AT_PAGESZ, ARCH_PAGE_SIZE);

    /* anonymous page near the top of user space backing AT_RANDOM */
    va = lwp_mmap2(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE * 2), ARCH_PAGE_SIZE, prot, flags, -1, 0);
    if (!va)
    {
        LOG_E("lwp map user failed!");
        return -RT_ERROR;
    }
    /* NOTE(review): AT_RANDOM points at a 16-byte area but only
     * sizeof(random_value) == 4 bytes are written; remaining bytes are
     * whatever the fresh mapping contains — confirm this is intended */
    random = (uint8_t *)(USER_VADDR_TOP - ARCH_PAGE_SIZE - sizeof(char[16]));
    lwp_data_put(load_info->lwp, random, &random_value, sizeof(random_value));
    ELF_AUX_ENT(aux_info, AT_RANDOM, (size_t)random);
    /* program-header location/geometry for the dynamic linker */
    ELF_AUX_ENT(aux_info, AT_PHDR, (size_t)load_info->load_addr + load_info->exec_info.ehdr.e_phoff);
    ELF_AUX_ENT(aux_info, AT_PHNUM, (size_t)load_info->exec_info.ehdr.e_phnum);
    ELF_AUX_ENT(aux_info, AT_PHENT, sizeof(Elf_Phdr));
    ELF_AUX_ENT(aux_info, AT_BASE, load_info->interp_base);
    ELF_AUX_ENT(aux_info, AT_FLAGS, 0);
    ELF_AUX_ENT(aux_info, AT_ENTRY, load_info->exec_info.ehdr.e_entry);
    /* single-user system: report root-equivalent ids and no hw caps */
    ELF_AUX_ENT(aux_info, AT_UID, 0);
    ELF_AUX_ENT(aux_info, AT_EUID, 0);
    ELF_AUX_ENT(aux_info, AT_GID, 0);
    ELF_AUX_ENT(aux_info, AT_EGID, 0);
    ELF_AUX_ENT(aux_info, AT_HWCAP, 0);
    ELF_AUX_ENT(aux_info, AT_CLKTCK, 0);
    ELF_AUX_ENT(aux_info, AT_SECURE, 0);

#ifdef RT_USING_VDSO
    if(RT_EOK == arch_setup_additional_pages(load_info->lwp))
    {
        ELF_AUX_ENT(aux_info, AT_SYSINFO_EHDR, (size_t)load_info->lwp->vdso_vbase);
    }
    else
    {
        LOG_W("vdso map error,VDSO currently only supports aarch64 architecture!");
    }
#endif
    return 0;
}
/**
 * Map all PT_LOAD segments of the executable (and its interpreter when
 * one was found) into the target lwp, then fill the auxiliary vector and
 * set the process entry point.
 *
 * @return RT_EOK on success, a negative error code otherwise
 */
static int elf_load_segment(elf_load_info_t *load_info)
{
    int ret;
    rt_ubase_t app_load_base = 0;
    load_info->load_addr = 0;
    load_info->interp_base = 0;
    load_info->exec_info.map_size = 0;

    /* a PIE (ET_DYN) executable is relocated to a randomized base, so the
     * whole PT_LOAD span must be known before the first mapping */
    if (load_info->exec_info.ehdr.e_type == ET_DYN)
    {
        ret = total_mapping_size(&load_info->exec_info);
        if (ret)
        {
            LOG_E("%s : total_mapping_size failed", __func__);
            return -RT_ERROR;
        }
        LOG_D("%s : map_size : 0x%x", __func__, load_info->exec_info.map_size);
        app_load_base = ELF_EXEC_LOAD_ADDR + elf_random_offset();
    }

    ret = elf_file_mmap(load_info, &load_info->exec_info, &load_info->load_addr,
        load_info->exec_info.map_size, &app_load_base);
    if (elf_file_close(load_info->exec_info.fd) != RT_EOK)
    {
        LOG_W("%s : elf_file_close exec failed", __func__);
    }
    load_info->exec_info.fd = ELF_INVALID_FD;
    /* fix: a mapping failure used to be mis-reported as a close failure
     * and then silently ignored; propagate it instead */
    if (ret != RT_EOK)
    {
        LOG_E("%s : elf_file_mmap exec failed, ret = %d", __func__, ret);
        return ret;
    }

    if (load_info->interp_info.fd != ELF_INVALID_FD)
    {
        ret = load_elf_interp(load_info, &load_info->interp_base);
        if (ret)
        {
            LOG_E("%s : load_elf_interp failed, ret = %d", __func__, ret);
            return ret;
        }
        /* fix: check the close result itself ('ret' is RT_EOK here) */
        if (elf_file_close(load_info->interp_info.fd) != RT_EOK)
        {
            LOG_W("%s : elf_file_close interp failed", __func__);
        }
        load_info->interp_info.fd = ELF_INVALID_FD;
        /* control transfers to the interpreter first; it receives the
         * relocated program entry via AT_ENTRY */
        load_info->e_entry = load_info->interp_info.ehdr.e_entry + load_info->interp_base;
        load_info->exec_info.ehdr.e_entry = load_info->exec_info.ehdr.e_entry + app_load_base;
    }
    else
    {
        load_info->e_entry = load_info->exec_info.ehdr.e_entry;
    }
    load_info->lwp->text_entry = (void *)load_info->e_entry;
    LOG_D("%s : lwp->text_entry : %p loadaddr : %p", __func__, load_info->lwp->text_entry, app_load_base);
    elf_user_dump(load_info->lwp, load_info->lwp->text_entry, 64);

    ret = elf_aux_fill(load_info);
    if (ret)
    {
        LOG_E("%s : elf_aux_fill failed", __func__);
        return ret;
    }
    return RT_EOK;
}
/* Release every resource (fds, phdr tables, filename buffers) still held
 * by a load request; safe to call on success and failure paths alike. */
static void elf_load_deinit(elf_load_info_t *load_info)
{
    elf_info_t *images[2] = { &load_info->exec_info, &load_info->interp_info };
    int i;

    for (i = 0; i < 2; i++)
    {
        if (images[i]->fd != ELF_INVALID_FD)
        {
            elf_file_close(images[i]->fd);
        }
    }
    for (i = 0; i < 2; i++)
    {
        if (images[i]->phdr != RT_NULL)
        {
            rt_free(images[i]->phdr);
        }
        if (images[i]->filename != RT_NULL)
        {
            rt_free(images[i]->filename);
        }
    }
}
/* Read and validate the executable's ELF file header, then its
 * program-header table. */
static int elf_load_app(elf_info_t *exec_info)
{
    int err = elf_load_ehdr(exec_info);

    if (err == RT_EOK)
    {
        err = elf_load_phdr(exec_info);
    }
    return err;
}
/* Load an ELF image end-to-end: headers, optional interpreter, segments.
 * All intermediate resources are released before returning, win or lose. */
static int elf_file_load(elf_load_info_t *load_info)
{
    int err;

    err = elf_load_app(&load_info->exec_info);
    if (err == RT_EOK)
    {
        err = elf_load_interp(load_info);
    }
    if (err == RT_EOK)
    {
        err = elf_load_segment(load_info);
    }

    elf_load_deinit(load_info);
    return err;
}
/**
 * Load the ELF program 'filename' into process 'lwp'.
 *
 * @param filename  path to the ELF image
 * @param lwp       target light-weight process
 * @param load_addr requested load address (stored as the initial
 *                  load_addr hint; may be NULL)
 * @param addr_size unused here
 * @param aux_ua    user-space auxiliary vector to populate
 * @return RT_EOK on success, negative error code otherwise
 */
int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size,
        struct process_aux *aux_ua)
{
    elf_load_info_t load_info = { 0 };
    int len;
    int ret;
    if (filename == RT_NULL)
    {
        LOG_E("%s : file is NULL", __func__);
        return -RT_ERROR;
    }
    len = rt_strlen(filename);
    if (len < FLF_PATH_MIN || len > ELF_PATH_MAX)
    {
        LOG_E("%s : file length (%d) invalid", __func__, len);
        return -RT_ERROR;
    }
    /* private copy of the path; freed by elf_load_deinit() via
     * elf_file_load() on every path */
    load_info.exec_info.filename = rt_malloc(len + 1);
    if (!load_info.exec_info.filename)
    {
        LOG_E("%s : alloc filename failed", __func__, len);
        return -RT_ERROR;
    }
    else
    {
        rt_memset(load_info.exec_info.filename, 0, len + 1);
        rt_strncpy(load_info.exec_info.filename, filename, len);
    }
    load_info.lwp = lwp;
    load_info.aux = aux_ua;
    load_info.exec_info.fd = ELF_INVALID_FD;
    load_info.interp_info.fd = ELF_INVALID_FD;
    load_info.load_addr = (rt_ubase_t)load_addr;
    /* copy file name to process name */
    /* NOTE(review): rt_strncpy with RT_NAME_MAX may leave lwp->cmd without
     * a NUL terminator for long paths — confirm lwp->cmd handling */
    rt_strncpy(lwp->cmd, filename, RT_NAME_MAX);
    lwp->exe_file = dfs_normalize_path(NULL, filename); // malloc
    ret = elf_file_load(&load_info);
    if (ret != RT_EOK)
    {
        LOG_E("%s : elf_file_load error, ret : %d", __func__, ret);
        return ret;
    }
    return RT_EOK;
}
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,936 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021/01/02 bernard the first version
* 2023-07-25 Shell Remove usage of rt_hw_interrupt API in the lwp
* Coding style: remove multiple `return` in a routine
* 2023-08-08 Shell Fix return value of futex(wait); Fix ops that only
* FUTEX_PRIVATE is supported currently
* 2023-11-03 Shell Add Support for ~FUTEX_PRIVATE
* 2023-11-16 xqyjlj Add Support for futex requeue and futex pi
*/
#define __RT_IPC_SOURCE__
#include "lwp_futex_internal.h"
#include "sys/time.h"
#include <stdatomic.h>
/* Global lock serializing all shared (non-FUTEX_PRIVATE) futex operations. */
struct rt_mutex _glob_futex;

/* Initialize the shared-futex global lock; called once during startup. */
rt_err_t lwp_futex_init(void)
{
    rt_err_t err = rt_mutex_init(&_glob_futex, "glob_ftx", RT_IPC_FLAG_PRIO);
    return err;
}
/* Take the lock guarding futex state: the per-process lock for private
 * ops, the global futex mutex for shared ops. */
static void _futex_lock(rt_lwp_t lwp, int op_flags)
{
    if (op_flags & FUTEX_PRIVATE)
    {
        LWP_LOCK(lwp);
        return;
    }

    if (lwp_mutex_take_safe(&_glob_futex, RT_WAITING_FOREVER, 0))
    {
        LOG_E("%s: Should not failed", __func__);
        RT_ASSERT(0);
    }
}
/* Release the lock taken by _futex_lock() for the same op_flags. */
static void _futex_unlock(rt_lwp_t lwp, int op_flags)
{
    if (op_flags & FUTEX_PRIVATE)
    {
        LWP_UNLOCK(lwp);
        return;
    }

    if (lwp_mutex_release_safe(&_glob_futex))
    {
        LOG_E("%s: Should not failed", __func__);
        RT_ASSERT(0);
    }
}
/**
 * Destroy a Private FuTeX (pftx).
 * Custom-object destructor: unlink the futex from its owning lwp's AVL
 * tree and free it (plus any PI backing mutex).
 *
 * Note: must have futex address_search_head taken
 *
 * @param data the rt_futex to destroy (cast from void *)
 * @return 0 on success, -1 when data is NULL
 */
static rt_err_t _pftx_destroy_locked(void *data)
{
    rt_err_t ret = -1;
    rt_futex_t futex = (rt_futex_t)data;
    if (futex)
    {
        /**
         * Brief: Delete the futex from lwp address_search_head
         *
         * Note: Critical Section
         * - the lwp (READ. share by thread)
         * - the lwp address_search_head (RW. protected by caller. for destroy
         * routine, it's always safe because it has already taken a write lock
         * to the lwp.)
         */
        /* node.data stores &lwp->address_search_head (set at creation) */
        lwp_avl_remove(&futex->node,
                       (struct lwp_avl_struct **)futex->node.data);

        /* release object */
        if (futex->mutex)
        {
            rt_mutex_delete(futex->mutex);
            futex->mutex = RT_NULL;
        }
        rt_free(futex);
        ret = 0;
    }
    return ret;
}
/**
 * Create a Private FuTeX (pftx) bound to user address 'uaddr' and insert
 * it into the lwp's AVL tree and user-object tree.
 *
 * Note: must have futex address_search_head taken
 *
 * @return the new futex, or RT_NULL on allocation/registration failure
 */
static rt_futex_t _pftx_create_locked(int *uaddr, struct rt_lwp *lwp)
{
    rt_futex_t futex = RT_NULL;
    struct rt_object *obj = RT_NULL;
    /**
     * Brief: Create a futex under current lwp
     *
     * Note: Critical Section
     * - lwp (READ; share with thread)
     */
    if (lwp)
    {
        futex = (rt_futex_t)rt_malloc(sizeof(struct rt_futex));
        if (futex)
        {
            /* Create a Private FuTeX (pftx) */
            obj = rt_custom_object_create("pftx", (void *)futex,
                                          _pftx_destroy_locked);
            if (!obj)
            {
                rt_free(futex);
                futex = RT_NULL;
            }
            else
            {
                /**
                 * Brief: Add futex to user object tree for resource recycling
                 *
                 * Note: Critical Section
                 * - lwp user object tree (RW; protected by API)
                 * - futex (if the adding is successful, others can find the
                 *   unready futex. However, only the lwp_free will do this,
                 *   and this is protected by the ref taken by the lwp thread
                 *   that the lwp_free will never execute at the same time)
                 */
                if (lwp_user_object_add(lwp, obj))
                {
                    /* this will call a _pftx_destroy_locked, but that's okay */
                    rt_object_delete(obj);
                    rt_free(futex);
                    futex = RT_NULL;
                }
                else
                {
                    /* keyed by the user address; node.data remembers the
                     * tree root so the destructor can unlink it later */
                    futex->node.avl_key = (avl_key_t)uaddr;
                    futex->node.data = &lwp->address_search_head;
                    futex->custom_obj = obj;
                    futex->mutex = RT_NULL;
                    rt_list_init(&(futex->waiting_thread));
                    /**
                     * Brief: Insert into futex head
                     *
                     * Note: Critical Section
                     * - lwp address_search_head (RW; protected by caller)
                     */
                    lwp_avl_insert(&futex->node, &lwp->address_search_head);
                }
            }
        }
    }
    return futex;
}
/**
 * Look up the private futex bound to 'uaddr' inside this lwp, creating it
 * on first use.
 *
 * @param rc out: 0 on success, -ENOMEM when the creation failed
 * @return the futex, or RT_NULL on failure
 */
static rt_futex_t _pftx_get(void *uaddr, struct rt_lwp *lwp, int op,
                            rt_err_t *rc)
{
    struct lwp_avl_struct *avl_node;
    rt_futex_t found = RT_NULL;
    rt_err_t err = 0;

    LWP_LOCK(lwp);

    /* the AVL tree keyed by user address is protected by the lwp lock */
    avl_node = lwp_avl_find((avl_key_t)uaddr, lwp->address_search_head);
    if (avl_node)
    {
        found = rt_container_of(avl_node, struct rt_futex, node);
    }
    else
    {
        /* first operation on this address: allocate the futex object */
        found = _pftx_create_locked(uaddr, lwp);
        if (!found)
        {
            err = -ENOMEM;
        }
    }

    LWP_UNLOCK(lwp);
    *rc = err;
    return found;
}
/**
 * Destroy a Shared FuTeX (sftx).
 * Custom-object destructor: remove the futex from the global table and
 * free it (plus any PI backing mutex).
 *
 * @return 0 on success, -1 when data is NULL
 */
static rt_err_t _sftx_destroy(void *data)
{
    rt_futex_t victim = (rt_futex_t)data;

    if (!victim)
    {
        return -1;
    }

    /* delete it even it's not in the table */
    futex_global_table_delete(&victim->entry.key);
    if (victim->mutex)
    {
        rt_mutex_delete(victim->mutex);
        victim->mutex = RT_NULL;
    }
    rt_free(victim);
    return 0;
}
/**
 * Create a Shared FuTeX (sftx) and register it in the global table under
 * 'key' (memory object + offset).
 *
 * @return the new futex, or RT_NULL on allocation/registration failure
 */
static rt_futex_t _sftx_create(struct shared_futex_key *key, struct rt_lwp *lwp)
{
    rt_futex_t futex = RT_NULL;
    struct rt_object *obj = RT_NULL;
    if (lwp)
    {
        /* rt_calloc zero-fills, so entry/key fields start clean */
        futex = (rt_futex_t)rt_calloc(1, sizeof(struct rt_futex));
        if (futex)
        {
            /* create a Shared FuTeX (sftx) */
            obj = rt_custom_object_create("sftx", (void *)futex, _sftx_destroy);
            if (!obj)
            {
                rt_free(futex);
                futex = RT_NULL;
            }
            else
            {
                if (futex_global_table_add(key, futex))
                {
                    /* registration failed: undo the custom object too */
                    rt_object_delete(obj);
                    rt_free(futex);
                    futex = RT_NULL;
                }
                else
                {
                    futex->mutex = RT_NULL;
                    rt_list_init(&(futex->waiting_thread));
                    futex->custom_obj = obj;
                }
            }
        }
    }
    return futex;
}
/**
 * Get a Shared FuTeX (sftx) matching (lwp, uaddr).  The identity of a
 * shared futex is the backing memory object plus the page offset of
 * uaddr, so different processes mapping the same object reach the same
 * futex.
 *
 * @param rc out: 0 on success; -ENOMEM on allocation failure; left at -1
 *           when uaddr maps to no varea
 * @return the futex, or RT_NULL on failure
 */
static rt_futex_t _sftx_get(void *uaddr, struct rt_lwp *lwp, int op,
                            rt_err_t *rc)
{
    rt_futex_t futex = RT_NULL;
    struct shared_futex_key key;
    rt_varea_t varea;
    rt_err_t error = -1;

    /* resolve uaddr -> (mem_obj, offset) under the aspace read lock */
    RD_LOCK(lwp->aspace);
    varea = rt_aspace_query(lwp->aspace, uaddr);
    if (varea)
    {
        key.mobj = varea->mem_obj;
        key.offset = ((varea->offset) << MM_PAGE_SHIFT) |
                     ((long)uaddr & ((1 << MM_PAGE_SHIFT) - 1));
        RD_UNLOCK(lwp->aspace);

        /* query for the key */
        /* force the shared (global) lock regardless of caller flags */
        _futex_lock(lwp, op & ~FUTEX_PRIVATE);
        error = futex_global_table_find(&key, &futex);
        if (error != RT_EOK)
        {
            /* not found, do allocation */
            futex = _sftx_create(&key, lwp);
            if (!futex)
                error = -ENOMEM;
            else
                error = 0;
        }
        _futex_unlock(lwp, op & ~FUTEX_PRIVATE);
    }
    else
    {
        RD_UNLOCK(lwp->aspace);
    }

    *rc = error;
    return futex;
}
/* Resolve uaddr to a futex object, dispatching to the private or shared
 * lookup path according to op_flags; *rc receives the status. */
static rt_futex_t _futex_get(void *uaddr, struct rt_lwp *lwp, int op_flags,
                             rt_err_t *rc)
{
    return (op_flags & FUTEX_PRIVATE)
               ? _pftx_get(uaddr, lwp, op_flags, rc)
               : _sftx_get(uaddr, lwp, op_flags, rc);
}
/**
 * Suspend 'thread' on the futex wait list and arm its per-thread timer.
 * Caller holds the futex lock and a scheduler critical section.
 *
 * @param timeout wakeup delay in ticks
 * @return RT_EOK when the thread was queued, error from suspend otherwise
 */
static rt_err_t _suspend_thread_timeout_locked(rt_thread_t thread,
                                               rt_futex_t futex,
                                               rt_tick_t timeout)
{
    rt_err_t rc;
    /**
     * Brief: Add current thread into futex waiting thread list
     *
     * Note: Critical Section
     * - the futex waiting_thread list (RW)
     */
    rc = rt_thread_suspend_to_list(thread, &futex->waiting_thread,
                                   RT_IPC_FLAG_FIFO, RT_INTERRUPTIBLE);
    if (rc == RT_EOK)
    {
        /* start the timer of thread */
        rt_timer_control(&(thread->thread_timer), RT_TIMER_CTRL_SET_TIME,
                         &timeout);
        rt_timer_start(&(thread->thread_timer));
        /* NOTE(review): errno is pre-set to ETIMEDOUT here, presumably so a
         * timer-driven resume reads back as a timeout while an explicit
         * wake overwrites it — confirm against the wake path */
        rt_set_errno(ETIMEDOUT);
    }
    return rc;
}
/* Suspend 'thread' on the futex wait list without a timeout; caller
 * holds the futex lock and a scheduler critical section. */
static rt_err_t _suspend_thread_locked(rt_thread_t thread, rt_futex_t futex)
{
    rt_err_t err;

    err = rt_thread_suspend_to_list(thread, &futex->waiting_thread,
                                    RT_IPC_FLAG_FIFO, RT_INTERRUPTIBLE);
    return err;
}
/**
 * Atomically replace *uaddr with newval iff it still holds uval.
 *
 * @param curval out: the value actually observed when the exchange loses
 * @return 0 on success, -EAGAIN on contention, -EFAULT when uaddr is not
 *         a valid user address
 */
rt_inline int _futex_cmpxchg_value(int *curval, int *uaddr, int uval,
                                   int newval)
{
    if (!lwp_user_accessable((void *)uaddr, sizeof(*uaddr)))
    {
        return -EFAULT;
    }

    if (!atomic_compare_exchange_strong(uaddr, &uval, newval))
    {
        /* lost the race: report what we saw instead */
        *curval = uval;
        return -EAGAIN;
    }
    return 0;
}
/**
 * FUTEX_WAIT: block the caller on 'futex' as long as *uaddr still equals
 * 'value', optionally with a timeout.
 *
 * @return 0 on wake, -EAGAIN when *uaddr != value, -EINVAL on a bad
 *         timeout, negative errno (e.g. -ETIMEDOUT / -EINTR) otherwise
 */
static int _futex_wait(rt_futex_t futex, struct rt_lwp *lwp, int *uaddr,
                       int value, const struct timespec *timeout, int op_flags)
{
    rt_tick_t to;
    rt_thread_t thread;
    rt_err_t rc = -RT_EINTR;
    /**
     * Brief: Remove current thread from scheduler, besides appends it to
     * the waiting thread list of the futex. If the timeout is specified
     * a timer will be setup for current thread
     *
     * Note: Critical Section
     * - futex.waiting (RW; Protected by lwp_lock)
     * - the local cpu
     */
    _futex_lock(lwp, op_flags);
    /* NOTE(review): *uaddr is dereferenced directly rather than through
     * lwp_get_from_user — presumably valid because the caller runs with
     * the lwp's address space active; confirm */
    if (*uaddr == value)
    {
        thread = rt_thread_self();
        if (timeout)
        {
            /* convert the timespec to ticks */
            to = timeout->tv_sec * RT_TICK_PER_SECOND;
            to +=
                (timeout->tv_nsec * RT_TICK_PER_SECOND) / NANOSECOND_PER_SECOND;
            /* NOTE(review): the negativity check only works if rt_tick_t
             * is signed; tv_sec overflow can also wrap — confirm */
            if (to < 0)
            {
                rc = -EINVAL;
                _futex_unlock(lwp, op_flags);
            }
            else
            {
                /* stay unscheduled until both the queueing and the futex
                 * unlock are done, then yield */
                rt_enter_critical();
                rc = _suspend_thread_timeout_locked(thread, futex, to);
                _futex_unlock(lwp, op_flags);
                rt_exit_critical();
            }
        }
        else
        {
            rt_enter_critical();
            rc = _suspend_thread_locked(thread, futex);
            _futex_unlock(lwp, op_flags);
            rt_exit_critical();
        }
        if (rc == RT_EOK)
        {
            /* do schedule */
            rt_schedule();
            /* check errno */
            rc = rt_get_errno();
            rc = rc > 0 ? -rc : rc;
        }
    }
    else
    {
        /* value changed before we could sleep: report EAGAIN per futex(2) */
        _futex_unlock(lwp, op_flags);
        rc = -EAGAIN;
        rt_set_errno(EAGAIN);
    }
    return rc;
}
/**
 * FUTEX_WAKE: wake up to 'number' threads sleeping on the futex.
 *
 * @return the count of threads actually woken
 */
static long _futex_wake(rt_futex_t futex, struct rt_lwp *lwp, int number,
                        int op_flags)
{
    long woken = 0;

    /* dequeue one sleeper per iteration, re-taking the futex lock each
     * round so concurrent wakers interleave */
    while (number != 0)
    {
        rt_thread_t sleeper;

        _futex_lock(lwp, op_flags);
        sleeper = rt_susp_list_dequeue(&futex->waiting_thread, RT_EOK);
        _futex_unlock(lwp, op_flags);

        if (!sleeper)
        {
            break; /* wait list drained */
        }
        number--;
        woken++;
    }

    /* give the woken threads a chance to run */
    rt_schedule();
    return woken;
}
/**
 * Brief: Wake up to nr_wake futex1 threads.
 * If there are more waiters waiting on futex1 than nr_wake,
 * insert the remaining at most nr_requeue waiters waiting
 * on futex1 into the waiting queue of futex2.
 *
 * Caller holds the futex lock (see lwp_futex()).
 *
 * @return number of threads woken plus threads requeued, or -EINVAL when
 *         both futexes are the same object
 */
static long _futex_requeue(rt_futex_t futex1, rt_futex_t futex2,
                           struct rt_lwp *lwp, int nr_wake, int nr_requeue,
                           int opflags)
{
    long rtn;
    long woken_cnt = 0;
    int is_empty = 0;
    rt_thread_t thread;

    if (futex1 == futex2)
    {
        return -EINVAL;
    }

    /**
     * Brief: Wakeup a suspended thread on the futex waiting thread list
     *
     * Note: Critical Section
     * - the futex waiting_thread list (RW)
     */
    while (nr_wake && !is_empty)
    {
        if (rt_susp_list_dequeue(&futex1->waiting_thread, RT_EOK))
        {
            nr_wake--;
            woken_cnt++;
            is_empty = RT_FALSE;
        }
        else
        {
            is_empty = RT_TRUE;
        }
    }
    rtn = woken_cnt;

    /**
     * Brief: Requeue
     *
     * Note: Critical Section
     * - the futex waiting_thread list (RW)
     */
    while (!is_empty && nr_requeue)
    {
        rt_sched_lock_level_t slvl;
        rt_sched_lock(&slvl);
        /* moving from one susp list to another */
        is_empty = rt_list_isempty(&(futex1->waiting_thread));
        if (!is_empty)
        {
            /* move the head waiter of futex1 onto futex2's list without
             * waking it; the scheduler lock keeps the lists consistent */
            thread = RT_THREAD_LIST_NODE_ENTRY(futex1->waiting_thread.next);
            rt_list_remove(&RT_THREAD_LIST_NODE(thread));
            rt_list_insert_before(&(futex2->waiting_thread),
                                  &RT_THREAD_LIST_NODE(thread));
            nr_requeue--;
            rtn++;
        }
        rt_sched_unlock(slvl);
    }

    /* do schedule */
    rt_schedule();
    return rtn;
}
/* timeout argument measured against the CLOCK_REALTIME clock. */
/**
 * FUTEX_LOCK_PI / FUTEX_TRYLOCK_PI: acquire a priority-inheritance futex.
 * The futex word encodes the owner TID plus FUTEX_WAITERS; a kernel mutex
 * backing the futex provides the actual PI blocking.
 *
 * @param trylock RT_TRUE for FUTEX_TRYLOCK_PI (never blocks)
 * @return 0 on success, negative errno on failure
 */
static long _futex_lock_pi(rt_futex_t futex, struct rt_lwp *lwp, int *uaddr,
                           const struct timespec *timeout, int op_flags,
                           rt_bool_t trylock)
{
    int word = 0, nword, cword;
    int tid = 0;
    rt_err_t err = 0;
    rt_thread_t thread = RT_NULL, current_thread = RT_NULL;
    rt_tick_t to = RT_WAITING_FOREVER;

    if (!lwp_user_accessable((void *)uaddr, sizeof(*uaddr)))
    {
        return -EFAULT;
    }
    current_thread = rt_thread_self();

    _futex_lock(lwp, op_flags);
    lwp_get_from_user(&word, (void *)uaddr, sizeof(int));
    tid = word & FUTEX_TID_MASK;
    if (word == 0)
    {
        /* If the value is 0, then the kernel tries
           to atomically set the futex value to the caller's TID. */
        nword = current_thread->tid;
        if (_futex_cmpxchg_value(&cword, uaddr, word, nword))
        {
            _futex_unlock(lwp, op_flags);
            return -EAGAIN;
        }
        /* uncontended fast path: lock acquired without a kernel mutex */
        _futex_unlock(lwp, op_flags);
        return 0;
    }
    else
    {
        /* futex is held by 'tid'; validate the owner thread exists */
        thread = lwp_tid_get_thread_and_inc_ref(tid);
        if (thread == RT_NULL)
        {
            _futex_unlock(lwp, op_flags);
            return -ESRCH;
        }
        lwp_tid_dec_ref(thread);
        /* announce contention so the owner must go through the kernel */
        nword =
            word | FUTEX_WAITERS;
        if (_futex_cmpxchg_value(&cword, uaddr, word, nword))
        {
            _futex_unlock(lwp, op_flags);
            return -EAGAIN;
        }
        word = nword;
    }
    if (futex->mutex == RT_NULL)
    {
        /* lazily create the PI backing mutex on first contention */
        futex->mutex = rt_mutex_create("futexpi", RT_IPC_FLAG_PRIO);
        if (futex->mutex == RT_NULL)
        {
            _futex_unlock(lwp, op_flags);
            return -ENOMEM;
        }
        /* set mutex->owner */
        /* NOTE(review): the owner/hold fields are poked directly instead
         * of going through a take() by the owner thread — confirm this
         * keeps PI bookkeeping consistent */
        rt_spin_lock(&(futex->mutex->spinlock));
        futex->mutex->owner = thread;
        futex->mutex->hold = 1;
        rt_spin_unlock(&(futex->mutex->spinlock));
    }
    if (timeout)
    {
        to = rt_timespec_to_tick(timeout);
    }
    if (trylock)
    {
        to = RT_WAITING_NO;
    }
    _futex_unlock(lwp, op_flags);

    /* block on the backing mutex (PI kicks in here) */
    err = rt_mutex_take_interruptible(futex->mutex, to);
    if (err == -RT_ETIMEOUT)
    {
        err = -EDEADLK;
    }

    /* we own the mutex now: publish our TID in the futex word, keeping
     * FUTEX_WAITERS set */
    _futex_lock(lwp, op_flags);
    nword = current_thread->tid | FUTEX_WAITERS;
    if (_futex_cmpxchg_value(&cword, uaddr, word, nword))
    {
        err = -EAGAIN;
    }
    _futex_unlock(lwp, op_flags);
    return err;
}
/**
 * FUTEX_UNLOCK_PI: release the PI backing mutex of the futex.
 *
 * NOTE(review): the mutex pointer is checked under the futex lock but
 * released after dropping it, and the caller's ownership of the futex
 * word is not verified here — confirm rt_mutex_release() rejects
 * non-owner callers.
 *
 * @return 0 on success, -EPERM when no PI mutex exists, or the
 *         rt_mutex_release() error
 */
static long _futex_unlock_pi(rt_futex_t futex, struct rt_lwp *lwp, int op_flags)
{
    rt_err_t err = 0;
    _futex_lock(lwp, op_flags);
    if (!futex->mutex)
    {
        _futex_unlock(lwp, op_flags);
        return -EPERM;
    }
    _futex_unlock(lwp, op_flags);
    err = rt_mutex_release(futex->mutex);
    return err;
}
#include <syscall_generic.h>

rt_inline rt_bool_t _timeout_ignored(int op)
{
    /**
     * if (op &
     * (FUTEX_WAKE|FUTEX_FD|FUTEX_WAKE_BITSET|FUTEX_TRYLOCK_PI|FUTEX_UNLOCK_PI))
     * was TRUE `timeout` should be ignored by implementation, according to
     * POSIX futex(2) manual. since only FUTEX_WAKE is implemented in rt-smart,
     * only FUTEX_WAKE was omitted currently
     */
    const int ignored_ops = FUTEX_WAKE | FUTEX_REQUEUE | FUTEX_CMP_REQUEUE |
                            FUTEX_UNLOCK_PI | FUTEX_TRYLOCK_PI;
    /* x & (a|b) == (x&a) | (x&b), so one combined mask test is
     * equivalent to the chain of per-op tests */
    return (op & ignored_ops) != 0;
}
/**
 * futex(2) system-call entry: validate the user pointers, then hand the
 * request to lwp_futex() for the calling process.
 */
sysret_t sys_futex(int *uaddr, int op, int val, const struct timespec *timeout,
                   int *uaddr2, int val3)
{
    sysret_t rc;

    if (!lwp_user_accessable(uaddr, sizeof(int)))
    {
        rc = -EFAULT;
    }
    else if (timeout && !_timeout_ignored(op) &&
             !lwp_user_accessable((void *)timeout, sizeof(struct timespec)))
    {
        /* the timeout pointer is only validated for ops that consume it */
        rc = -EINVAL;
    }
    else
    {
        rc = lwp_futex(lwp_self(), uaddr, op, val, timeout, uaddr2, val3);
    }
    return rc;
}
/* flag bits carried alongside the operation code */
#define FUTEX_FLAGS (FUTEX_PRIVATE | FUTEX_CLOCK_REALTIME)
/**
 * Core futex dispatcher: split 'op' into operation and flags, resolve the
 * futex object(s) for uaddr (and uaddr2 where applicable), and run the
 * requested operation.
 *
 * For FUTEX_REQUEUE / FUTEX_CMP_REQUEUE the 'timeout' argument is reused
 * as the nr_requeue integer, matching the Linux calling convention.
 *
 * @return operation-specific non-negative result, or a negative errno
 */
rt_err_t lwp_futex(struct rt_lwp *lwp, int *uaddr, int op, int val,
                   const struct timespec *timeout, int *uaddr2, int val3)
{
    rt_futex_t futex, futex2;
    rt_err_t rc = 0;
    int op_type = op & ~FUTEX_FLAGS;
    int op_flags = op & FUTEX_FLAGS;

    futex = _futex_get(uaddr, lwp, op_flags, &rc);
    if (!rc)
    {
        switch (op_type)
        {
            case FUTEX_WAIT:
                rc = _futex_wait(futex, lwp, uaddr, val, timeout, op_flags);
                break;
            case FUTEX_WAKE:
                rc = _futex_wake(futex, lwp, val, op_flags);
                break;
            case FUTEX_REQUEUE:
                futex2 = _futex_get(uaddr2, lwp, op_flags, &rc);
                if (!rc)
                {
                    _futex_lock(lwp, op_flags);
                    rc = _futex_requeue(futex, futex2, lwp, val, (long)timeout,
                                        op_flags);
                    _futex_unlock(lwp, op_flags);
                }
                break;
            case FUTEX_CMP_REQUEUE:
                futex2 = _futex_get(uaddr2, lwp, op_flags, &rc);
                _futex_lock(lwp, op_flags);
                /* NOTE(review): *uaddr is read directly from user space and
                 * the rc from _futex_get() is overwritten here — confirm
                 * failure of the second lookup cannot slip through */
                if (*uaddr == val3)
                {
                    rc = 0;
                }
                else
                {
                    rc = -EAGAIN;
                }
                if (rc == 0)
                {
                    rc = _futex_requeue(futex, futex2, lwp, val,
                                        (long)timeout, op_flags);
                }
                _futex_unlock(lwp, op_flags);
                break;
            case FUTEX_LOCK_PI:
                rc = _futex_lock_pi(futex, lwp, uaddr, timeout, op_flags,
                                    RT_FALSE);
                break;
            case FUTEX_UNLOCK_PI:
                rc = _futex_unlock_pi(futex, lwp, op_flags);
                break;
            case FUTEX_TRYLOCK_PI:
                /* trylock: same path as LOCK_PI but never blocks */
                rc = _futex_lock_pi(futex, lwp, uaddr, 0, op_flags, RT_TRUE);
                break;
            default:
                LOG_W("User require op=%d which is not implemented", op);
                rc = -ENOSYS;
                break;
        }
    }
    return rc;
}
/**
 * Fetch one robust-list pointer from user space.
 *
 * The low bit of the stored pointer is a flag (extracted into *is_pi);
 * *entry receives the pointer with that bit cleared.
 *
 * @return 0 on success, -EFAULT when the user pointer is unreadable
 */
rt_inline int _fetch_robust_entry(struct robust_list **entry,
                                  struct robust_list **head, rt_bool_t *is_pi)
{
    unsigned long uentry;
    if (!lwp_user_accessable((void *)head, sizeof(*head)))
    {
        return -EFAULT;
    }
    if (lwp_get_from_user(&uentry, (void *)head, sizeof(*head)) !=
        sizeof(*head))
    {
        return -EFAULT;
    }
    /* bit 0 tags the entry; mask it off to recover the real pointer */
    *entry = (void *)(uentry & ~1UL);
    *is_pi = uentry & 1;
    return 0;
}
/**
 * Handle one robust-list lock whose owner thread is exiting: mark the
 * futex word with FUTEX_OWNER_DIED (keeping FUTEX_WAITERS) and wake one
 * waiter so it can observe the dead owner.
 *
 * @param is_pending_op entry came from list_op_pending (an operation that
 *                      was in flight when the thread died)
 * @return 0 on success/ignore, -1 on unreadable/misaligned address
 */
static int _handle_futex_death(int *uaddr, rt_thread_t thread, rt_bool_t is_pi,
                               rt_bool_t is_pending_op)
{
    int word, cword = 0, nword;
    rt_err_t rc;
    struct rt_lwp *lwp;
    rt_futex_t futex;

    /* Futex address must be 32bit aligned */
    if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
        return -1;

    lwp = thread->lwp;

retry:
    if (!lwp_user_accessable((void *)uaddr, sizeof(*uaddr)))
    {
        return -1;
    }
    if (lwp_get_from_user(&word, (void *)uaddr, sizeof(*uaddr)) !=
        sizeof(*uaddr))
    {
        return -1;
    }

    futex = _futex_get(uaddr, lwp, FUTEX_PRIVATE, &rc);
    /* a pending (possibly incomplete) unlock of a non-PI futex that is
     * already 0: just wake a potential waiter */
    if (is_pending_op && !is_pi && !word)
    {
        _futex_wake(futex, lwp, 1, FUTEX_PRIVATE);
        return 0;
    }

    /* only locks actually owned by the dying thread are marked */
    if ((word & FUTEX_TID_MASK) != thread->tid)
        return 0;

    nword = (word & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
    if ((rc = _futex_cmpxchg_value(&cword, uaddr, word, nword)))
    {
        switch (rc)
        {
            case -EFAULT:
                return -1;
            case -EAGAIN:
                /* lost a race with user space; re-read and retry */
                rt_schedule();
                goto retry;
            default:
                LOG_W("unknown errno: %d in '%s'", rc, __FUNCTION__);
                return rc;
        }
    }
    if (cword != word)
        goto retry;

    /* PI futexes are cleaned up through the mutex; non-PI waiters are
     * woken explicitly */
    if (!is_pi && (word & FUTEX_WAITERS))
        _futex_wake(futex, lwp, 1, FUTEX_PRIVATE);
    return 0;
}
/**
 * Brief: Walk thread->robust_list mark
 * any locks found there dead, and notify any waiters.
 *
 * note: very carefully, it's a userspace list!
 *
 * Called on thread exit.  Every pointer is fetched through the
 * user-access helpers and the walk is bounded, so a corrupted or
 * malicious list cannot hang or crash the kernel.
 */
void lwp_futex_exit_robust_list(rt_thread_t thread)
{
    struct robust_list *entry = RT_NULL;
    struct robust_list *next_entry = RT_NULL;
    struct robust_list *pending = RT_NULL;
    struct robust_list_head *head;
    unsigned int limit = 2048; /* cap: presumably guards against user-built
                                * cycles in the list — confirm */
    rt_bool_t pi, pip, next_pi;
    unsigned long futex_offset;
    int rc;

    head = thread->robust_list;
    if (head == RT_NULL)
        return;

    if (_fetch_robust_entry(&entry, &head->list.next, &pi))
        return;
    /* offset from each list node to its futex word */
    if (!lwp_user_accessable((void *)&head->futex_offset,
                             sizeof(head->futex_offset)))
    {
        return;
    }
    if (lwp_get_from_user(&futex_offset, (void *)&head->futex_offset,
                          sizeof(head->futex_offset)) !=
        sizeof(head->futex_offset))
    {
        return;
    }
    /* an operation that was mid-flight when the thread died */
    if (_fetch_robust_entry(&pending, &head->list_op_pending, &pip))
    {
        return;
    }

    while (entry != &head->list)
    {
        /* fetch the next node before handling this one, in case the
         * handler wakes a waiter that mutates the list */
        rc = _fetch_robust_entry(&next_entry, &entry->next, &next_pi);
        /* the pending entry is handled separately below */
        if (entry != pending)
        {
            if (_handle_futex_death((int *)((size_t)entry + futex_offset), thread, pi,
                                    RT_FALSE))
                return;
        }
        if (rc)
            return;
        entry = next_entry;
        pi = next_pi;
        if (!--limit)
            break;
    }

    if (pending)
    {
        _handle_futex_death((void *)pending + futex_offset, thread, pip,
                            RT_TRUE);
    }
}

View File

@ -0,0 +1,54 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-01 Shell Init ver.
*/
#ifndef __LWP_FUTEX_INTERNAL_H__
#define __LWP_FUTEX_INTERNAL_H__
#define DBG_TAG "lwp.futex"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "rt_uthash.h"
#include "lwp_internal.h"
#include "lwp_pid.h"
#include <rtthread.h>
#include <lwp.h>
#ifdef ARCH_MM_MMU
#include <lwp_user_mm.h>
#endif /* ARCH_MM_MMU */
/* Identity of a process-shared futex: the backing memory object plus the
 * offset of the futex word inside it (stable across address spaces). */
struct shared_futex_key
{
    rt_mem_obj_t mobj;
    rt_base_t offset;
};

/* Hash-table entry type keyed by struct shared_futex_key (field name: key). */
DEFINE_RT_UTHASH_TYPE(shared_futex_entry, struct shared_futex_key, key);

/* Kernel-side futex object. The union reflects the two lookup paths:
 * an AVL node for private futexes, a hash entry for shared ones. */
struct rt_futex
{
    union {
        /* for private futex */
        struct lwp_avl_struct node;
        /* for shared futex */
        struct shared_futex_entry entry;
    };
    rt_list_t waiting_thread;      /* threads blocked on this futex */
    struct rt_object *custom_obj;
    rt_mutex_t mutex;              /* serializes access to this futex */
};
typedef struct rt_futex *rt_futex_t;

/* Global shared-futex table: add/find/delete by shared_futex_key. */
rt_err_t futex_global_table_add(struct shared_futex_key *key, rt_futex_t futex);
rt_err_t futex_global_table_find(struct shared_futex_key *key, rt_futex_t *futex);
rt_err_t futex_global_table_delete(struct shared_futex_key *key);

#endif /* __LWP_FUTEX_INTERNAL_H__ */

View File

@ -0,0 +1,65 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-01 Shell Init ver.
*/
#include "lwp_futex_internal.h"
static struct shared_futex_entry *_futex_hash_head;
/**
 * Insert @futex into the global shared-futex hash table under @key.
 * The key is copied into the entry embedded in the futex object.
 */
rt_err_t futex_global_table_add(struct shared_futex_key *key, rt_futex_t futex)
{
    struct shared_futex_entry *entry = &futex->entry;

    /* stamp the identity into the embedded hash entry */
    entry->key.mobj = key->mobj;
    entry->key.offset = key->offset;

    RT_UTHASH_ADD(_futex_hash_head, key, sizeof(struct shared_futex_key), entry);
    return RT_EOK;
}
/**
 * Look up a shared futex by @key.
 *
 * @param futex out: the found futex, or RT_NULL when absent
 * @return RT_EOK on hit, -RT_ENOENT when no entry matches
 */
rt_err_t futex_global_table_find(struct shared_futex_key *key, rt_futex_t *futex)
{
    struct shared_futex_entry *entry;

    RT_UTHASH_FIND(_futex_hash_head, key, sizeof(struct shared_futex_key), entry);

    if (!entry)
    {
        *futex = RT_NULL;
        return -RT_ENOENT;
    }

    /* recover the owning futex object from its embedded hash entry */
    *futex = rt_container_of(entry, struct rt_futex, entry);
    return RT_EOK;
}
/**
 * Remove the shared futex identified by @key from the global table.
 * @return RT_EOK if an entry was removed, -RT_ENOENT otherwise
 */
rt_err_t futex_global_table_delete(struct shared_futex_key *key)
{
    struct shared_futex_entry *entry;

    RT_UTHASH_FIND(_futex_hash_head, key, sizeof(struct shared_futex_key), entry);
    if (!entry)
        return -RT_ENOENT;

    RT_UTHASH_DELETE(_futex_hash_head, entry);
    return RT_EOK;
}

View File

@ -0,0 +1,182 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-07-25 Shell first version
* 2023-11-25 Shell Add pgrp, session lock API
*/
#define DBG_TAG "lwp.internal"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <stdlib.h>
#include "lwp_internal.h"
/**
 * Take @mtx with diagnostics: rejects NULL, logs double-takes of
 * non-nested locks, and (at DBG_LOG level) probes for potential
 * deadlocks by retrying a forever-wait as a zero-timeout poll first.
 *
 * @param flags LWP_MTX_FLAGS_INTR for interruptible wait,
 *              LWP_MTX_FALGS_NESTED to allow recursive holds
 */
static rt_err_t _mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, int flags)
{
    LWP_DEF_RETURN_CODE(rc);
    int retry;
    rt_int32_t effect_timeout;

#ifdef LWP_DEBUG
    rt_thread_t thread = rt_thread_self();
#endif

    if (mtx)
    {
        effect_timeout = timeout;

#if DBG_LVL == DBG_LOG && defined(LWP_DEBUG)
        int exception;
        rt_list_t *node = RT_NULL;
        struct rt_mutex *tak_obj = RT_NULL;
        /* holding other locks while blocking forever is deadlock-prone:
         * downgrade to a zero-timeout probe and report on timeout */
        if (!rt_list_isempty(&(thread->taken_object_list)) && timeout == RT_WAITING_FOREVER)
        {
            exception = 1;
            effect_timeout = 0;
        }
        else
        {
            exception = 0;
        }
#endif /* DBG_LOG && defined(LWP_DEBUG) */

        do {
            retry = 0;
            if (flags & LWP_MTX_FLAGS_INTR)
                rc = rt_mutex_take_interruptible(mtx, effect_timeout);
            else
                rc = rt_mutex_take_killable(mtx, effect_timeout);

#ifdef LWP_DEBUG
            if (rc == RT_EOK)
            {
                /* recursive hold without the NESTED flag is suspicious */
                if (!(flags & LWP_MTX_FALGS_NESTED) && rt_mutex_get_hold(mtx) > 1)
                {
                    LOG_W("Already hold the lock");
                    rt_backtrace();
                }
            }
            else if (rc == -RT_ETIMEOUT)
            {
#if DBG_LVL == DBG_LOG
                if (exception)
                {
                    /* probe timed out: dump suspended owners of our held
                     * locks, then retry with the caller's real timeout */
                    rt_list_for_each(node, &(thread->taken_object_list))
                    {
                        tak_obj = rt_list_entry(node, struct rt_mutex, taken_list);
                        if (rt_mutex_get_owner(tak_obj)->stat & RT_THREAD_SUSPEND_MASK)
                            LOG_D("Potential dead lock - Taken: %s, Try take: %s",
                                  tak_obj->parent.parent.name, mtx->parent.parent.name);
                    }
                    rt_backtrace();
                    retry = 1;
                    exception = 0;
                }
#endif
            }
            else if (rc != -RT_EINTR)
            {
                /* anything other than ok/timeout/interrupt is unexpected */
                char tname[RT_NAME_MAX];
                rt_thread_get_name(thread, tname, sizeof(tname));
                LOG_W("Possible kernel corruption detected on thread %s with errno %ld", tname, rc);
            }
#endif /* LWP_DEBUG */
        } while (retry);
    }
    else
    {
        rc = -RT_ERROR;
        LOG_W("%s: mtx should not be NULL", __func__);
        RT_ASSERT(0);
    }

    LWP_RETURN(rc);
}
/**
 * Public wrapper over _mutex_take_safe(): take @mtx with the LWP safety
 * checks (NULL rejection, debug deadlock diagnostics).
 */
rt_err_t lwp_mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, int flags)
{
    LWP_DEF_RETURN_CODE(rc);
    rc = _mutex_take_safe(mtx, timeout, flags);
    LWP_RETURN(rc);
}
/**
 * Release @mtx, logging (with a backtrace) any failure so unbalanced
 * unlocks are visible during debugging.
 */
rt_err_t lwp_mutex_release_safe(rt_mutex_t mtx)
{
    LWP_DEF_RETURN_CODE(rc);

    rc = rt_mutex_release(mtx);
    if (rc != RT_EOK)
    {
        LOG_I("%s: release failed with code %ld", __func__, rc);
        rt_backtrace();
    }

    LWP_RETURN(rc);
}
/**
 * Enter the per-process critical section (lwp->lwp_lock).
 * Restarts the wait when interrupted, unless the caller asked for
 * interruptible semantics via LWP_MTX_FLAGS_INTR.
 */
rt_err_t lwp_critical_enter(struct rt_lwp *lwp, int flags)
{
    rt_err_t rc;

    do
    {
        rc = lwp_mutex_take_safe(&lwp->lwp_lock, RT_WAITING_FOREVER, flags);
    } while (rc == -RT_EINTR && !(flags & LWP_MTX_FLAGS_INTR));

    /* if current process is force killed */
    if (rc != RT_EOK && rc != -RT_EINTR)
    {
        LOG_I("%s: unexpected return code = %ld", __func__, rc);
    }

    return rc;
}
/* Leave the per-process critical section taken by lwp_critical_enter(). */
rt_err_t lwp_critical_exit(struct rt_lwp *lwp)
{
    return lwp_mutex_release_safe(&lwp->lwp_lock);
}
/**
 * Enter the process-group critical section (pgrp->mutex).
 * Restarts the wait when interrupted, unless LWP_MTX_FLAGS_INTR is set.
 */
rt_err_t lwp_pgrp_critical_enter(struct rt_processgroup *pgrp, int flags)
{
    rt_err_t rc;

    do
    {
        rc = lwp_mutex_take_safe(&pgrp->mutex, RT_WAITING_FOREVER, flags);
    } while (rc == -RT_EINTR && !(flags & LWP_MTX_FLAGS_INTR));

    /* if current process is force killed */
    if (rc != RT_EOK && rc != -RT_EINTR)
    {
        LOG_I("%s: unexpected return code = %ld", __func__, rc);
    }

    return rc;
}
/* Leave the process-group critical section taken by lwp_pgrp_critical_enter(). */
rt_err_t lwp_pgrp_critical_exit(struct rt_processgroup *pgrp)
{
    return lwp_mutex_release_safe(&pgrp->mutex);
}
/**
 * Enter the session critical section (sess->mutex).
 * Restarts the wait when interrupted, unless LWP_MTX_FLAGS_INTR is set.
 */
rt_err_t lwp_sess_critical_enter(struct rt_session *sess, int flags)
{
    rt_err_t rc;

    do
    {
        rc = lwp_mutex_take_safe(&sess->mutex, RT_WAITING_FOREVER, flags);
    } while (rc == -RT_EINTR && !(flags & LWP_MTX_FLAGS_INTR));

    /* if current process is force killed */
    if (rc != RT_EOK && rc != -RT_EINTR)
    {
        LOG_I("%s: unexpected return code = %ld", __func__, rc);
    }

    return rc;
}
/* Leave the session critical section taken by lwp_sess_critical_enter(). */
rt_err_t lwp_sess_critical_exit(struct rt_session *sess)
{
    return lwp_mutex_release_safe(&sess->mutex);
}

View File

@ -0,0 +1,189 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-07-25 Shell first version
* 2023-11-25 Shell Add pgrp, session lock API
*/
#ifndef __LWP_INTERNAL_H__
#define __LWP_INTERNAL_H__
#include "lwp.h"
#include "lwp_arch.h"
#include "lwp_user_mm.h"
#include "lwp_mm.h"
#include <rtthread.h>
#include "libc_musl.h"
struct rt_lwp;
#define LWP_MTX_FLAGS_INTR 0x1 /* interruptible waiting */
#define LWP_MTX_FALGS_NESTED 0x2 /* allow nested (historical typo: "FALGS") */
/* correctly-spelled alias; prefer this in new code — the misspelled name is
 * kept so existing callers keep compiling */
#define LWP_MTX_FLAGS_NESTED LWP_MTX_FALGS_NESTED

rt_err_t lwp_mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, int flags);
rt_err_t lwp_mutex_release_safe(rt_mutex_t mtx);
/* Test whether @addr falls inside the user virtual address range
 * [USER_VADDR_START, USER_VADDR_TOP). */
rt_inline rt_bool_t lwp_in_user_space(const char *addr)
{
    const char *lower = (const char *)USER_VADDR_START;
    const char *upper = (const char *)USER_VADDR_TOP;
    return (addr >= lower) && (addr < upper);
}
#ifdef RT_USING_SMP
#define LOCAL_IRQ_MASK() rt_hw_local_irq_disable()
#define LOCAL_IRQ_UNMASK(level) rt_hw_local_irq_enable(level)
#else
#define LOCAL_IRQ_MASK() rt_hw_interrupt_disable()
#define LOCAL_IRQ_UNMASK(level) rt_hw_interrupt_enable(level)
#endif
#ifndef LWP_USING_CPUS_LOCK
rt_err_t lwp_sess_critical_enter(struct rt_session *sess, int flags);
rt_err_t lwp_sess_critical_exit(struct rt_session *sess);
rt_err_t lwp_pgrp_critical_enter(struct rt_processgroup *pgrp, int flags);
rt_err_t lwp_pgrp_critical_exit(struct rt_processgroup *pgrp);
rt_err_t lwp_critical_enter(struct rt_lwp *lwp, int flags);
rt_err_t lwp_critical_exit(struct rt_lwp *lwp);
#define LWP_ASSERT_LOCKED(proc) RT_ASSERT(rt_mutex_get_owner(&(proc)->lwp_lock) == rt_thread_self())
#define PGRP_ASSERT_LOCKED(pgrp) RT_ASSERT(rt_mutex_get_owner(&(pgrp)->mutex) == rt_thread_self())
#define LWP_LOCK(lwp) \
do \
{ \
RT_DEBUG_SCHEDULER_AVAILABLE(1); \
if (lwp_critical_enter(lwp, 0) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
} while (0)
#define LWP_LOCK_NESTED(lwp) \
do \
{ \
RT_DEBUG_SCHEDULER_AVAILABLE(1); \
if (lwp_critical_enter(lwp, LWP_MTX_FALGS_NESTED) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
} while (0)
#define LWP_UNLOCK(lwp) \
do { \
if (lwp_critical_exit(lwp) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
} while (0)
#define PGRP_LOCK(pgrp) \
do \
{ \
RT_DEBUG_SCHEDULER_AVAILABLE(1); \
if (lwp_pgrp_critical_enter(pgrp, 0) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
} while (0)
#define PGRP_LOCK_NESTED(pgrp) \
do \
{ \
RT_DEBUG_SCHEDULER_AVAILABLE(1); \
if (lwp_pgrp_critical_enter(pgrp, LWP_MTX_FALGS_NESTED) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
} while (0)
#define PGRP_UNLOCK(pgrp) \
do \
{ \
if (lwp_pgrp_critical_exit(pgrp) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
} while (0)
#define SESS_LOCK(sess) \
do \
{ \
RT_DEBUG_SCHEDULER_AVAILABLE(1); \
if (lwp_sess_critical_enter(sess, 0) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
} while (0)
#define SESS_LOCK_NESTED(sess) \
do \
{ \
RT_DEBUG_SCHEDULER_AVAILABLE(1); \
if (lwp_sess_critical_enter(sess, LWP_MTX_FALGS_NESTED) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
} while (0)
#define SESS_UNLOCK(sess) \
do \
{ \
if (lwp_sess_critical_exit(sess) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
} while (0)
#else
#define LWP_LOCK(lwp) rt_base_t level = rt_hw_interrupt_disable()
#define LWP_UNLOCK(lwp) rt_hw_interrupt_enable(level)
#define PGRP_LOCK(pgrp) rt_base_t level = rt_hw_interrupt_disable()
#define PGRP_UNLOCK(pgrp) rt_hw_interrupt_enable(level)
#define SESS_LOCK(sess) rt_base_t level = rt_hw_interrupt_disable()
#define SESS_UNLOCK(sess) rt_hw_interrupt_enable(level)
#endif /* LWP_USING_CPUS_LOCK */
/* cpus lock */
#ifdef LWP_OVERRIDE_CPUS_LOCK
#undef rt_hw_interrupt_disable
#undef rt_hw_interrupt_enable
#define rt_hw_interrupt_disable() ({ \
rt_base_t irq = rt_hw_interrupt_is_disabled(); \
if (irq) \
{ \
LOG_W("Nested interrupt disable"); \
rt_backtrace(); \
irq = 0xabadcafe; \
} else { \
irq = rt_cpus_lock(); \
} \
irq; \
})
#define rt_hw_interrupt_enable(level) do { \
if (level != 0xabadcafe) \
rt_cpus_unlock(level); \
} while (0)
#endif /* LWP_OVERRIDE_CPUS_LOCK */
/**
* Brief: Return code with safety check
* There tend to be chances where a return value is returned without correctly init
*/
#ifndef LWP_DEBUG
#define LWP_DEF_RETURN_CODE(name) rt_err_t name;RT_UNUSED(name)
#define LWP_RETURN(name) return name
#else
#define _LWP_UNINITIALIZED_RC 0xbeefcafe
#define LWP_DEF_RETURN_CODE(name) rt_err_t name = _LWP_UNINITIALIZED_RC
#define LWP_RETURN(name) {RT_ASSERT(name != _LWP_UNINITIALIZED_RC);return name;}
#endif /* LWP_DEBUG */
#endif /* __LWP_INTERNAL_H__ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,72 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-10-12 Jesven first version
*/
#ifndef LWP_IPC_H__
#define LWP_IPC_H__
#ifdef __cplusplus
extern "C" {
#endif
/* Channel message payload kinds. */
enum
{
    RT_CHANNEL_RAW,    /* raw pointer payload */
    RT_CHANNEL_BUFFER, /* buffer + length payload */
    RT_CHANNEL_FD      /* file-descriptor payload */
};

/* One IPC channel message; 'type' selects the active member of 'u'. */
struct rt_channel_msg
{
    void *sender;
    int type;
    union
    {
        struct chbuf
        {
            void *buf;
            size_t length;
        } b;
        struct chfd
        {
            void *file;
            int fd;
        } fd;
        void* d;
    } u;
};
typedef struct rt_channel_msg *rt_channel_msg_t;
int rt_channel_open(const char *name, int flags);
rt_err_t rt_channel_close(int fd);
rt_err_t rt_channel_send(int fd, rt_channel_msg_t data);
rt_err_t rt_channel_send_recv(int fd, rt_channel_msg_t data, rt_channel_msg_t data_ret);
rt_err_t rt_channel_send_recv_timeout(int fd, rt_channel_msg_t data, rt_channel_msg_t data_ret, rt_int32_t time);
rt_err_t rt_channel_reply(int fd, rt_channel_msg_t data);
rt_err_t rt_channel_recv(int fd, rt_channel_msg_t data);
rt_err_t rt_channel_recv_timeout(int fd, rt_channel_msg_t data, rt_int32_t time);
rt_err_t rt_channel_peek(int fd, rt_channel_msg_t data);
rt_channel_t rt_raw_channel_open(const char *name, int flags);
rt_err_t rt_raw_channel_close(rt_channel_t ch);
rt_err_t rt_raw_channel_send(rt_channel_t ch, rt_channel_msg_t data);
rt_err_t rt_raw_channel_send_recv(rt_channel_t ch, rt_channel_msg_t data, rt_channel_msg_t data_ret);
rt_err_t rt_raw_channel_send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, rt_channel_msg_t data_ret, rt_int32_t time);
rt_err_t rt_raw_channel_reply(rt_channel_t ch, rt_channel_msg_t data);
rt_err_t rt_raw_channel_recv(rt_channel_t ch, rt_channel_msg_t data);
rt_err_t rt_raw_channel_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, rt_int32_t time);
rt_err_t rt_raw_channel_peek(rt_channel_t ch, rt_channel_msg_t data);
rt_err_t rt_channel_component_init(void);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,38 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-10-16 Jesven first version
*/
#ifndef LWP_IPC_INTERNAL_H__
#define LWP_IPC_INTERNAL_H__
#include <rthw.h>
#include <rtthread.h>
#include <lwp.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Selects which file-descriptor table the lwp_channel_*() calls operate on. */
enum
{
    FDT_TYPE_LWP,    /* per-process fd table */
    FDT_TYPE_KERNEL  /* kernel-side fd table */
};
int lwp_channel_open(int fdt_type, const char *name, int flags);
rt_err_t lwp_channel_close(int fdt_type, int fd);
rt_err_t lwp_channel_send(int fdt_type, int fd, rt_channel_msg_t data);
rt_err_t lwp_channel_send_recv_timeout(int fdt_type, int fd, rt_channel_msg_t data, rt_channel_msg_t data_ret, rt_int32_t time);
rt_err_t lwp_channel_reply(int fdt_type, int fd, rt_channel_msg_t data);
rt_err_t lwp_channel_recv_timeout(int fdt_type, int fd, rt_channel_msg_t data, rt_int32_t time);
#ifdef __cplusplus
}
#endif
#endif /* LWP_IPC_INTERNAL_H__*/

View File

@ -0,0 +1,71 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-30 Shell Add itimer support
*/
#define _GNU_SOURCE
#include <sys/time.h>
#undef _GNU_SOURCE
#define DBG_TAG "lwp.signal"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <rthw.h>
#include <rtthread.h>
#include <string.h>
#include "lwp_internal.h"
#include "sys/signal.h"
#include "syscall_generic.h"
/**
 * Arm or rearm the interval timer of process @lwp.
 *
 * Lazily creates one SIGALRM-delivering POSIX timer per process (cached in
 * lwp->signal.real_timer) and programs it via timer_settime().
 *
 * @param which only ITIMER_REAL is supported; others yield -ENOSYS
 * @param new   new timer setting
 * @param old   optional out: previous setting
 */
rt_err_t lwp_signal_setitimer(rt_lwp_t lwp, int which, const struct itimerspec *restrict new, struct itimerspec *restrict old)
{
    rt_err_t rc = RT_EOK;
    timer_t timerid;
    int flags = 0;

    if (lwp->signal.real_timer != LWP_SIG_INVALID_TIMER)
    {
        /* reuse the timer created on a previous call */
        timerid = lwp->signal.real_timer;
    }
    else
    {
        /* first use: create the per-process SIGALRM timer */
        struct sigevent sevp = {
            .sigev_signo = SIGALRM,
            .sigev_notify = SIGEV_SIGNAL,
        };

        timerid = 0;
        rc = timer_create(CLOCK_REALTIME_ALARM, &sevp, &timerid);
        if (rc == RT_EOK)
        {
            RT_ASSERT(timerid != LWP_SIG_INVALID_TIMER);
            lwp->signal.real_timer = timerid;
        }
        /* on failure rc carries the error and the switch below is skipped */
    }

    if (rc == RT_EOK)
    {
        switch (which)
        {
        case ITIMER_REAL:
            rc = timer_settime(timerid, flags, new, old);
            break;
        default:
            rc = -ENOSYS;
            LOG_W("%s() unsupported timer", __func__);
            break;
        }
    }

    return rc;
}

View File

@ -0,0 +1,86 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell init ver.
*/
#define DBG_TAG "lwp.tty"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <terminal/terminal.h>
#include "lwp_internal.h"
static void jobctrl_set_pgrp_orphaned(struct rt_processgroup *pgrp)
{
rt_lwp_t proc, nx_proc;
PGRP_LOCK(pgrp);
pgrp->is_orphaned = 1;
rt_list_for_each_entry(proc, &pgrp->process, pgrp_node)
{
LWP_LOCK(proc);
if (proc->jobctl_stopped)
{
LWP_UNLOCK(proc);
rt_list_for_each_entry_safe(proc, nx_proc, &pgrp->process, pgrp_node)
{
LWP_LOCK(proc);
lwp_signal_kill(proc, SIGHUP, SI_KERNEL, 0);
lwp_signal_kill(proc, SIGCONT, SI_KERNEL, 0);
LWP_UNLOCK(proc);
}
}
LWP_UNLOCK(proc);
}
PGRP_UNLOCK(pgrp);
}
/**
 * Job-control teardown run when process @lwp exits.
 *
 * If the process is its session's leader: drop leadership, send SIGHUP to
 * the controlling tty's foreground group (modem hangup), and mark every
 * process group of the session as orphaned.
 */
void lwp_jobctrl_on_exit(struct rt_lwp *lwp)
{
    rt_processgroup_t pgrp;
    rt_session_t session;
    lwp_tty_t tp;

    pgrp = lwp->pgrp;
    RT_ASSERT(pgrp);
    session = pgrp->session;
    RT_ASSERT(session);

    /**
     * as a session leader, we have to mark tty as freed. So others can race to
     * take it before we actually close and released that tty
     */
    SESS_LOCK(session);
    /* session leader iff our pid equals the session id */
    if (session->sid == lwp->pid)
    {
        tp = session->ctty;
        session->leader = 0;

        /* signal to foreground group that modem is disconnected */
        if (tp)
        {
            tty_lock(tp);
            if (tp->t_session == session)
                lwp_tty_signal_pgrp(tp, SIGHUP);
            tty_unlock(tp);
        }
        /* revoke tty vnode ? */

        /* note: 'pgrp' is reused as the iterator from here on */
        rt_list_for_each_entry(pgrp, &session->processgroup, pgrp_list_node)
        {
            jobctrl_set_pgrp_orphaned(pgrp);
        }
    }
    SESS_UNLOCK(session);

    /* release tty */
    /* allow tty stolen? */
}

View File

@ -0,0 +1,40 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#include <rthw.h>
#include <rtthread.h>
#include "lwp_mm.h"
static rt_mutex_t mm_lock;
/**
 * Take the global memory-management mutex. No-op when called without a
 * current thread (e.g. before the scheduler runs).
 */
void rt_mm_lock(void)
{
    if (rt_thread_self())
    {
        /* NOTE(review): lazy creation here is not race-free — two threads
         * could both observe mm_lock == NULL and create two mutexes;
         * confirm the first call happens before concurrent use */
        if (!mm_lock)
        {
            mm_lock = rt_mutex_create("mm_lock", RT_IPC_FLAG_FIFO);
        }
        if (mm_lock)
        {
            rt_mutex_take(mm_lock, RT_WAITING_FOREVER);
        }
    }
}
/**
 * Release the global memory-management mutex taken by rt_mm_lock().
 * No-op when there is no current thread or the mutex was never created.
 */
void rt_mm_unlock(void)
{
    if (rt_thread_self() && mm_lock)
    {
        rt_mutex_release(mm_lock);
    }
}

View File

@ -0,0 +1,16 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef __LWP_MM_H__
#define __LWP_MM_H__
void rt_mm_lock(void);
void rt_mm_unlock(void);
#endif /*__LWP_MM_H__*/

View File

@ -0,0 +1,554 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-17 xqyjlj the first version
* 2023-11-28 Shell Add reference management for pgrp;
* Using lwp lock API and fix the dead lock problem
*/
#include "lwp.h"
#include "lwp_internal.h"
#include "lwp_syscall.h"
#define DBG_TAG "lwp.pgrp"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>
/**
 * Drop one reference on @pgrp; the last reference frees the group.
 */
void lwp_pgrp_dec_ref(rt_processgroup_t pgrp)
{
    /* rt_atomic_add returns the previous value: 1 means this was last ref */
    if (rt_atomic_add(&pgrp->ref, -1) == 1)
    {
        rt_mutex_detach(&(pgrp->mutex));
        pgrp->pgid = 0; /* clear self pgid */
        rt_free(pgrp);
    }
}
/**
 * Look up the process group for @pgid and take a reference on it.
 * @return the group with one extra reference, or RT_NULL when not found
 */
rt_processgroup_t lwp_pgrp_find_and_inc_ref(pid_t pgid)
{
    rt_processgroup_t group = lwp_pgrp_find(pgid);

    if (group != RT_NULL)
        rt_atomic_add(&(group->ref), 1);

    return group;
}
/**
 * Find the process group whose pgid equals @pgid (0 means the calling
 * process's pid) by scanning the kernel object list under its spinlock.
 * No reference is taken; see lwp_pgrp_find_and_inc_ref() for that.
 */
rt_processgroup_t lwp_pgrp_find(pid_t pgid)
{
    rt_base_t level;
    rt_processgroup_t found = RT_NULL;
    rt_processgroup_t iter;
    rt_list_t *node = RT_NULL;
    struct rt_object_information *information = RT_NULL;

    information = rt_object_get_information(RT_Object_Class_ProcessGroup);

    /* parameter check */
    if ((pgid < 0) || (information == RT_NULL))
    {
        return RT_NULL;
    }

    /* pgid 0 refers to the calling process */
    if (pgid == 0)
    {
        pgid = lwp_getpid();
    }

    /* enter critical */
    level = rt_spin_lock_irqsave(&(information->spinlock));

    /* try to find process group */
    rt_list_for_each(node, &(information->object_list))
    {
        iter = (rt_processgroup_t)rt_list_entry(node, struct rt_object, list);
        if (iter->pgid == pgid)
        {
            found = iter;
            break;
        }
    }

    rt_spin_unlock_irqrestore(&(information->spinlock), level);

    if (found == RT_NULL)
    {
        LOG_I("cannot find(pgid:%d)() by (pid:%d, pgid:%d)", pgid, lwp_getpid(), lwp_pgid_get_byprocess(lwp_self()));
    }
    return found;
}
/**
 * Allocate and initialize a new process group led by @leader; the group's
 * pgid is the leader's pid and its initial refcount is 1.
 *
 * Fixed: the trace log formerly dereferenced group->pgid unconditionally,
 * crashing on allocation failure; it now runs only on success.
 *
 * @return the new group, or RT_NULL when @leader is NULL or on OOM
 */
rt_processgroup_t lwp_pgrp_create(rt_lwp_t leader)
{
    rt_processgroup_t group = RT_NULL;

    /* parameter check */
    if (leader == RT_NULL)
    {
        return RT_NULL;
    }

    group = rt_malloc(sizeof(struct rt_processgroup));
    if (group != RT_NULL)
    {
        rt_object_init(&(group->object), RT_Object_Class_ProcessGroup, "pgrp");
        rt_list_init(&(group->process));
        rt_list_init(&(group->pgrp_list_node));
        rt_mutex_init(&(group->mutex), "pgrp", RT_IPC_FLAG_PRIO);
        group->leader = leader;
        group->sid = 0;
        group->session = RT_NULL;
        group->is_orphaned = 0;
        group->pgid = lwp_to_pid(leader);
        rt_atomic_store(&group->ref, 1);

        LOG_I("create(ptr:%p, pgid:%d)() by pid:%d", group, group->pgid, lwp_getpid());
    }

    return group;
}
#include <terminal/terminal.h>
/**
 * Detach @group from the object system and from its session (if any),
 * releasing the controlling tty's reference to it first, then drop the
 * caller's reference (usually freeing the group).
 *
 * The session lookup/removal is retried if the group's session or the
 * session's ctty changed between the unlocked lookup and the locked check.
 */
int lwp_pgrp_delete(rt_processgroup_t group)
{
    int retry = 1;
    rt_session_t session = RT_NULL;
    int is_session_free = 0;
    lwp_tty_t ctty;

    /* parameter check */
    if (group == RT_NULL)
    {
        return -EINVAL;
    }

    LOG_I("delete(ptr:%p, pgid:%d)() by pid:%d", group, group->pgid, lwp_getpid());

    while (retry)
    {
        retry = 0;
        session = lwp_session_find(lwp_sid_get_bypgrp(group));
        if (session)
        {
            ctty = session->ctty;
            if (ctty)
            {
                /**
                 * Note: it's safe to release pgrp even we do this multiple,
                 * the neccessary check is done before the tty actually detach
                 */
                tty_lock(ctty);
                tty_rel_pgrp(ctty, group); // tty_unlock
            }

            SESS_LOCK(session);
            PGRP_LOCK_NESTED(group);
            /* only remove if nothing changed since the unlocked snapshot */
            if (group->session == session && session->ctty == ctty)
            {
                rt_object_detach(&(group->object));
                is_session_free = lwp_session_remove(session, group);
            }
            else
            {
                retry = 1;
            }
            PGRP_UNLOCK(group);

            /* if the session itself was freed, its mutex is gone: skip unlock */
            if (is_session_free != 1)
                SESS_UNLOCK(session);
        }
        else
        {
            /* no session: just unregister the kernel object */
            rt_object_detach(&(group->object));
        }
    }

    lwp_pgrp_dec_ref(group);

    return 0;
}
/**
 * Add @process to @group, stamping the group's pgid/sid into the process.
 * Both locks are taken group-first (pgrp before lwp), matching the file's
 * lock ordering; the hold-count assert documents that ordering.
 */
int lwp_pgrp_insert(rt_processgroup_t group, rt_lwp_t process)
{
    /* parameter check */
    if (group == RT_NULL || process == RT_NULL)
    {
        return -EINVAL;
    }

    PGRP_LOCK_NESTED(group);
    LWP_LOCK_NESTED(process);
    RT_ASSERT(rt_mutex_get_hold(&process->lwp_lock) <= rt_mutex_get_hold(&group->mutex));

    process->pgid = group->pgid;
    process->pgrp = group;
    process->sid = group->sid;
    rt_list_insert_after(&(group->process), &(process->pgrp_node));

    LWP_UNLOCK(process);
    PGRP_UNLOCK(group);
    return 0;
}
/**
 * Remove @process from @group, clearing the process's group/sid fields.
 * If the group becomes empty it is deleted.
 *
 * @return 1 when the now-empty group was deleted, 0 otherwise, -EINVAL on
 *         NULL arguments
 */
int lwp_pgrp_remove(rt_processgroup_t group, rt_lwp_t process)
{
    rt_bool_t is_empty = RT_FALSE;

    /* parameter check */
    if (group == RT_NULL || process == RT_NULL)
    {
        return -EINVAL;
    }

    PGRP_LOCK_NESTED(group);
    LWP_LOCK_NESTED(process);
    RT_ASSERT(rt_mutex_get_hold(&process->lwp_lock) <= rt_mutex_get_hold(&group->mutex));

    rt_list_remove(&(process->pgrp_node));

    /* clear children sid and pgid */
    process->pgrp = RT_NULL;
    process->pgid = 0;
    process->sid = 0;

    LWP_UNLOCK(process);

    is_empty = rt_list_isempty(&(group->process));

    PGRP_UNLOCK(group);

    if (is_empty)
    {
        lwp_pgrp_delete(group);
        return 1;
    }
    return 0;
}
/**
 * Move @process from its current group into @group. Retries if the
 * process's group changes between the lookup and the locked check.
 */
int lwp_pgrp_move(rt_processgroup_t group, rt_lwp_t process)
{
    int retry = 1;
    rt_processgroup_t old_group;

    /* parameter check */
    if (group == RT_NULL || process == RT_NULL)
    {
        return -EINVAL;
    }

    /* already a member: nothing to do */
    if (lwp_pgid_get_bypgrp(group) == lwp_pgid_get_byprocess(process))
    {
        return 0;
    }

    PGRP_LOCK(group);

    while (retry)
    {
        retry = 0;
        old_group = lwp_pgrp_find_and_inc_ref(lwp_pgid_get_byprocess(process));
        /* NOTE(review): old_group may be RT_NULL if the process belongs to
         * no group; PGRP_LOCK(old_group) would then dereference NULL —
         * confirm every process always has a group at this point */
        PGRP_LOCK(old_group);
        LWP_LOCK(process);
        if (process->pgrp == old_group)
        {
            lwp_pgrp_remove(old_group, process);
            lwp_pgrp_insert(group, process);
        }
        else
        {
            /* group changed under us: look it up again */
            retry = 1;
        }
        PGRP_UNLOCK(old_group);
        LWP_UNLOCK(process);
        lwp_pgrp_dec_ref(old_group);
    }
    PGRP_UNLOCK(group);

    return 0;
}
/**
 * Propagate new identifiers to every member process of @group.
 *
 * @param sid  new session id, or -1 to leave the members' sid untouched
 * @param pgid new group id, or -1 to leave the members' pgid/pgrp untouched
 */
int lwp_pgrp_update_children_info(rt_processgroup_t group, pid_t sid, pid_t pgid)
{
    rt_lwp_t member;

    if (group == RT_NULL)
    {
        return -EINVAL;
    }

    PGRP_LOCK_NESTED(group);

    rt_list_for_each_entry(member, &(group->process), pgrp_node)
    {
        LWP_LOCK(member);
        if (sid != -1)
        {
            member->sid = sid;
        }
        if (pgid != -1)
        {
            member->pgid = pgid;
            member->pgrp = group;
        }
        LWP_UNLOCK(member);
    }

    PGRP_UNLOCK(group);
    return 0;
}
/**
* setpgid() sets the PGID of the process specified by pid to pgid.
* If pid is zero, then the process ID of the calling process is used.
* If pgid is zero, then the PGID of the process specified by pid is made the same as its process ID.
* If setpgid() is used to move a process from one process group to another (as is done by some shells when
* creating pipelines), both process groups must be part of the same session (see setsid(2) and credentials(7)).
* In this case, the pgid specifies an existing process group to be joined and the session ID of that group must
* match the session ID of the joining process.
*/
/**
 * setpgid(2) implementation: move @pid (0 = caller) into group @pgid
 * (0 = same as @pid), enforcing the POSIX permission rules documented in
 * the comment block above.
 */
sysret_t sys_setpgid(pid_t pid, pid_t pgid)
{
    rt_lwp_t process, self_process;
    pid_t sid;
    rt_processgroup_t group;
    rt_session_t session;
    sysret_t err = 0;

    /* pgid 0: use the target's own pid as the group id */
    if (pgid == 0)
    {
        pgid = pid;
    }
    if (pgid < 0)
    {
        return -EINVAL;
    }

    self_process = lwp_self();

    /* pid 0: operate on the calling process */
    if (pid == 0)
    {
        pid = self_process->pid;
        process = self_process;
    }
    else
    {
        lwp_pid_lock_take();
        process = lwp_from_pid_locked(pid);
        lwp_pid_lock_release();
        if (process == RT_NULL)
        {
            return -ESRCH;
        }
    }

    /* permission checks on the target process */
    LWP_LOCK(process);
    if (process->parent == self_process)
    {
        /**
         * change the process group ID of one of the children of the calling process and the child was in
         * a different session
         */
        if (lwp_sid_get_byprocess(process) != lwp_sid_get_byprocess(self_process))
        {
            err = -EPERM;
            LWP_UNLOCK(process);
            goto exit;
        }
        /**
         * An attempt was made to change the process group ID of one of the children of the calling process
         * and the child had already performed an execve(2)
         */
        if (process->did_exec)
        {
            err = -EACCES;
            LWP_UNLOCK(process);
            goto exit;
        }
    }
    else
    {
        /**
         * pid is not the calling process and not a child of the calling process.
         */
        if (process != self_process)
        {
            err = -ESRCH;
            LWP_UNLOCK(process);
            goto exit;
        }
    }
    LWP_UNLOCK(process);

    sid = lwp_sid_get_byprocess(self_process);

    if (pgid != pid)
    {
        /* joining (or creating) a group other than the process's own */
        group = lwp_pgrp_find(pgid);
        if (group == RT_NULL)
        {
            group = lwp_pgrp_create(process);
            lwp_pgrp_move(group, process);
            session = lwp_session_find(sid);
            if (session == RT_NULL)
            {
                LOG_E("the session of sid: %d cannot be found", sid);
                err = -EPERM;
                goto exit;
            }
            else
            {
                lwp_session_insert(session, group);
            }
        }
        else
        {
            /**
             * An attempt was made to move a process into a process group in a different session
             */
            if (sid != lwp_sid_get_bypgrp(group))
            {
                err = -EPERM;
                goto exit;
            }
            /**
             * or to change the process group ID of a session leader
             */
            if (sid == lwp_to_pid(process))
            {
                err = -EPERM;
                goto exit;
            }
            lwp_pgrp_move(group, process);
        }
    }
    else
    {
        /* pgid == pid: make the process the leader of its own group */
        group = lwp_pgrp_find(pgid);
        if (group == RT_NULL)
        {
            group = lwp_pgrp_create(process);
            lwp_pgrp_move(group, process);
            session = lwp_session_find(sid);
            if (session == RT_NULL)
            {
                LOG_E("the session of sid: %d cannot be found", sid);
                err = -EPERM;
                goto exit;
            }
            else
            {
                lwp_session_insert(session, group);
            }
        }
        else // this represents repeated calls
        {
            /**
             * or to change the process group ID of a session leader
             */
            if (lwp_sid_get_bypgrp(group) == lwp_pgid_get_bypgrp(group))
            {
                err = -EPERM;
                goto exit;
            }
            else
            {
                err = 0;
            }
        }
    }

exit:
    return err;
}
/**
* getpgid() returns the PGID of the process specified by pid.
* If pid is zero, the process ID of the calling process is used. (Retrieving the PGID of a process other
* than the caller is rarely necessary, and the POSIX.1 getpgrp() is preferred for that task.)
*/
sysret_t sys_getpgid(pid_t pid)
{
    rt_lwp_t process;

    /* NOTE(review): POSIX says pid == 0 means the calling process — confirm
     * that lwp_from_pid_locked() maps 0 to the caller */
    lwp_pid_lock_take();
    process = lwp_from_pid_locked(pid);
    lwp_pid_lock_release();

    if (process == RT_NULL)
    {
        return -ESRCH;
    }

    return lwp_pgid_get_byprocess(process);
}
#ifdef RT_USING_FINSH
#include "finsh.h"
/**
 * Shell command: print every process group (pgid, sid, leader's thread name).
 *
 * Fixed: the iteration formerly ran to 'count' (the length queried earlier)
 * instead of the number of pointers actually retrieved; if groups vanished
 * between the two calls, NULL slots from rt_calloc() would be dereferenced
 * under PGRP_LOCK. The loop now stops at the retrieved count.
 */
long list_processgroup(void)
{
    int count = 0, index, fetched;
    rt_processgroup_t *groups;
    rt_processgroup_t group;
    rt_thread_t thread;
    char name[RT_NAME_MAX];

    rt_kprintf("PGID SID leader process\n");
    rt_kprintf("---- ---- ----------------\n");

    count = rt_object_get_length(RT_Object_Class_ProcessGroup);
    if (count > 0)
    {
        /* get pointers */
        groups = (rt_processgroup_t *)rt_calloc(count, sizeof(rt_processgroup_t));
        if (groups)
        {
            fetched = rt_object_get_pointers(RT_Object_Class_ProcessGroup, (rt_object_t *)groups, count);
            for (index = 0; index < fetched; index++)
            {
                struct rt_processgroup pgrp;
                group = groups[index];

                /* snapshot the group under its lock, then print unlocked */
                PGRP_LOCK(group);
                rt_memcpy(&pgrp, group, sizeof(struct rt_processgroup));
                PGRP_UNLOCK(group);

                if (pgrp.leader)
                {
                    thread = rt_list_entry(pgrp.leader->t_grp.prev, struct rt_thread, sibling);
                    rt_strncpy(name, thread->parent.name, RT_NAME_MAX);
                }
                else
                {
                    rt_strncpy(name, "nil", RT_NAME_MAX);
                }

                rt_kprintf("%4d %4d %-*.*s\n", pgrp.pgid, pgrp.sid, RT_NAME_MAX, RT_NAME_MAX, name);
            }
            rt_free(groups);
        }
    }

    return 0;
}
MSH_CMD_EXPORT(list_processgroup, list process group);
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,123 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020-02-23 Jesven first version.
*/
#ifndef LWP_PID_H__
#define LWP_PID_H__
#ifdef __cplusplus
extern "C" {
#endif
#include <rtthread.h>
#define LWP_CREATE_FLAG_NONE 0x0000
#define LWP_CREATE_FLAG_ALLOC_PID 0x0001 /* allocate pid on lwp object create */
#define LWP_CREATE_FLAG_INIT_USPACE 0x0002 /* do user space initialization */
#define LWP_CREATE_FLAG_NOTRACE_EXEC 0x0004 /* not trace if execve() after fork() */
struct rt_lwp;
struct lwp_avl_struct *lwp_get_pid_ary(void);
int lwp_pid_init(void);
int lwp_pid_wait_for_empty(int wait_flags, rt_tick_t to);
int lwp_pid_for_each(int (*cb)(pid_t pid, void *data), void *data);
void lwp_pid_put(struct rt_lwp *lwp);
void lwp_pid_lock_take(void);
void lwp_pid_lock_release(void);
/**
* @brief Create a new lwp object
* This will initialize the member in the object and register to system.
* Besides, a new pid is allocate with lwp
*
* @param flags control the property of the lwp object. Can be ORed with:
* LWP_CREATE_FLAG_NONE: raw lwp object
* LWP_CREATE_FLAG_ALLOC_PID: lwp object with specified pid
*
* @return struct rt_lwp* object
*/
struct rt_lwp* lwp_create(rt_base_t flags);
void lwp_free(struct rt_lwp* lwp);
int lwp_ref_inc(struct rt_lwp *lwp);
int lwp_ref_dec(struct rt_lwp *lwp);
struct rt_lwp* lwp_from_pid_raw_locked(pid_t pid);
struct rt_lwp* lwp_from_pid_locked(pid_t pid);
pid_t lwp_to_pid(struct rt_lwp* lwp);
pid_t lwp_name2pid(const char* name);
char* lwp_pid2name(int32_t pid);
int lwp_getpid(void);
/* Resource-usage accounting, laid out to match the POSIX struct rusage
 * expected by wait4()-style callers; consumed by lwp_waitpid(). */
struct rusage
{
    struct timeval ru_utime; /* user CPU time used */
    struct timeval ru_stime; /* system CPU time used */
    long ru_maxrss;
    long ru_ixrss;
    long ru_idrss;
    long ru_isrss;
    long ru_minflt;
    long ru_majflt;
    long ru_nswap;
    long ru_inblock;
    long ru_oublock;
    long ru_msgsnd;
    long ru_msgrcv;
    long ru_nsignals;
    long ru_nvcsw;
    long ru_nivcsw;
    long reserved[16]; /* padding for ABI stability */
};
pid_t lwp_waitpid(const pid_t pid, int *status, int options, struct rusage *ru);
rt_err_t lwp_waitpid_kick(struct rt_lwp *parent, struct rt_lwp *self_lwp);
pid_t waitpid(pid_t pid, int *status, int options);
long list_process(void);
void lwp_user_object_lock_init(struct rt_lwp *lwp);
void lwp_user_object_lock_destroy(struct rt_lwp *lwp);
void lwp_user_object_lock(struct rt_lwp *lwp);
void lwp_user_object_unlock(struct rt_lwp *lwp);
int lwp_user_object_add(struct rt_lwp *lwp, rt_object_t object);
rt_err_t lwp_user_object_delete(struct rt_lwp *lwp, rt_object_t object);
void lwp_user_object_clear(struct rt_lwp *lwp);
void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp);
/**
 * Look up @pid and take a reference on the result. The pid lock is held
 * only for the lookup itself; the caller receives a referenced lwp (or
 * NULL) and must pair it with lwp_from_pid_release_lock().
 */
rt_inline struct rt_lwp *lwp_from_pid_and_lock(pid_t pid)
{
    struct rt_lwp *lwp;

    lwp_pid_lock_take();
    lwp = lwp_from_pid_locked(pid);
    if (lwp)
        lwp_ref_inc(lwp);
    lwp_pid_lock_release();

    return lwp;
}
/* Drop the reference taken by lwp_from_pid_and_lock(); NULL is tolerated. */
rt_inline void lwp_from_pid_release_lock(struct rt_lwp *lwp)
{
    if (lwp)
        lwp_ref_dec(lwp);
}
typedef rt_base_t lwp_status_t;
void lwp_thread_exit(rt_thread_t thread, int status);
void lwp_exit(struct rt_lwp *lwp, lwp_status_t status);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,161 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-11-11 Shell moved lwp_startup() from lwp.c;
* added lwp_teardown()
*/
#define DBG_TAG "lwp"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "lwp_internal.h"
#include <rthw.h>
#include <rtthread.h>
#include <dfs_file.h>
#include <dfs_mnt.h>
#include <dfs_fs.h>
/**
* lwp_runtime:
* Runtime environment provide by init process including boot scripts,
* poweroff, shutdown, reboot, service management etc. In the kernel, lwp will
* provide the underlying software bootstrap and cleanup for the init proc.
*
*/
/* Weak hook polled before starting init: a non-zero return aborts the
 * startup of the init process (for debugger attachment). Default: none. */
rt_weak int lwp_startup_debug_request(void)
{
    return 0;
}
#define LATENCY_TIMES (3)
#define LATENCY_IN_MSEC (128)
#define LWP_CONSOLE_PATH "CONSOLE=/dev/console"
const char *init_search_path[] = {
"/sbin/init",
"/bin/init",
};
/**
* Startup process 1 and do the essential works
*/
static int lwp_startup(void)
{
int error;
const char *init_path;
char *argv[] = {0, "&"};
char *envp[] = {LWP_CONSOLE_PATH, 0};
#ifdef LWP_DEBUG_INIT
int command;
int countdown = LATENCY_TIMES;
while (countdown)
{
command = lwp_startup_debug_request();
if (command)
{
return 0;
}
rt_kprintf("Press any key to stop init process startup ... %d\n", countdown);
countdown -= 1;
rt_thread_mdelay(LATENCY_IN_MSEC);
}
rt_kprintf("Starting init ...\n");
#endif /* LWP_DEBUG_INIT */
for (size_t i = 0; i < sizeof(init_search_path)/sizeof(init_search_path[0]); i++)
{
struct stat s;
init_path = init_search_path[i];
error = stat(init_path, &s);
if (error == 0)
{
argv[0] = (void *)init_path;
error = lwp_execve((void *)init_path, 0, sizeof(argv)/sizeof(argv[0]), argv, envp);
if (error < 0)
{
LOG_W("%s: failed to setup runtime environment\b"
"\tlwp_execve() failed with code %d", __func__, error);
}
else if (error != 1)
{
LOG_W("%s: pid 1 is already allocated", __func__);
error = -EBUSY;
}
else
{
rt_lwp_t p = lwp_from_pid_locked(1);
p->sig_protected = 1;
error = 0;
}
break;
}
}
if (error)
{
LOG_D("%s: failed to setup runtime environment\b"
"\tinit program not found", __func__);
}
return error;
}
INIT_APP_EXPORT(lwp_startup);
/* don't use heap for safety */
static struct rt_work _teardown_work;
#define INIT_PID 1
/* Deferred teardown work: block until every process has exited, then run
 * the reboot/poweroff callback carried in the work data. */
static void _teardown_entry(struct rt_work *work, void *work_data)
{
    void (*cb_on_reboot)(void) = work_data;
    int rc;

    /* cleanup of process: retry the wait while it is interrupted */
    do
    {
        rc = lwp_pid_wait_for_empty(RT_KILLABLE, RT_WAITING_FOREVER);
    } while (rc != 0);

    LOG_I("All processes exited");
    cb_on_reboot();
}
/* Return the pid of the parent process, or 0 if the process has no parent. */
static int _get_parent_pid(struct rt_lwp *lwp)
{
    if (lwp->parent)
    {
        return lwp->parent->pid;
    }
    return 0;
}
/* reverse operation of lwp_startup() */
/**
 * Tear down the runtime environment: schedule the final cleanup work, then
 * terminate the calling process.
 *
 * @param lwp the calling process; must be init (pid 1) or a child of init
 * @param cb  invoked once all processes have exited (performs the reboot)
 * @return -EPERM if the caller is unprivileged; otherwise never returns
 */
sysret_t lwp_teardown(struct rt_lwp *lwp, void (*cb)(void))
{
    struct rt_work *work;
    if (lwp->pid != INIT_PID && _get_parent_pid(lwp) != INIT_PID)
    {
        /* The calling process has insufficient privilege */
        return -EPERM;
    }
    work = &_teardown_work;
    rt_work_init(work, _teardown_entry, cb);
#define SOME_DELAY (RT_TICK_PER_SECOND / 10) /* allow idle to cleanup resource */
    rt_work_submit(work, SOME_DELAY);
    lwp_exit(lwp, LWP_CREATE_STAT_EXIT(EXIT_SUCCESS));
    /* never return */
    RT_ASSERT(0);
    return 0;
}

View File

@ -0,0 +1,432 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-17 xqyjlj the first version
* 2023-11-29 Shell Add direct reference of sess for group
*/
#include "lwp.h"
#include "lwp_internal.h"
#include "lwp_syscall.h"
#include "terminal/terminal.h"
#define DBG_TAG "lwp.session"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>
/**
 * Look up a session object by its session id.
 *
 * @param sid session id; 0 means the session of the calling process
 * @return the session, or RT_NULL when not found / invalid input
 */
rt_session_t lwp_session_find(pid_t sid)
{
    rt_base_t level;
    rt_session_t found = RT_NULL;
    rt_session_t iter;
    rt_list_t *node;
    struct rt_object_information *information;

    information = rt_object_get_information(RT_Object_Class_Session);
    if (sid < 0 || information == RT_NULL)
    {
        return RT_NULL;
    }
    if (sid == 0)
    {
        sid = lwp_getpid();
    }

    /* walk the global session object list under its spinlock */
    level = rt_spin_lock_irqsave(&(information->spinlock));
    rt_list_for_each(node, &(information->object_list))
    {
        iter = (rt_session_t)rt_list_entry(node, struct rt_object, list);
        if (iter->sid == sid)
        {
            found = iter;
            break;
        }
    }
    rt_spin_unlock_irqrestore(&(information->spinlock), level);

    return found;
}
/**
 * Create a new session led by the given process.
 *
 * @param leader the session leader; its pid becomes the session id
 * @return the new session, or RT_NULL on invalid input / allocation failure
 */
rt_session_t lwp_session_create(rt_lwp_t leader)
{
    rt_session_t session;

    if (leader == RT_NULL)
    {
        return RT_NULL;
    }

    session = rt_malloc(sizeof(struct rt_session));
    if (session == RT_NULL)
    {
        return RT_NULL;
    }

    rt_object_init(&(session->object), RT_Object_Class_Session, "session");
    rt_list_init(&(session->processgroup));
    rt_mutex_init(&(session->mutex), "session", RT_IPC_FLAG_PRIO);
    session->leader = leader;
    session->sid = leader->pid;
    /* propagate the new sid to the leader's process-group members */
    lwp_pgrp_update_children_info(leader->pgrp, session->sid, leader->pgid);
    session->foreground_pgid = session->sid;
    session->ctty = RT_NULL;

    return session;
}
/**
 * Destroy a session: detach its controlling tty (if any) via a lock-safe
 * retry loop, then release the kernel object, its mutex and its memory.
 *
 * @param session the session to delete
 * @return 0 on success, -EINVAL on NULL input
 */
int lwp_session_delete(rt_session_t session)
{
    int retry = 1;
    lwp_tty_t ctty;
    /* parameter check */
    if (session == RT_NULL)
    {
        return -EINVAL;
    }
    /* clear children sid */
    lwp_session_update_children_info(session, 0);
    while (retry)
    {
        retry = 0;
        /* snapshot ctty without the lock; re-validated below under SESS_LOCK */
        ctty = session->ctty;
        SESS_LOCK_NESTED(session);
        if (session->ctty == ctty)
        {
            if (ctty)
            {
                SESS_UNLOCK(session);
                /**
                 * Note: it's safe to release the session lock now. Even if someone
                 * race to acquire the tty, it's safe under protection of tty_lock()
                 * and the check inside
                 */
                tty_lock(ctty);
                tty_rel_sess(ctty, session);
                session->ctty = RT_NULL;
            }
            else
            {
                SESS_UNLOCK(session);
            }
        }
        else
        {
            /* ctty changed under us: take another pass with a fresh snapshot */
            SESS_UNLOCK(session);
            retry = 1;
        }
    }
    rt_object_detach(&(session->object));
    rt_mutex_detach(&(session->mutex));
    rt_free(session);
    return 0;
}
/**
 * Attach a process group to a session and propagate the sid to the group's
 * member processes.
 *
 * @return 0 on success, -EINVAL on NULL input
 */
int lwp_session_insert(rt_session_t session, rt_processgroup_t group)
{
    if (!session || !group)
    {
        return -EINVAL;
    }

    SESS_LOCK_NESTED(session);
    PGRP_LOCK_NESTED(group);

    group->sid = session->sid;
    group->session = session;
    lwp_pgrp_update_children_info(group, session->sid, group->pgid);
    rt_list_insert_after(&(session->processgroup), &(group->pgrp_list_node));

    PGRP_UNLOCK(group);
    SESS_UNLOCK(session);
    return 0;
}
/**
 * Detach a process group from a session; an emptied session is deleted.
 *
 * @return 1 if the session was deleted, 0 on success, -EINVAL on NULL input
 */
int lwp_session_remove(rt_session_t session, rt_processgroup_t group)
{
    rt_bool_t last_one;

    if (!session || !group)
    {
        return -EINVAL;
    }

    SESS_LOCK_NESTED(session);
    PGRP_LOCK_NESTED(group);

    rt_list_remove(&(group->pgrp_list_node));
    /* member processes no longer belong to any session */
    lwp_pgrp_update_children_info(group, 0, group->pgid);
    group->sid = 0;
    group->session = RT_NULL;

    PGRP_UNLOCK(group);
    last_one = rt_list_isempty(&(session->processgroup));
    SESS_UNLOCK(session);

    if (last_one)
    {
        lwp_session_delete(session);
        return 1;
    }
    return 0;
}
/**
 * Move a process group into another session.
 *
 * @return 0 on success (or if the group is already in the session),
 *         -EINVAL on NULL input
 */
int lwp_session_move(rt_session_t session, rt_processgroup_t group)
{
    rt_session_t prev_session;
    /* parameter check */
    if (session == RT_NULL || group == RT_NULL)
    {
        return -EINVAL;
    }
    /* no-op when the group already belongs to the target session */
    if (lwp_sid_get_bysession(session) == lwp_sid_get_bypgrp(group))
    {
        return 0;
    }
    SESS_LOCK(session);
    prev_session = group->session;
    if (prev_session)
    {
        /* NOTE(review): two sessions are locked in nested order here;
         * presumably callers never move groups in opposite directions
         * concurrently — confirm no lock-order inversion is possible */
        SESS_LOCK(prev_session);
        lwp_session_remove(prev_session, group);
        SESS_UNLOCK(prev_session);
    }
    lwp_session_insert(session, group);
    SESS_UNLOCK(session);
    return 0;
}
/**
 * Propagate a new session id to every process group of the session (and,
 * transitively, to the groups' member processes).
 *
 * @param session the session to walk
 * @param sid     the new session id; -1 leaves every group untouched
 * @return 0 on success, -EINVAL on NULL input
 */
int lwp_session_update_children_info(rt_session_t session, pid_t sid)
{
    rt_list_t *node = RT_NULL;
    rt_processgroup_t group = RT_NULL;
    if (session == RT_NULL)
    {
        return -EINVAL;
    }
    SESS_LOCK_NESTED(session);
    rt_list_for_each(node, &(session->processgroup))
    {
        group = (rt_processgroup_t)rt_list_entry(node, struct rt_processgroup, pgrp_list_node);
        PGRP_LOCK_NESTED(group);
        if (sid != -1)
        {
            group->sid = sid;
            group->session = session;
            lwp_pgrp_update_children_info(group, sid, group->pgid);
        }
        PGRP_UNLOCK(group);
    }
    SESS_UNLOCK(session);
    return 0;
}
/**
 * Set the foreground process group of a session.
 *
 * @param session the target session
 * @param pgid    pgid of a process group that must belong to the session
 * @return 0 on success, -EINVAL on invalid input or if the group is not a
 *         member of the session
 */
int lwp_session_set_foreground(rt_session_t session, pid_t pgid)
{
    rt_processgroup_t group = RT_NULL;
    rt_list_t *node = RT_NULL;
    rt_bool_t is_contains = RT_FALSE;
    /* parameter check */
    if (session == RT_NULL || pgid <= 0)
    {
        return -EINVAL;
    }
    SESS_LOCK(session);
    rt_list_for_each(node, &(session->processgroup))
    {
        group = (rt_processgroup_t)rt_list_entry(node, struct rt_processgroup, pgrp_list_node);
        PGRP_LOCK(group);
        if (group->pgid == pgid)
        {
            is_contains = RT_TRUE;
        }
        PGRP_UNLOCK(group);
        if (is_contains)
        {
            /* found: no need to scan (and lock) the remaining groups */
            break;
        }
    }
    if (is_contains)
    {
        session->foreground_pgid = pgid;
        // TODO: maybe notify tty
    }
    SESS_UNLOCK(session);
    return is_contains ? 0 : -EINVAL;
}
/**
* setsid() creates a new session if the calling process is not a process group leader.
* The calling process is the leader of the new session (i.e., its session ID is made the same as its process ID).
* The calling process also becomes the process group leader of a new process group in the session
* (i.e., its process group ID is made the same as its process ID).
*/
/**
 * Create a new session with the caller as leader of both the session and a
 * new process group (see setsid(2)).
 *
 * @return the new session id on success; -EPERM if the caller is already a
 *         process group leader; -ENOMEM on allocation failure
 */
sysret_t sys_setsid(void)
{
    rt_lwp_t process;
    pid_t pid;
    rt_processgroup_t group;
    rt_session_t session;
    sysret_t err = 0;
    process = lwp_self();
    pid = lwp_to_pid(process);
    /* fail if the calling process is already a process group leader */
    if (lwp_pgrp_find(pid))
    {
        err = -EPERM;
        goto exit;
    }
    group = lwp_pgrp_create(process);
    if (group)
    {
        lwp_pgrp_move(group, process);
        session = lwp_session_create(process);
        if (session)
        {
            lwp_session_move(session, group);
            err = lwp_sid_get_bysession(session);
        }
        else
        {
            /* fixed: previously lwp_sid_get_bysession() was reached with a
             * NULL session on this failure path; roll back and report OOM */
            lwp_pgrp_delete(group);
            err = -ENOMEM;
        }
    }
    else
    {
        err = -ENOMEM;
    }
exit:
    return err;
}
/**
* getsid() returns the session ID of the process with process ID pid.
* If pid is 0, getsid() returns the session ID of the calling process.
*/
/**
 * Return the session id of the process identified by pid (0 = caller).
 *
 * @return the sid; -ESRCH if no such process; -EPERM if the target is in a
 *         different session than the caller
 */
sysret_t sys_getsid(pid_t pid)
{
    rt_lwp_t process, self_process;
    pid_t sid;
    lwp_pid_lock_take();
    process = lwp_from_pid_locked(pid);
    lwp_pid_lock_release();
    /* NOTE(review): no reference is taken on `process` before the lock is
     * dropped; confirm the sid read below cannot race with process exit */
    if (process == RT_NULL)
    {
        return -ESRCH;
    }
    self_process = lwp_self();
    sid = lwp_sid_get_byprocess(process);
    if (sid != lwp_sid_get_byprocess(self_process))
    {
        /**
         * A process with process ID pid exists, but it is not in the same session as the calling process,
         * and the implementation considers this an error.
         *
         * Note: Linux does not return EPERM.
         */
        return -EPERM;
    }
    return sid;
}
#ifdef RT_USING_FINSH
#include "finsh.h"
/**
 * Shell command: list every session with its leader's main thread name.
 *
 * @return always 0
 */
long list_session(void)
{
    int count = 0, index;
    rt_session_t *sessions;
    rt_session_t session;
    rt_thread_t thread;
    char name[RT_NAME_MAX];
    rt_kprintf("SID leader process\n");
    rt_kprintf("---- ----------------\n");
    count = rt_object_get_length(RT_Object_Class_Session);
    if (count > 0)
    {
        /* snapshot the object pointers first */
        sessions = (rt_session_t *)rt_calloc(count, sizeof(rt_session_t));
        if (sessions)
        {
            index = rt_object_get_pointers(RT_Object_Class_Session, (rt_object_t *)sessions, count);
            if (index > 0)
            {
                for (index = 0; index < count; index++)
                {
                    struct rt_session se;
                    session = sessions[index];
                    /* copy under lock so the printed snapshot is consistent */
                    SESS_LOCK(session);
                    rt_memcpy(&se, session, sizeof(struct rt_session));
                    SESS_UNLOCK(session);
                    if (se.leader) /* fixed: condition was duplicated ("se.leader && se.leader") */
                    {
                        thread = rt_list_entry(se.leader->t_grp.prev, struct rt_thread, sibling);
                        rt_strncpy(name, thread->parent.name, RT_NAME_MAX);
                    }
                    else
                    {
                        rt_strncpy(name, "nil", RT_NAME_MAX);
                    }
                    /* precision bounds the print, so an unterminated name is safe */
                    rt_kprintf("%4d %-*.*s\n", se.sid, RT_NAME_MAX, RT_NAME_MAX, name);
                }
            }
            rt_free(sessions);
        }
    }
    return 0;
}
MSH_CMD_EXPORT(list_session, list session);
#endif

View File

@ -0,0 +1,465 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-10-12 Jesven first version
* 2023-02-20 wangxiaoyao adapt to mm
*/
#include <rthw.h>
#include <rtthread.h>
#ifdef ARCH_MM_MMU
#include <lwp.h>
#include <lwp_shm.h>
#include <lwp_mm.h>
#include <lwp_user_mm.h>
#include <mmu.h>
/* the kernel structure to represent a share-memory */
struct lwp_shm_struct
{
    struct rt_mem_obj mem_obj;  /* mapping callbacks used by the aspace layer */
    size_t addr; /* point to the next item in the free list when not used */
    size_t size;  /* segment size in bytes (rounded to a power-of-two of pages) */
    int ref;      /* number of user mappings referencing this segment */
    size_t key;   /* user-supplied lookup key */
};
static struct lwp_avl_struct *shm_tree_key;
static struct lwp_avl_struct *shm_tree_pa;
static int shm_free_list = -1; /* the single-direct list of freed items */
static int shm_id_used = 0; /* the latest allocated item in the array */
static struct lwp_shm_struct _shm_ary[RT_LWP_SHM_MAX_NR];
/* Name reported for shared-memory vareas; the varea argument is unused. */
static const char *get_shm_name(rt_varea_t varea)
{
    (void)varea;
    return "user.shm";
}
/* A user mapping was created over this shm: bump its reference count. */
static void on_shm_varea_open(struct rt_varea *varea)
{
    struct lwp_shm_struct *shm;
    shm = rt_container_of(varea->mem_obj, struct lwp_shm_struct, mem_obj);
    shm->ref += 1;
}
/* A user mapping over this shm was removed: drop its reference count. */
static void on_shm_varea_close(struct rt_varea *varea)
{
    struct lwp_shm_struct *shm;
    shm = rt_container_of(varea->mem_obj, struct lwp_shm_struct, mem_obj);
    shm->ref -= 1;
}
/* Fault handler: map the whole shm region into the faulting varea at once. */
static void on_shm_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
    struct lwp_shm_struct *shm;
    int err;
    shm = rt_container_of(varea->mem_obj, struct lwp_shm_struct, mem_obj);
    /* map all share page frames to user space in a time */
    void *page = (void *)shm->addr;
    void *pg_paddr = (char *)page + PV_OFFSET; /* kernel VA -> physical address */
    err = rt_varea_map_range(varea, varea->start, pg_paddr, shm->size);
    if (err == RT_EOK)
    {
        msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
        msg->response.size = shm->size;
        msg->response.vaddr = page;
    }
    /* on failure the response is left untouched: fault stays unhandled */
    return ;
}
/*
* Try to allocate an structure 'lwp_shm_struct' from the freed list or the
* static array.
*/
/* Allocate a slot index in _shm_ary: prefer the free list, then the
 * untouched tail of the static array. Returns -1 when exhausted. */
static int _shm_id_alloc(void)
{
    int id;

    if (shm_free_list != -1)
    {
        /* reuse a previously freed slot; 'addr' links the free list */
        id = shm_free_list;
        shm_free_list = (int)_shm_ary[shm_free_list].addr;
    }
    else if (shm_id_used < RT_LWP_SHM_MAX_NR)
    {
        id = shm_id_used;
        shm_id_used++;
    }
    else
    {
        id = -1;
    }
    return id;
}
/* Release the item in the static array to the freed list. */
static void shm_id_free(int id)
{
    /* link the freed item to the single-direction free list (via 'addr') */
    _shm_ary[id].addr = (size_t)shm_free_list;
    shm_free_list = id;
}
/* Locate the shared memory through 'key' or create a new one. */
/**
 * Locate the shared memory identified by 'key', or (when 'create' is set)
 * allocate a new segment: an id slot, power-of-two pages, and a pair of
 * AVL nodes (key tree + physical-address tree) allocated as one block.
 *
 * @return the shm id on success, -1 on failure. Caller holds rt_mm_lock().
 */
static int _lwp_shmget(size_t key, size_t size, int create)
{
    int id = -1;
    struct lwp_avl_struct *node_key = 0;
    struct lwp_avl_struct *node_pa = 0;
    void *page_addr = 0;
    uint32_t bit = 0;
    /* try to locate the item with the key in the binary tree */
    node_key = lwp_avl_find(key, shm_tree_key);
    if (node_key)
    {
        return (struct lwp_shm_struct *)node_key->data - _shm_ary; /* the index */
    }
    /* If there doesn't exist such an item and we're allowed to create one ... */
    if (create)
    {
        struct lwp_shm_struct* p;
        if (!size)
        {
            goto err;
        }
        id = _shm_id_alloc();
        if (id == -1)
        {
            goto err;
        }
        /* allocate pages up to 2's exponent to cover the required size */
        bit = rt_page_bits(size);
        page_addr = rt_pages_alloc_ext(bit, PAGE_ANY_AVAILABLE); /* virtual address */
        if (!page_addr)
        {
            goto err;
        }
        /* initialize the shared memory structure */
        p = _shm_ary + id;
        p->addr = (size_t)page_addr;
        p->size = (1UL << (bit + ARCH_PAGE_SHIFT));
        p->ref = 0;
        p->key = key;
        p->mem_obj.get_name = get_shm_name;
        p->mem_obj.on_page_fault = on_shm_page_fault;
        p->mem_obj.on_varea_open = on_shm_varea_open;
        p->mem_obj.on_varea_close = on_shm_varea_close;
        p->mem_obj.hint_free = NULL;
        /* then insert it into the balancing binary tree */
        /* both tree nodes come from one allocation; node_pa = node_key + 1 */
        node_key = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct) * 2);
        if (!node_key)
        {
            goto err;
        }
        node_key->avl_key = p->key;
        node_key->data = (void *)p;
        lwp_avl_insert(node_key, &shm_tree_key);
        node_pa = node_key + 1;
        node_pa->avl_key = p->addr;
        node_pa->data = (void *)p;
        lwp_avl_insert(node_pa, &shm_tree_pa);
    }
    return id;
err:
    /* unwind whatever was acquired before the failure */
    if (id != -1)
    {
        shm_id_free(id);
    }
    if (page_addr)
    {
        rt_pages_free(page_addr, bit);
    }
    if (node_key)
    {
        rt_free(node_key);
    }
    return -1;
}
/* A wrapping function, get the shared memory with interrupts disabled. */
/* Public wrapper: run _lwp_shmget() under the mm lock. */
int lwp_shmget(size_t key, size_t size, int create)
{
    int id;

    rt_mm_lock();
    id = _lwp_shmget(key, size, create);
    rt_mm_unlock();

    return id;
}
/* Locate the binary tree node_key corresponding to the shared-memory id. */
/* Map a shm id back to its key-tree AVL node; RT_NULL if the id is out of
 * range or the slot is not currently in use. */
static struct lwp_avl_struct *shm_id_to_node(int id)
{
    struct lwp_avl_struct *node;
    struct lwp_shm_struct *slot;

    /* reject out-of-range ids */
    if (id < 0 || id >= RT_LWP_SHM_MAX_NR)
    {
        return RT_NULL;
    }

    slot = &_shm_ary[id];
    node = lwp_avl_find(slot->key, shm_tree_key);
    /* the slot is valid only if the tree entry points back at it */
    if (node == RT_NULL || node->data != (void *)slot)
    {
        return RT_NULL;
    }
    return node;
}
/* Free the shared pages, the shared-memory structure and its binary tree node_key. */
/**
 * @return -1 if the id is invalid; 0 on success, or when the segment is
 *         still referenced (in which case nothing is freed)
 */
static int _lwp_shmrm(int id)
{
    struct lwp_avl_struct *node_key = RT_NULL;
    struct lwp_avl_struct *node_pa = RT_NULL;
    struct lwp_shm_struct* p = RT_NULL;
    uint32_t bit = 0;
    node_key = shm_id_to_node(id);
    if (!node_key)
    {
        return -1;
    }
    p = (struct lwp_shm_struct *)node_key->data;
    if (p->ref)
    {
        /* still mapped somewhere: keep the segment alive */
        return 0;
    }
    bit = rt_page_bits(p->size);
    rt_pages_free((void *)p->addr, bit);
    lwp_avl_remove(node_key, &shm_tree_key);
    node_pa = node_key + 1;
    lwp_avl_remove(node_pa, &shm_tree_pa);
    /* node_key and node_pa were one allocation: a single free suffices */
    rt_free(node_key);
    shm_id_free(id);
    return 0;
}
/* A wrapping function, free the shared memory with interrupt disabled. */
/**
 * Public wrapper: free the shared memory identified by id.
 *
 * Fixed: takes rt_mm_lock() like every other wrapper in this file —
 * _lwp_shmrm() walks and mutates the shared AVL trees that the lock
 * protects, and the original wrapper called it unlocked.
 */
int lwp_shmrm(int id)
{
    int ret = 0;
    rt_mm_lock();
    ret = _lwp_shmrm(id);
    rt_mm_unlock();
    return ret;
}
/* Map the shared memory specified by 'id' to the specified virtual address. */
/**
 * Map the shared memory identified by 'id' into the current process at
 * shm_vaddr (or an address chosen by the aspace layer when it is NULL).
 *
 * @return the mapped user virtual address, or RT_NULL on failure
 */
static void *_lwp_shmat(int id, void *shm_vaddr)
{
    int err;
    struct rt_lwp *lwp = RT_NULL;
    struct lwp_avl_struct *node_key = RT_NULL;
    struct lwp_shm_struct *p = RT_NULL;
    void *va = shm_vaddr;
    /* The id is used to locate the node_key in the binary tree, and then get the
     * shared-memory structure linked to the node_key. We don't use the id to refer
     * to the shared-memory structure directly, because the binary tree is used
     * to verify the structure is really in use.
     */
    node_key = shm_id_to_node(id);
    if (!node_key)
    {
        return RT_NULL;
    }
    p = (struct lwp_shm_struct *)node_key->data; /* p = _shm_ary[id]; */
    /* map the shared memory into the address space of the current thread */
    lwp = lwp_self();
    if (!lwp)
    {
        return RT_NULL;
    }
    err = rt_aspace_map(lwp->aspace, &va, p->size, MMU_MAP_U_RWCB, MMF_PREFETCH,
                        &p->mem_obj, 0);
    if (err != RT_EOK)
    {
        va = RT_NULL;
    }
    return va;
}
/* A wrapping function: attach the shared memory to the specified address. */
/* Public wrapper: attach the shared memory at a page-aligned address. */
void *lwp_shmat(int id, void *shm_vaddr)
{
    void *ret = RT_NULL;
    /* the requested address must be page-aligned */
    if (((size_t)shm_vaddr & ARCH_PAGE_MASK) != 0)
    {
        return RT_NULL;
    }
    /* NOTE(review): unlike the other wrappers this one does not take
     * rt_mm_lock(); confirm whether rt_aspace_map() provides the needed
     * protection for the AVL lookup inside _lwp_shmat() */
    ret = _lwp_shmat(id, shm_vaddr);
    return ret;
}
/* Translate a mapped user VA back to its shm descriptor via the
 * physical-address AVL tree; RT_NULL if it is not a shm mapping. */
static struct lwp_shm_struct *_lwp_shm_struct_get(struct rt_lwp *lwp, void *shm_vaddr)
{
    void *pa;
    struct lwp_avl_struct *node;

    if (lwp == RT_NULL)
    {
        return RT_NULL;
    }

    pa = lwp_v2p(lwp, shm_vaddr); /* user VA -> physical address */
    node = lwp_avl_find((size_t)pa, shm_tree_pa);
    return node ? (struct lwp_shm_struct *)node->data : RT_NULL;
}
static int _lwp_shm_ref_inc(struct rt_lwp *lwp, void *shm_vaddr)
{
struct lwp_shm_struct* p = _lwp_shm_struct_get(lwp, shm_vaddr);
if (p)
{
p->ref++;
return p->ref;
}
return -1;
}
/* Public wrapper: increase the shm refcount under the mm lock. */
int lwp_shm_ref_inc(struct rt_lwp *lwp, void *shm_vaddr)
{
    int refcnt;

    rt_mm_lock();
    refcnt = _lwp_shm_ref_inc(lwp, shm_vaddr);
    rt_mm_unlock();

    return refcnt;
}
static int _lwp_shm_ref_dec(struct rt_lwp *lwp, void *shm_vaddr)
{
struct lwp_shm_struct* p = _lwp_shm_struct_get(lwp, shm_vaddr);
if (p && (p->ref > 0))
{
p->ref--;
return p->ref;
}
return -1;
}
/* Public wrapper: decrease the shm refcount under the mm lock. */
int lwp_shm_ref_dec(struct rt_lwp *lwp, void *shm_vaddr)
{
    int refcnt;

    rt_mm_lock();
    refcnt = _lwp_shm_ref_dec(lwp, shm_vaddr);
    rt_mm_unlock();

    return refcnt;
}
/* Unmap the shared memory from the address space of the current thread. */
/* Unmap the shared memory from the address space of the current thread.
 * Returns 0 on success, -1 on failure. */
int _lwp_shmdt(void *shm_vaddr)
{
    struct rt_lwp *lwp = lwp_self();

    if (lwp == RT_NULL)
    {
        return -1;
    }
    /* normalize any aspace error to -1 */
    return (rt_aspace_unmap(lwp->aspace, shm_vaddr) == RT_EOK) ? 0 : -1;
}
/* A wrapping function: detach the mapped shared memory. */
/* Public wrapper: detach the mapped shared memory under the mm lock. */
int lwp_shmdt(void *shm_vaddr)
{
    int rc;

    rt_mm_lock();
    rc = _lwp_shmdt(shm_vaddr);
    rt_mm_unlock();

    return rc;
}
/* Get the virtual address of a shared memory in kernel. */
/* Get the virtual address of a shared memory in kernel. */
void *_lwp_shminfo(int id)
{
    struct lwp_avl_struct *node_key = RT_NULL;
    struct lwp_shm_struct *p = RT_NULL;
    /* the share memory is in use only if it exists in the binary tree */
    node_key = shm_id_to_node(id);
    if (!node_key)
    {
        return RT_NULL;
    }
    p = (struct lwp_shm_struct *)node_key->data; /* p = _shm_ary[id]; */
    /* p->addr holds the kernel virtual address; see on_shm_page_fault()
     * where PV_OFFSET is added to get the physical address */
    return (void *)((char *)p->addr - PV_OFFSET); /* get the virtual address */
}
/* A wrapping function: get the virtual address of a shared memory. */
/* Public wrapper: query the shm kernel virtual address under the mm lock. */
void *lwp_shminfo(int id)
{
    void *kvaddr;

    rt_mm_lock();
    kvaddr = _lwp_shminfo(id);
    rt_mm_unlock();

    return kvaddr;
}
#ifdef RT_USING_FINSH
/* AVL traversal callback: print one shm entry (key, paddr, size, id). */
static int _shm_info(struct lwp_avl_struct* node_key, void *data)
{
    struct lwp_shm_struct *shm = (struct lwp_shm_struct *)node_key->data;
    int id = shm - _shm_ary;

    (void)data; /* unused */
    rt_kprintf("0x%08x 0x%08x 0x%08x %8d\n", shm->key, shm->addr, shm->size, id);
    return 0;
}
/* Shell command: dump every allocated shared-memory segment. */
void list_shm(void)
{
    rt_kprintf(" key paddr size id\n");
    rt_kprintf("---------- ---------- ---------- --------\n");
    /* hold the mm lock so the trees cannot change during traversal */
    rt_mm_lock();
    lwp_avl_traversal(shm_tree_key, _shm_info, NULL);
    rt_mm_unlock();
}
MSH_CMD_EXPORT(list_shm, show share memory info);
#endif
#endif

View File

@ -0,0 +1,31 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-10-12 Jesven first version
*/
#ifndef __LWP_SHM_H__
#define __LWP_SHM_H__
#include <lwp_avl.h>
#ifdef __cplusplus
extern "C" {
#endif
int lwp_shmget(size_t key, size_t size, int create);
int lwp_shmrm(int id);
void* lwp_shmat(int id, void* shm_vaddr);
int lwp_shmdt(void* shm_vaddr);
void *lwp_shminfo(int id);
int lwp_shm_ref_inc(struct rt_lwp *lwp, void *shm_vaddr);
int lwp_shm_ref_dec(struct rt_lwp *lwp, void *shm_vaddr);
#ifdef __cplusplus
}
#endif
#endif /*__LWP_SHM_H__*/

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,238 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020-02-23 Jesven first version.
* 2023-07-06 Shell update the generation, pending and delivery API
* 2023-11-22 Shell support for job control signal
*/
#ifndef __LWP_SIGNAL_H__
#define __LWP_SIGNAL_H__
#include "syscall_generic.h"
#include <rtthread.h>
#include <sys/signal.h>
#ifdef __cplusplus
extern "C" {
#endif
#define _USIGNAL_SIGMASK(signo) (1u << ((signo)-1))
#define LWP_SIG_NO_IGN_SET \
(_USIGNAL_SIGMASK(SIGCONT) | _USIGNAL_SIGMASK(SIGSTOP) | \
_USIGNAL_SIGMASK(SIGKILL))
#define LWP_SIG_IGNORE_SET \
(_USIGNAL_SIGMASK(SIGCHLD) | _USIGNAL_SIGMASK(SIGURG) | \
_USIGNAL_SIGMASK(SIGWINCH) /* from 4.3 BSD, not POSIX.1 */)
#define LWP_SIG_JOBCTL_SET \
(_USIGNAL_SIGMASK(SIGCONT) | _USIGNAL_SIGMASK(SIGSTOP) | \
_USIGNAL_SIGMASK(SIGTSTP) | _USIGNAL_SIGMASK(SIGTTIN) | \
_USIGNAL_SIGMASK(SIGTTOU))
#define LWP_SIG_STOP_SET \
(_USIGNAL_SIGMASK(SIGSTOP) | _USIGNAL_SIGMASK(SIGTSTP) | \
_USIGNAL_SIGMASK(SIGTTIN) | _USIGNAL_SIGMASK(SIGTTOU))
#define LWP_SIG_ACT_DFL ((lwp_sighandler_t)0)
#define LWP_SIG_ACT_IGN ((lwp_sighandler_t)1)
#define LWP_SIG_USER_SA_FLAGS \
(SA_NOCLDSTOP | SA_NOCLDWAIT | SA_SIGINFO | SA_ONSTACK | SA_RESTART | \
SA_NODEFER | SA_RESETHAND | SA_EXPOSE_TAGBITS)
#define LWP_SIG_INVALID_TIMER ((timer_t)-1)
typedef enum
{
LWP_SIG_MASK_CMD_BLOCK,
LWP_SIG_MASK_CMD_UNBLOCK,
LWP_SIG_MASK_CMD_SET_MASK,
__LWP_SIG_MASK_CMD_WATERMARK
} lwp_sig_mask_cmd_t;
/**
* LwP implementation of POSIX signal
*/
struct lwp_signal
{
timer_t real_timer;
struct lwp_sigqueue sig_queue;
rt_thread_t sig_dispatch_thr[_LWP_NSIG];
lwp_sighandler_t sig_action[_LWP_NSIG];
lwp_sigset_t sig_action_mask[_LWP_NSIG];
lwp_sigset_t sig_action_nodefer;
lwp_sigset_t sig_action_onstack;
lwp_sigset_t sig_action_restart;
lwp_sigset_t sig_action_siginfo;
lwp_sigset_t sig_action_nocldstop;
lwp_sigset_t sig_action_nocldwait;
};
struct rt_lwp;
struct rt_processgroup;
#ifndef ARCH_MM_MMU
void lwp_sighandler_set(int sig, lwp_sighandler_t func);
void lwp_thread_sighandler_set(int sig, lwp_sighandler_t func);
#endif
/* Initialize a signal queue: empty pending mask, empty siginfo list. */
rt_inline void lwp_sigqueue_init(lwp_sigqueue_t sigq)
{
    rt_memset(&sigq->sigset_pending, 0, sizeof(lwp_sigset_t));
    rt_list_init(&sigq->siginfo_list);
}
/**
* @brief release the signal queue
*
* @param sigq target signal queue
*/
void lwp_sigqueue_clear(lwp_sigqueue_t sigq);
rt_err_t lwp_signal_init(struct lwp_signal *sig);
rt_err_t lwp_signal_detach(struct lwp_signal *signal);
/* Release per-thread signal resources: drop any queued siginfo. */
rt_inline void lwp_thread_signal_detach(struct lwp_thread_signal *tsig)
{
    lwp_sigqueue_clear(&tsig->sig_queue);
}
/**
* @brief send a signal to the process
*
* @param lwp the process to be killed
* @param signo the signal number
* @param code as in siginfo
* @param value as in siginfo
* @return rt_err_t RT_EINVAL if the parameter is invalid, RT_EOK as
* successful
*
* @note the *signal_kill have the same definition of a successful return as
* kill() in IEEE Std 1003.1-2017
*/
rt_err_t lwp_signal_kill(struct rt_lwp *lwp, long signo, long code,
lwp_siginfo_ext_t value);
/**
* @brief set or examine the signal action of signo
*
* @param signo signal number
* @param act the signal action
* @param oact the old signal action
* @return rt_err_t
*/
rt_err_t lwp_signal_action(struct rt_lwp *lwp, int signo,
const struct lwp_sigaction *restrict act,
struct lwp_sigaction *restrict oact);
/**
* @brief send a signal to the thread
*
* @param thread target thread
* @param signo the signal number
* @param code as in siginfo
* @param value as in siginfo
* @return rt_err_t RT_EINVAL if the parameter is invalid, RT_EOK as
* successful
*/
rt_err_t lwp_thread_signal_kill(rt_thread_t thread, long signo, long code,
lwp_siginfo_ext_t value);
/**
* @brief set signal mask of target thread
*
* @param thread the target thread
* @param how command
* @param sigset operand
* @param oset the address to old set
* @return rt_err_t
*/
rt_err_t lwp_thread_signal_mask(rt_thread_t thread, lwp_sig_mask_cmd_t how,
const lwp_sigset_t *sigset, lwp_sigset_t *oset);
/**
* @brief Catch signal if exists and no return, otherwise return with no
* side effect
*
* @param exp_frame the exception frame on kernel stack
*/
void lwp_thread_signal_catch(void *exp_frame);
/**
* @brief Check if it's okay to suspend for current lwp thread
*
* @param thread target thread
* @param suspend_flag suspend flag of target thread
* @return int 1 if can be suspended, otherwise not
*/
int lwp_thread_signal_suspend_check(rt_thread_t thread, int suspend_flag);
/**
* @brief Asynchronously wait for signal
*
* @param thread target thread
* @param sigset the signals to be waited
* @param info address of user siginfo
* @param timeout timeout of waiting
* @return rt_err_t
*/
rt_err_t lwp_thread_signal_timedwait(rt_thread_t thread, lwp_sigset_t *sigset,
siginfo_t *usi, struct timespec *timeout);
/**
* @brief Examine the set of signals that are blocked from delivery to the
* calling thread and that are pending on the process or the calling thread
*
* @param thread target thread
* @param sigset where mask of pending signals is returned
*/
void lwp_thread_signal_pending(rt_thread_t thread, lwp_sigset_t *sigset);
/**
* @brief send a signal to the process group
*
* @param pgrp target process group
* @param signo the signal number
* @param code as in siginfo
* @param value as in siginfo
* @return rt_err_t RT_EINVAL if the parameter is invalid, RT_EOK as
* successful
*/
rt_err_t lwp_pgrp_signal_kill(struct rt_processgroup *pgrp, long signo,
long code, lwp_siginfo_ext_t value);
/* Test whether signal `_sig` (1-based) is a member of `set`. */
rt_inline int lwp_sigismember(lwp_sigset_t *set, int _sig)
{
    unsigned long sig = _sig - 1; /* signal numbers start at 1; bits at 0 */
    if (_LWP_NSIG_WORDS == 1)
    {
        return 1 & (set->sig[0] >> sig);
    }
    else
    {
        return 1 & (set->sig[sig / _LWP_NSIG_BPW] >> (sig % _LWP_NSIG_BPW));
    }
}
struct itimerspec;
rt_bool_t lwp_sigisign(struct rt_lwp *lwp, int _sig);
rt_err_t lwp_signal_setitimer(struct rt_lwp *lwp, int which,
const struct itimerspec *restrict new,
struct itimerspec *restrict old);
rt_bool_t lwp_signal_restart_syscall(struct rt_lwp *lwp, int error_code);
rt_err_t lwp_signal_kill_all(long signo, long code, lwp_siginfo_ext_t value);
#ifdef __cplusplus
}
#endif
#endif /* __LWP_SIGNAL_H__ */

View File

@ -0,0 +1,163 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-03-13 RT-Thread Export as header
*/
#ifndef __LWP_SYS_SOCKET_H__
#define __LWP_SYS_SOCKET_H__
/* socket levels */
#define INTF_SOL_SOCKET 1
#define IMPL_SOL_SOCKET 0xFFF
#define INTF_IPPROTO_IP 0
#define IMPL_IPPROTO_IP 0
#define INTF_IPPROTO_TCP 6
#define IMPL_IPPROTO_TCP 6
#define INTF_IPPROTO_IPV6 41
#define IMPL_IPPROTO_IPV6 41
/* SOL_SOCKET option names */
#define INTF_SO_BROADCAST 6
#define INTF_SO_KEEPALIVE 9
#define INTF_SO_REUSEADDR 2
#define INTF_SO_TYPE 3
#define INTF_SO_ERROR 4
#define INTF_SO_SNDTIMEO 21
#define INTF_SO_RCVTIMEO 20
#define INTF_SO_RCVBUF 8
#define INTF_SO_LINGER 13
#define INTF_SO_NO_CHECK 11
#define INTF_SO_ACCEPTCONN 30
#define INTF_SO_DONTROUTE 5
#define INTF_SO_OOBINLINE 10
#define INTF_SO_REUSEPORT 15
#define INTF_SO_SNDBUF 7
#define INTF_SO_SNDLOWAT 19
#define INTF_SO_RCVLOWAT 18
#define INTF_SO_BINDTODEVICE 25
#define INTF_SO_TIMESTAMPNS 35
#define INTF_SO_TIMESTAMPING 37
#define INTF_SO_SELECT_ERR_QUEUE 45
#define IMPL_SO_BROADCAST 0x0020
#define IMPL_SO_KEEPALIVE 0x0008
#define IMPL_SO_REUSEADDR 0x0004
#define IMPL_SO_TYPE 0x1008
#define IMPL_SO_ERROR 0x1007
#define IMPL_SO_SNDTIMEO 0x1005
#define IMPL_SO_RCVTIMEO 0x1006
#define IMPL_SO_RCVBUF 0x1002
#define IMPL_SO_LINGER 0x0080
#define IMPL_SO_NO_CHECK 0x100a
#define IMPL_SO_ACCEPTCONN 0x0002
#define IMPL_SO_DONTROUTE 0x0010
#define IMPL_SO_OOBINLINE 0x0100
#define IMPL_SO_REUSEPORT 0x0200
#define IMPL_SO_SNDBUF 0x1001
#define IMPL_SO_SNDLOWAT 0x1003
#define IMPL_SO_RCVLOWAT 0x1004
#define IMPL_SO_BINDTODEVICE 0x100b
#define IMPL_SO_TIMESTAMPNS INTF_SO_TIMESTAMPNS
#define IMPL_SO_TIMESTAMPING INTF_SO_TIMESTAMPING
#define IMPL_SO_SELECT_ERR_QUEUE INTF_SO_SELECT_ERR_QUEUE
/* IPPROTO_IP option names */
#define INTF_IP_TTL 2
#define INTF_IP_TOS 1
#define INTF_IP_MULTICAST_TTL 33
#define INTF_IP_MULTICAST_IF 32
#define INTF_IP_MULTICAST_LOOP 34
#define INTF_IP_ADD_MEMBERSHIP 35
#define INTF_IP_DROP_MEMBERSHIP 36
#define IMPL_IP_TTL 2
#define IMPL_IP_TOS 1
#define IMPL_IP_MULTICAST_TTL 5
#define IMPL_IP_MULTICAST_IF 6
#define IMPL_IP_MULTICAST_LOOP 7
#define IMPL_IP_ADD_MEMBERSHIP 3
#define IMPL_IP_DROP_MEMBERSHIP 4
/* IPPROTO_TCP option names */
#define INTF_TCP_NODELAY 1
#define INTF_TCP_KEEPALIVE 9
#define INTF_TCP_KEEPIDLE 4
#define INTF_TCP_KEEPINTVL 5
#define INTF_TCP_KEEPCNT 6
#define IMPL_TCP_NODELAY 0x01
#define IMPL_TCP_KEEPALIVE 0x02
#define IMPL_TCP_KEEPIDLE 0x03
#define IMPL_TCP_KEEPINTVL 0x04
#define IMPL_TCP_KEEPCNT 0x05
/* IPPROTO_IPV6 option names */
#define INTF_IPV6_V6ONLY 26
#define IMPL_IPV6_V6ONLY 27
struct musl_sockaddr
{
uint16_t sa_family;
char sa_data[14];
};
struct musl_ifmap {
unsigned long int mem_start;
unsigned long int mem_end;
unsigned short int base_addr;
unsigned char irq;
unsigned char dma;
unsigned char port;
};
struct musl_ifreq
{
union
{
#define IFNAMSIZ 16
char ifrn_name[IFNAMSIZ];
} ifr_ifrn;
union
{
struct musl_sockaddr ifru_addr;
struct musl_sockaddr ifru_dstaddr;
struct musl_sockaddr ifru_broadaddr;
struct musl_sockaddr ifru_netmask;
struct musl_sockaddr ifru_hwaddr;
short int ifru_flags;
int ifru_ivalue;
int ifru_mtu;
struct musl_ifmap ifru_map;
char ifru_slave[IFNAMSIZ];
char ifru_newname[IFNAMSIZ];
char *ifru_data;
} ifr_ifru;
};
struct musl_rtentry
{
unsigned long int rt_pad1;
struct musl_sockaddr rt_dst;
struct musl_sockaddr rt_gateway;
struct musl_sockaddr rt_genmask;
unsigned short int rt_flags;
short int rt_pad2;
unsigned long int rt_pad3;
unsigned char rt_tos;
unsigned char rt_class;
short int rt_pad4[sizeof(long)/2-1];
short int rt_metric;
char *rt_dev;
unsigned long int rt_mtu;
unsigned long int rt_window;
unsigned short int rt_irtt;
};
#endif /* __LWP_SYS_SOCKET_H__ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,123 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-11-12 Jesven the first version
*/
#ifndef __LWP_SYSCALL_H__
#define __LWP_SYSCALL_H__
#ifdef RT_USING_MUSLLIBC
#include "libc_musl.h"
#endif
#include "syscall_generic.h"
#include <stdint.h>
#include <rtthread.h>
#include <dfs_file.h>
#include <unistd.h>
#include <stdio.h> /* rename() */
#include <sys/stat.h>
#include <sys/statfs.h> /* statfs() */
#include <poll.h>
#include <sys/time.h>
#include <sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef uint32_t id_t; /* may contain pid, uid or gid */
/*
* Process priority specifications to get/setpriority.
*/
#define PRIO_MIN (-20)
#define PRIO_MAX 20
#define PRIO_PROCESS 0 /* only support lwp process */
#define PRIO_PGRP 1
#define PRIO_USER 2
const char *lwp_get_syscall_name(rt_uint32_t number);
const void *lwp_get_sys_api(rt_uint32_t number);
sysret_t sys_exit(int value);
sysret_t sys_exit_group(int status);
ssize_t sys_read(int fd, void *buf, size_t nbyte);
ssize_t sys_write(int fd, const void *buf, size_t nbyte);
size_t sys_lseek(int fd, size_t offset, int whence);
sysret_t sys_open(const char *name, int mode, ...);
sysret_t sys_close(int fd);
sysret_t sys_ioctl(int fd, unsigned long cmd, void* data);
sysret_t sys_fstat(int file, struct stat *buf);
sysret_t sys_poll(struct pollfd *fds, nfds_t nfds, int timeout);
sysret_t sys_nanosleep(const struct timespec *rqtp, struct timespec *rmtp);
sysret_t sys_gettimeofday(struct timeval *tp, struct timezone *tzp);
sysret_t sys_settimeofday(const struct timeval *tv, const struct timezone *tzp);
sysret_t sys_exec(char *filename, int argc, char **argv, char **envp);
sysret_t sys_kill(int pid, int sig);
sysret_t sys_getpid(void);
sysret_t sys_getpriority(int which, id_t who);
sysret_t sys_setpriority(int which, id_t who, int prio);
rt_sem_t sys_sem_create(const char *name, rt_uint32_t value, rt_uint8_t flag);
sysret_t sys_sem_delete(rt_sem_t sem);
sysret_t sys_sem_take(rt_sem_t sem, rt_int32_t time);
sysret_t sys_sem_release(rt_sem_t sem);
rt_mutex_t sys_mutex_create(const char *name, rt_uint8_t flag);
sysret_t sys_mutex_delete(rt_mutex_t mutex);
sysret_t sys_mutex_take(rt_mutex_t mutex, rt_int32_t time);
sysret_t sys_mutex_release(rt_mutex_t mutex);
rt_event_t sys_event_create(const char *name, rt_uint8_t flag);
sysret_t sys_event_delete(rt_event_t event);
sysret_t sys_event_send(rt_event_t event, rt_uint32_t set);
sysret_t sys_event_recv(rt_event_t event, rt_uint32_t set, rt_uint8_t opt, rt_int32_t timeout, rt_uint32_t *recved);
rt_mailbox_t sys_mb_create(const char *name, rt_size_t size, rt_uint8_t flag);
sysret_t sys_mb_delete(rt_mailbox_t mb);
sysret_t sys_mb_send(rt_mailbox_t mb, rt_ubase_t value);
sysret_t sys_mb_send_wait(rt_mailbox_t mb, rt_ubase_t value, rt_int32_t timeout);
sysret_t sys_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeout);
rt_mq_t sys_mq_create(const char *name, rt_size_t msg_size, rt_size_t max_msgs, rt_uint8_t flag);
sysret_t sys_mq_delete(rt_mq_t mq);
sysret_t sys_mq_send(rt_mq_t mq, void *buffer, rt_size_t size);
sysret_t sys_mq_urgent(rt_mq_t mq, void *buffer, rt_size_t size);
sysret_t sys_mq_recv(rt_mq_t mq, void *buffer, rt_size_t size, rt_int32_t timeout);
rt_thread_t sys_thread_create(void *arg[]);
sysret_t sys_thread_delete(rt_thread_t thread);
sysret_t sys_thread_startup(rt_thread_t thread);
rt_thread_t sys_thread_self(void);
sysret_t sys_channel_open(const char *name, int flags);
sysret_t sys_channel_close(int fd);
sysret_t sys_channel_send(int fd, rt_channel_msg_t data);
sysret_t sys_channel_send_recv(int fd, rt_channel_msg_t data, rt_channel_msg_t data_ret);
sysret_t sys_channel_reply(int fd, rt_channel_msg_t data);
sysret_t sys_channel_recv(int fd, rt_channel_msg_t data);
void sys_enter_critical(void);
void sys_exit_critical(void);
sysret_t sys_dup(int oldfd);
sysret_t sys_dup2(int oldfd, int new);
sysret_t sys_log(const char* log, int size);
#ifdef ARCH_MM_MMU
sysret_t sys_futex(int *uaddr, int op, int val, const struct timespec *timeout, int *uaddr2, int val3);
sysret_t sys_cacheflush(void *addr, int len, int cache);
#endif /* ARCH_MM_MMU */
sysret_t sys_setsid(void);
sysret_t sys_getsid(pid_t pid);
sysret_t sys_setpgid(pid_t pid, pid_t pgid);
sysret_t sys_getpgid(pid_t pid);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,194 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-01-15 shaojinchun first version
* 2023-11-16 xqyjlj Fix the case where tid is 0
*/
#define DBG_TAG "lwp.tid"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>
#include <rthw.h>
#include <rtthread.h>
#include "lwp_internal.h"
#ifdef ARCH_MM_MMU
#include "lwp_user_mm.h"
#endif
/* TIDs are allocated from the range [1, TID_MAX). */
#define TID_MAX 10000
/* Compile-time assertion: the char array gets a negative size (ill-formed)
 * when the condition `x` is false. */
#define TID_CT_ASSERT(name, x) \
struct assert_##name {char ary[2 * (x) - 1];}
TID_CT_ASSERT(tid_min_nr, LWP_TID_MAX_NR > 1);
TID_CT_ASSERT(tid_max_nr, LWP_TID_MAX_NR < TID_MAX);
/* backing storage for TID tree nodes; caps live TIDs at LWP_TID_MAX_NR */
static struct lwp_avl_struct lwp_tid_ary[LWP_TID_MAX_NR];
/* free list of recycled nodes, singly linked through avl_right */
static struct lwp_avl_struct *lwp_tid_free_head = RT_NULL;
/* number of nodes already handed out from lwp_tid_ary */
static int lwp_tid_ary_alloced = 0;
/* AVL tree of in-use TIDs, keyed by tid; node data is the bound thread */
static struct lwp_avl_struct *lwp_tid_root = RT_NULL;
/* last TID handed out; the next search starts just above it */
static int current_tid = 0;
/* protects all of the allocator state above */
static struct rt_mutex tid_lock;
/**
 * @brief Initialize the TID allocator by setting up its mutex.
 *
 * @return RT_EOK on success, or the rt_mutex_init() error code
 */
int lwp_tid_init(void)
{
    int rc;

    rc = rt_mutex_init(&tid_lock, "tidmtx", RT_IPC_FLAG_PRIO);
    return rc;
}
/**
 * @brief Allocate an unused thread id.
 *
 * Takes a tree node from the recycle list (or carves a fresh one from the
 * static array), then scans the TID space starting just above the last
 * allocated id and wrapping back to 1 when the top is reached.
 *
 * @return a tid in [1, TID_MAX), or 0 when all LWP_TID_MAX_NR slots are in use
 */
int lwp_tid_get(void)
{
    struct lwp_avl_struct *p;
    int tid = 0;
    lwp_mutex_take_safe(&tid_lock, RT_WAITING_FOREVER, 0);
    p = lwp_tid_free_head;
    if (p)
    {
        /* reuse a recycled node; the free list is chained via avl_right */
        lwp_tid_free_head = (struct lwp_avl_struct *)p->avl_right;
    }
    else if (lwp_tid_ary_alloced < LWP_TID_MAX_NR)
    {
        /* no recycled node: carve a fresh one from the static array */
        p = lwp_tid_ary + lwp_tid_ary_alloced;
        lwp_tid_ary_alloced++;
    }
    if (p)
    {
        int found_noused = 0;
        RT_ASSERT(p->data == RT_NULL);
        /* first scan upward from the last id handed out */
        for (tid = current_tid + 1; tid < TID_MAX; tid++)
        {
            if (!lwp_avl_find(tid, lwp_tid_root))
            {
                found_noused = 1;
                break;
            }
        }
        if (!found_noused)
        {
            /* wrap around and scan the lower range [1, current_tid] */
            for (tid = 1; tid <= current_tid; tid++)
            {
                if (!lwp_avl_find(tid, lwp_tid_root))
                {
                    found_noused = 1;
                    break;
                }
            }
        }
        /* a free tid always exists here: LWP_TID_MAX_NR < TID_MAX bounds
         * the number of live tids below the size of the tid space */
        p->avl_key = tid;
        lwp_avl_insert(p, &lwp_tid_root);
        current_tid = tid;
    }
    lwp_mutex_release_safe(&tid_lock);
    if (tid <= 0)
    {
        LOG_W("resource TID exhausted.");
    }
    return tid;
}
/**
 * @brief Release a thread id back to the allocator.
 *
 * Unbinds the tid from its thread and returns the tree node to the free
 * list. If the bound thread still has outstanding references (taken via
 * lwp_tid_get_thread_and_inc_ref()), the caller parks itself as the
 * thread's "recycler" and suspends until lwp_tid_dec_ref() drops the
 * last reference and resumes it.
 *
 * @param tid thread id to release; unknown tids are ignored
 */
void lwp_tid_put(int tid)
{
    struct lwp_avl_struct *p;
    rt_thread_t thread;
    rt_thread_t current;
    lwp_mutex_take_safe(&tid_lock, RT_WAITING_FOREVER, 0);
    p = lwp_avl_find(tid, lwp_tid_root);
    if (p)
    {
        thread = p->data;
        p->data = RT_NULL;
        lwp_avl_remove(p, &lwp_tid_root);
        /* push the node onto the free list (chained via avl_right) */
        p->avl_right = lwp_tid_free_head;
        lwp_tid_free_head = p;
    }
    else
        thread = RT_NULL;
    if (thread && thread->tid_ref_count)
    {
        /* references outstanding: publish ourselves as the recycler
         * (under tid_lock) and suspend until the last ref is dropped */
        current = rt_thread_self();
        RT_ASSERT(thread->susp_recycler == RT_NULL);
        thread->susp_recycler = current;
        /* suspend must happen before releasing the lock, inside a
         * critical section, so the resume from lwp_tid_dec_ref()
         * cannot be lost between release and schedule */
        rt_enter_critical();
        rt_thread_suspend_with_flag(current, RT_UNINTERRUPTIBLE);
        lwp_mutex_release_safe(&tid_lock);
        rt_exit_critical();
        rt_schedule();
    }
    else
        lwp_mutex_release_safe(&tid_lock);
}
rt_thread_t lwp_tid_get_thread_raw(int tid)
{
struct lwp_avl_struct *p;
rt_thread_t thread = RT_NULL;
p = lwp_avl_find(tid, lwp_tid_root);
if (p)
{
thread = (rt_thread_t)p->data;
}
return thread;
}
/**
 * @brief Resolve a tid to its thread and take a reference on it.
 *
 * A tid of 0 resolves to the calling thread. Each successful call must
 * be balanced by lwp_tid_dec_ref().
 *
 * @param tid thread id, or 0 for the current thread
 * @return the referenced thread, or RT_NULL when the tid is not bound
 */
rt_thread_t lwp_tid_get_thread_and_inc_ref(int tid)
{
    rt_thread_t target;

    lwp_mutex_take_safe(&tid_lock, RT_WAITING_FOREVER, 0);
    if (tid)
    {
        target = lwp_tid_get_thread_raw(tid);
    }
    else
    {
        target = rt_thread_self();
    }
    if (target)
    {
        target->tid_ref_count += 1;
    }
    lwp_mutex_release_safe(&tid_lock);
    return target;
}
/**
 * @brief Drop one reference on a thread's tid.
 *
 * If this was the last reference and a recycler thread is parked in
 * lwp_tid_put() waiting for references to drain, resume it.
 *
 * @param thread thread whose tid_ref_count is decremented; RT_NULL is a no-op
 */
void lwp_tid_dec_ref(rt_thread_t thread)
{
    rt_thread_t susp_putter;
    if (thread)
    {
        RT_ASSERT(rt_object_get_type(&thread->parent) == RT_Object_Class_Thread);

        lwp_mutex_take_safe(&tid_lock, RT_WAITING_FOREVER, 0);

        /* Read susp_recycler only while holding tid_lock: lwp_tid_put()
         * publishes this field under the same lock, so the previous
         * unlocked read could observe a stale RT_NULL and miss waking
         * a recycler that is about to suspend. */
        susp_putter = thread->susp_recycler;

        RT_ASSERT(thread->tid_ref_count > 0);
        thread->tid_ref_count -= 1;
        if (!thread->tid_ref_count && susp_putter)
        {
            rt_thread_resume(susp_putter);
        }
        lwp_mutex_release_safe(&tid_lock);
    }
}
/**
 * @brief Bind a thread object to a previously allocated tid.
 *
 * The slot must currently be unbound (asserted); binding an unknown tid
 * is a silent no-op.
 *
 * @param tid    tid returned by lwp_tid_get()
 * @param thread thread to associate with the tid
 */
void lwp_tid_set_thread(int tid, rt_thread_t thread)
{
    struct lwp_avl_struct *node;

    lwp_mutex_take_safe(&tid_lock, RT_WAITING_FOREVER, 0);
    node = lwp_avl_find(tid, lwp_tid_root);
    if (node != RT_NULL)
    {
        RT_ASSERT(node->data == RT_NULL);
        node->data = thread;
    }
    lwp_mutex_release_safe(&tid_lock);
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,240 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-10-28 Jesven first version
* 2021-02-12 lizhirui add 64-bit support for lwp_brk
* 2023-09-19 Shell add lwp_user_memory_remap_to_kernel
*/
#ifndef __LWP_USER_MM_H__
#define __LWP_USER_MM_H__
#include <rthw.h>
#include <rtthread.h>
#ifdef ARCH_MM_MMU
#include <lwp.h>
#include <mmu.h>
#include <mm_aspace.h>
#include <mm_fault.h>
#include <mm_page.h>
#ifdef __cplusplus
extern "C" {
#endif
#define LWP_MAP_FLAG_NONE 0x0000
#define LWP_MAP_FLAG_NOCACHE 0x0001
#define LWP_MAP_FLAG_MAP_FIXED 0x00010000ul
#define LWP_MAP_FLAG_PREFETCH 0x00020000ul
/**
* @brief Map files or devices into memory
* It will create a new mapping in the virtual address space of the target lwp
*
* @param lwp target process
* @param addr address from user space
* @param length length in bytes of mapping
* @param prot protect attribution of mapping
* @param flags flags of control
* @param fd file descriptor
* @param pgoffset offset to fd in 4096 bytes unit
* @return void* the address is successful, otherwise return MAP_FAILED
*/
void* lwp_mmap2(struct rt_lwp *lwp, void *addr, size_t length, int prot, int flags, int fd, off_t pgoffset);
/**
* @brief Unmap memory region in user space
*
* @param lwp target process
* @param addr address to unmap
* @param length length in bytes of unmapping
* @return int errno
*/
int lwp_munmap(struct rt_lwp *lwp, void *addr, size_t length);
void *lwp_mremap(struct rt_lwp *lwp, void *old_address, size_t old_size,
size_t new_size, int flags, void *new_address);
/**
* @brief Test if address from user is accessible address by user
*
* @param lwp target process
* @param addr address from user space
* @param size the bytes to access
* @return int RT_FALSE/RT_TRUE
*/
int lwp_user_accessible_ext(struct rt_lwp *lwp, void *addr, size_t size);
/**
* @brief Test if address from user is accessible address by user
* Same as lwp_user_accessible_ext except that lwp is current lwp
*
* @param addr address from user space
* @param size the bytes to access
* @return int RT_FALSE/RT_TRUE
*/
int lwp_user_accessable(void *addr, size_t size);
/**
* @brief Copy n bytes data from src to dst.
* Same as std libc memcpy, except that both src and dst may come from
* user space. lwp_memcpy will test and select the implementation based
* on the memory attribution on run-time
*
* @param dst where the data writes to
* @param src where the data comes from
* @param size the bytes to copy
* @return void* the destination address
*/
void *lwp_memcpy(void * __restrict dst, const void * __restrict src, size_t size);
/**
* @brief memcpy from address in user address space to kernel space buffer
*
* @param lwp target process
* @param dst kernel space address where the data writes to
* @param src user space address where the data comes from
* @param size the bytes to copy
* @return size_t the bytes copied
*/
size_t lwp_data_get(struct rt_lwp *lwp, void *dst, void *src, size_t size);
/**
* @brief lwp_data_get except that lwp is current lwp
*
* @param dst kernel space address where the data writes to
* @param src user space address where the data comes from
* @param size the bytes to copy
* @return size_t the bytes copied
*/
size_t lwp_get_from_user(void *dst, void *src, size_t size);
/**
* @brief memcpy from kernel space buffer to address in user address space
*
* @param lwp target process
* @param dst user space address where the data writes to
* @param src kernel space address where the data comes from
* @param size the bytes to copy
* @return size_t the bytes copied
*/
size_t lwp_data_put(struct rt_lwp *lwp, void *dst, void *src, size_t size);
/**
* @brief lwp_data_put except that lwp is current lwp
*
* @param dst user space address where the data writes to
* @param src kernel space address where the data comes from
* @param size the bytes to copy
* @return size_t the bytes copied
*/
size_t lwp_put_to_user(void *dst, void *src, size_t size);
/**
* @brief memset to address in user address space
*
* @param lwp target process
* @param dst user space address where the data writes to
* @param c the value to write
* @param size the bytes to copy
* @return size_t the bytes written
*/
size_t lwp_data_set(struct rt_lwp *lwp, void *dst, int c, size_t size);
int lwp_user_space_init(struct rt_lwp *lwp, rt_bool_t is_fork);
void lwp_unmap_user_space(struct rt_lwp *lwp);
int lwp_unmap_user(struct rt_lwp *lwp, void *va);
void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, rt_bool_t text);
rt_varea_t lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size);
/* check LWP_MAP_FLAG_* */
rt_varea_t lwp_map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags);
void *lwp_map_user_phy(struct rt_lwp *lwp, void *map_va, void *map_pa, size_t map_size, rt_bool_t cached);
int lwp_unmap_user_phy(struct rt_lwp *lwp, void *va);
rt_base_t lwp_brk(void *addr);
size_t lwp_user_strlen(const char *s);
size_t lwp_user_strlen_ext(struct rt_lwp *lwp, const char *s);
size_t lwp_strlen(struct rt_lwp *lwp, const char *s);
int lwp_fork_aspace(struct rt_lwp *dest_lwp, struct rt_lwp *src_lwp);
void lwp_data_cache_flush(struct rt_lwp *lwp, void *vaddr, size_t size);
/* Translate a user virtual address to physical via the MMU walk.
 * Lock-free variant: caller must already hold the aspace lock. */
static inline void *_lwp_v2p(struct rt_lwp *lwp, void *vaddr)
{
    return rt_hw_mmu_v2p(lwp->aspace, vaddr);
}
/* Locked variant of _lwp_v2p(): takes the aspace read lock around the
 * page-table walk so the mapping cannot change mid-translation. */
static inline void *lwp_v2p(struct rt_lwp *lwp, void *vaddr)
{
    RD_LOCK(lwp->aspace);
    void *paddr = _lwp_v2p(lwp, vaddr);
    RD_UNLOCK(lwp->aspace);
    return paddr;
}
/**
* @brief Remapping user space memory region to kernel
*
* @warning the remapped region in kernel should be unmapped after usage
*
* @param lwp target process
* @param uaddr user space address where the data writes to
* @param length the bytes to redirect
* @return void * the redirection address in kernel space
*/
void *lwp_user_memory_remap_to_kernel(rt_lwp_t lwp, void *uaddr, size_t length);
/**
 * @brief Translate POSIX mmap() MAP_* flags into kernel MMF_* flags.
 *
 * @param flags MAP_* bits from user space
 * @return the equivalent MMF_* flag set
 */
rt_inline rt_size_t lwp_user_mm_flag_to_kernel(int flags)
{
    rt_size_t kernel_flags = 0;

    if (flags & MAP_FIXED)
    {
        kernel_flags |= MMF_MAP_FIXED;
    }
    if (flags & (MAP_PRIVATE | MAP_ANON | MAP_ANONYMOUS))
    {
        kernel_flags |= MMF_MAP_PRIVATE;
    }
    if (flags & MAP_SHARED)
    {
        kernel_flags |= MMF_MAP_SHARED;
    }

    return kernel_flags;
}
#ifndef MMU_MAP_U_ROCB
#define MMU_MAP_U_ROCB MMU_MAP_U_RWCB
#endif /* MMU_MAP_U_ROCB */
/**
 * @brief Translate POSIX mmap() PROT_* bits into an arch MMU attribute.
 *
 * Without LWP_USING_MPROTECT every mapping is user read/write.
 *
 * @param prot PROT_* protection bits from user space
 * @return MMU attribute value understood by the arch MMU layer
 */
rt_inline rt_size_t lwp_user_mm_attr_to_kernel(int prot)
{
    RT_UNUSED(prot);
    rt_size_t k_attr = 0;

#ifdef LWP_USING_MPROTECT
    /* writable or executable mappings get full user RW access; the
     * former `(PROT_READ && PROT_WRITE)` clause was subsumed by the
     * PROT_WRITE test and has been dropped (same behavior) */
    if (prot & (PROT_EXEC | PROT_WRITE))
        k_attr = MMU_MAP_U_RWCB;
    else if (prot == PROT_NONE)
        /* PROT_NONE: keep the page kernel-only so any user access faults */
        k_attr = MMU_MAP_K_RWCB;
    else
        k_attr = MMU_MAP_U_ROCB;
#else /* !LWP_USING_MPROTECT */
    k_attr = MMU_MAP_U_RWCB;
#endif /* LWP_USING_MPROTECT */

    return k_attr;
}
#ifdef __cplusplus
}
#endif
#endif
#endif /*__LWP_USER_MM_H__*/

View File

@ -0,0 +1,16 @@
/*
* Copyright (c) 2006-2019, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-11-01 Jesven The first version
*/
#ifndef __PAGE_H__
#define __PAGE_H__
#include <mm_page.h>
#endif /*__PAGE_H__*/

View File

@ -0,0 +1,79 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-10 RT-Thread The first version
* 2023-03-13 WangXiaoyao syscall metadata as structure
*/
#ifndef __SYSCALL_DATA_H__
#define __SYSCALL_DATA_H__
#include <rtthread.h>
#include <errno.h>
#include <stdlib.h>
/* signed ARCH related types */
typedef rt_base_t sysret_t;
struct rt_syscall_def
{
void *func;
char *name;
};
/**
* @brief signature for syscall, used to locate syscall metadata.
*
* We don't allocate an exclusive section in ELF like Linux do
* to avoid initializing necessary data by iterating that section,
* which increases system booting time. We signature a pointer
* just below each syscall entry in syscall table to make it
* easy to locate every syscall's metadata by using syscall id.
*/
#define SYSCALL_SIGN(func) { \
(void *)(func), \
&RT_STRINGIFY(func)[4], \
}
#define SET_ERRNO(no) rt_set_errno(-(no))
#define GET_ERRNO() ({int _errno = rt_get_errno(); _errno > 0 ? -_errno : _errno;})
#define _SYS_WRAP(func) ({int _ret = func; _ret < 0 ? GET_ERRNO() : _ret;})
/**
 * @brief Map an RT-Thread error code onto a negative POSIX errno value.
 *
 * Either sign of the RT-Thread error is accepted; only its magnitude is
 * examined. Unknown codes collapse to -1.
 *
 * @param error RT-Thread error code (positive or negative form)
 * @return 0 for RT_EOK, a negative errno for known codes, -1 otherwise
 */
rt_inline sysret_t lwp_errno_to_posix(rt_err_t error)
{
    long magnitude = labs(error);
    sysret_t posix_rc;

    if (magnitude == RT_EOK)
        posix_rc = 0;
    else if (magnitude == RT_ETIMEOUT)
        posix_rc = -ETIMEDOUT;
    else if (magnitude == RT_EINVAL)
        posix_rc = -EINVAL;
    else if (magnitude == RT_ENOENT)
        posix_rc = -ENOENT;
    else if (magnitude == RT_ENOSPC)
        posix_rc = -ENOSPC;
    else if (magnitude == RT_EPERM)
        posix_rc = -EPERM;
    else
        posix_rc = -1;

    return posix_rc;
}
#endif /* __SYSCALL_DATA_H__ */

View File

@ -0,0 +1,14 @@
menuconfig LWP_USING_TERMINAL
bool "Terminal I/O Subsystem"
depends on RT_USING_SMART
default y
select RT_USING_SERIAL_BYPASS
if LWP_USING_TERMINAL
config LWP_PTY_MAX_PARIS_LIMIT
int "Max number of pty devices registered at the same time"
default 64
help
This upper limit is set to protect kernel memory from draining
out by the application if it keeps allocating pty devices.
endif

View File

@ -0,0 +1,741 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell Add compatible layer for FreeBSD
*/
#ifndef __LWP_TTY_BSD_PORTING_H__
#define __LWP_TTY_BSD_PORTING_H__
#include <rtthread.h>
#include <lwp_internal.h>
#define _KERNEL
#ifndef __unused
#define __unused __attribute__((__unused__))
#endif
/* functionability of bsd tty layer */
#if 0
#define USING_BSD_HOOK
#endif
/* Only for devfs d_close() flags. */
#define FLASTCLOSE O_DIRECTORY
#define FREVOKE 0x00200000
/*
* Output flags - software output processing
*/
#if !((OPOST | OLCUC | ONLCR) & 0x8)
#define ONOEOT 0x0008 /* discard EOT's (^D) on output) */
#endif
/*
* Kernel encoding of open mode; separate read and write bits that are
* independently testable: 1 greater than the above.
*
* XXX
* FREAD and FWRITE are excluded from the #ifdef _KERNEL so that TIOCFLUSH,
* which was documented to use FREAD/FWRITE, continues to work.
*/
#define FREAD 0x0001
#define FWRITE 0x0002
/*
* Flags to memory allocation functions.
*/
#define M_NOWAIT 0x0001 /* do not block */
#define M_WAITOK 0x0002 /* ok to block */
#define M_NORECLAIM 0x0080 /* do not reclaim after failure */
#define M_ZERO 0x0100 /* bzero the allocation */
#define M_NOVM 0x0200 /* don't ask VM for pages */
#define M_USE_RESERVE 0x0400 /* can alloc out of reserve memory */
#define M_NODUMP 0x0800 /* don't dump pages in this allocation */
#define M_FIRSTFIT 0x1000 /* only for vmem, fast fit */
#define M_BESTFIT 0x2000 /* only for vmem, low fragmentation */
#define M_EXEC 0x4000 /* allocate executable space */
#define M_NEXTFIT 0x8000 /* only for vmem, follow cursor */
#define M_VERSION 2020110501
/*
* The INVARIANTS-enabled mtx_assert() functionality.
*
* The constants need to be defined for INVARIANT_SUPPORT infrastructure
* support as _mtx_assert() itself uses them and the latter implies that
* _mtx_assert() must build.
*/
#define MA_OWNED (1)
#define MA_NOTOWNED (2)
#define MA_RECURSED (4)
#define MA_NOTRECURSED (8)
/*
* Indentification of modem control signals. These definitions match
* the TIOCMGET definitions in <sys/ttycom.h> shifted a bit down, and
* that identity is enforced with CTASSERT at the bottom of kern/tty.c
* Both the modem bits and delta bits must fit in 16 bit.
*/
#define SER_DTR 0x0001 /* data terminal ready */
#define SER_RTS 0x0002 /* request to send */
#define SER_STX 0x0004 /* secondary transmit */
#define SER_SRX 0x0008 /* secondary receive */
#define SER_CTS 0x0010 /* clear to send */
#define SER_DCD 0x0020 /* data carrier detect */
#define SER_RI 0x0040 /* ring indicate */
#define SER_DSR 0x0080 /* data set ready */
#define SER_MASK_STATE 0x00ff
/*
* Flags for ioflag. (high 16 bits used to ask for read-ahead and
* help with write clustering)
* NB: IO_NDELAY and IO_DIRECT are linked to fcntl.h
*/
#if 0
#define IO_UNIT 0x0001 /* do I/O as atomic unit */
#define IO_APPEND 0x0002 /* append write to end */
#endif /* not porting */
#define IO_NDELAY 0x0004 /* FNDELAY flag set in file table */
#if 0
#define IO_NODELOCKED 0x0008 /* underlying node already locked */
#define IO_ASYNC 0x0010 /* bawrite rather then bdwrite */
#define IO_VMIO 0x0020 /* data already in VMIO space */
#define IO_INVAL 0x0040 /* invalidate after I/O */
#define IO_SYNC 0x0080 /* do I/O synchronously */
#define IO_DIRECT 0x0100 /* attempt to bypass buffer cache */
#define IO_NOREUSE 0x0200 /* VMIO data won't be reused */
#define IO_EXT 0x0400 /* operate on external attributes */
#define IO_NORMAL 0x0800 /* operate on regular data */
#define IO_NOMACCHECK 0x1000 /* MAC checks unnecessary */
#define IO_BUFLOCKED 0x2000 /* ffs flag; indir buf is locked */
#define IO_RANGELOCKED 0x4000 /* range locked */
#define IO_DATASYNC 0x8000 /* do only data I/O synchronously */
#define IO_SEQMAX 0x7F /* seq heuristic max value */
#define IO_SEQSHIFT 16 /* seq heuristic in upper 16 bits */
#endif /* not porting */
/** Used to distinguish between normal, callout, lock and init devices.
* Note: this is not used in smart system.
*/
#define TTYUNIT_INIT 0x1
#define TTYUNIT_LOCK 0x2
#define TTYUNIT_CALLOUT 0x4
/*
* TTY privileges.
*/
#define PRIV_TTY_CONSOLE 250 /* Set console to tty. */
#define PRIV_TTY_DRAINWAIT 251 /* Set tty drain wait time. */
#define PRIV_TTY_DTRWAIT 252 /* Set DTR wait on tty. */
#define PRIV_TTY_EXCLUSIVE 253 /* Override tty exclusive flag. */
#define _PRIV_TTY_PRISON 254 /* Removed. */
#define PRIV_TTY_STI 255 /* Simulate input on another tty. */
#define PRIV_TTY_SETA 256 /* Set tty termios structure. */
#define MPASS(ex) RT_ASSERT(ex)
#if !defined(MIN)
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
#if !defined(MAX)
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif
#define curthread rt_thread_self()
#ifdef USING_BSD_HOOK
#define ttyhook_hashook(tp, hook) \
((tp)->t_hook != NULL && (tp)->t_hook->th_##hook != NULL)
#else
#define ttyhook_hashook(tp, hook) (RT_FALSE)
#endif
/* condvar API */
#include <rtdevice.h>
#define cv_init(cvp, name) rt_condvar_init(cvp, name)
#define cv_destroy(cvp) rt_condvar_detach(cvp)
#define cv_wait(cvp, mp) \
rt_condvar_timedwait(cvp, mp, RT_KILLABLE, RT_WAITING_FOREVER)
#define cv_wait_sig(cvp, mp) \
rt_condvar_timedwait(cvp, mp, RT_INTERRUPTIBLE, RT_WAITING_FOREVER)
#define cv_signal(cvp) rt_condvar_signal(cvp)
#define cv_broadcast(cvp) rt_condvar_broadcast(cvp)
#define cv_timedwait(cvp, mp, t) rt_condvar_timedwait(cvp, mp, RT_KILLABLE, t)
#define cv_timedwait_sig(cvp, mp, t) \
rt_condvar_timedwait(cvp, mp, RT_INTERRUPTIBLE, t)
struct lwp_tty;
struct uio;
/* TODO: just a place holder since devfs is not capable of doing this currently
*/
struct file
{
};
typedef rt_base_t sbintime_t;
typedef rt_ubase_t vm_offset_t;
typedef rt_base_t vm_ooffset_t;
typedef rt_ubase_t vm_paddr_t;
typedef rt_ubase_t vm_pindex_t;
typedef rt_ubase_t vm_size_t;
typedef char *rt_caddr_t;
/*
* The exact set of memory attributes is machine dependent. However,
* every machine is required to define VM_MEMATTR_DEFAULT and
* VM_MEMATTR_UNCACHEABLE.
*/
typedef char vm_memattr_t; /* memory attribute codes */
typedef int d_open_t(struct lwp_tty *tp, int oflags, int devtype,
struct rt_thread *td);
typedef int d_fdopen_t(struct lwp_tty *tp, int oflags, struct rt_thread *td,
struct file *fp);
typedef int d_close_t(struct lwp_tty *tp, int fflag, int devtype,
struct rt_thread *td);
#ifdef USING_BSD_DEVICE_STRATEGY
typedef void d_strategy_t(struct bio *bp);
#endif
typedef int d_ioctl_t(struct lwp_tty *tp, rt_ubase_t cmd, rt_caddr_t data,
int fflag, struct rt_thread *td);
typedef int d_read_t(struct lwp_tty *tp, struct uio *uio, int ioflag);
typedef int d_write_t(struct lwp_tty *tp, struct uio *uio, int ioflag);
typedef int d_poll_t(struct lwp_tty *tp, rt_pollreq_t *req,
struct rt_thread *td);
#ifdef USING_BSD_KNOTE
typedef int d_kqfilter_t(struct lwp_tty *tp, struct knote *kn);
#endif /* USING_BSD_KNOTE */
typedef int d_mmap_t(struct lwp_tty *tp, vm_ooffset_t offset, vm_paddr_t *paddr,
int nprot, vm_memattr_t *memattr);
#ifdef USING_BSD_MMAP_SINGLE
typedef int d_mmap_single_t(struct cdev *cdev, vm_ooffset_t *offset,
vm_size_t size, struct vm_object **object,
int nprot);
#endif /* USING_BSD_MMAP_SINGLE */
typedef void d_purge_t(struct lwp_tty *tp);
/*
* Character device switch table
*/
struct cdevsw
{
#ifdef USING_BSD_RAW_CDEVSW
int d_version;
u_int d_flags;
const char *d_name;
#endif /* USING_BSD_RAW_CDEVSW */
d_open_t *d_open;
d_fdopen_t *d_fdopen;
d_close_t *d_close;
d_read_t *d_read;
d_write_t *d_write;
d_ioctl_t *d_ioctl;
d_poll_t *d_poll;
d_mmap_t *d_mmap;
#ifdef USING_BSD_DEVICE_STRATEGY
d_strategy_t *d_strategy;
#endif /* USING_BSD_DEVICE_STRATEGY */
#ifdef USING_BSD_RAW_CDEVSW
void *d_spare0;
d_kqfilter_t *d_kqfilter;
d_purge_t *d_purge;
d_mmap_single_t *d_mmap_single;
int32_t d_spare1[3];
void *d_spare2[3];
/* These fields should not be messed with by drivers */
LIST_HEAD(, cdev) d_devs;
int d_spare3;
union
{
struct cdevsw *gianttrick;
SLIST_ENTRY(cdevsw) postfree_list;
} __d_giant;
#endif
};
struct iovec
{
void *iov_base; /* Base address. */
size_t iov_len; /* Length. */
};
enum uio_rw
{
UIO_READ,
UIO_WRITE
};
struct uio
{
struct iovec *uio_iov; /* scatter/gather list */
int uio_iovcnt; /* length of scatter/gather list */
off_t uio_offset; /* offset in target object */
ssize_t uio_resid; /* remaining bytes to process */
#ifdef USING_BSD_UIO
enum uio_seg uio_segflg; /* address space */
#endif
enum uio_rw uio_rw; /* operation */
#ifdef USING_BSD_UIO
struct rt_thread *uio_td; /* owner */
#endif /* USING_BSD_UIO */
};
#include <lwp_user_mm.h>
/**
 * @brief Move data between a kernel buffer and the memory described by a uio.
 *
 * Minimal replacement for the BSD uiomove(): copies @p n bytes between
 * @p operand and the first iovec of @p uio, then advances the iovec
 * cursor and the uio bookkeeping fields.
 *
 * @param operand kernel buffer to copy into (UIO_READ) or out of (UIO_WRITE)
 * @param n       number of bytes to move
 * @param uio     scatter/gather descriptor to consume
 * @return 0 on success, -1 when uio_rw holds an unknown operation
 */
rt_inline int uiomove(void *operand, int n, struct uio *uio)
{
    switch (uio->uio_rw)
    {
        case UIO_READ:
            memcpy(uio->uio_iov->iov_base, operand, n);
            break;
        case UIO_WRITE:
            memcpy(operand, uio->uio_iov->iov_base, n);
            break;
        default:
            return -1;
    }

    /* Advance the iovec cursor. iov_len counts remaining bytes, so it
     * must shrink by n: the previous `iov_len--` consumed only one byte
     * per call and desynchronized iov_len from iov_base/uio_resid. */
    uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + n;
    uio->uio_iov->iov_len -= n;
    uio->uio_offset += n;
    uio->uio_resid -= n;
    return 0;
}
/* Privilege check stub: every thread is granted every privilege
 * (no per-thread privilege model here). Returns 0 == allowed. */
rt_inline int priv_check(struct rt_thread *td, int priv)
{
    return 0;
}
/* Disable console redirection to a tty. Stubbed as a successful no-op;
 * console redirection is not implemented in this port. */
rt_inline int constty_clear(struct lwp_tty *tp)
{
    // rt_kprintf("\nTODO: %s unimplemented!\n", __func__);
    return 0;
}
/* Redirect console output to this tty. Stubbed as a successful no-op. */
rt_inline int constty_set(struct lwp_tty *tp)
{
    // rt_kprintf("\nTODO: %s unimplemented!\n", __func__);
    return 0;
}
/**
* UMA (Universal Memory Allocator)
*/
#define UMA_ALIGN_PTR (sizeof(void *) - 1) /* Alignment fit for ptr */
typedef int (*uma_ctor)(void *mem, int size, void *arg, int flags);
typedef void (*uma_dtor)(void *mem, int size, void *arg);
typedef int (*uma_init)(void *mem, int size, int flags);
typedef void (*uma_fini)(void *mem, int size);
struct uma_zone
{
char *name;
int align;
int size;
};
/* Opaque type used as a handle to the zone */
typedef struct uma_zone *uma_zone_t;
/**
 * @brief Create a UMA zone descriptor (thin rt_malloc-backed shim).
 *
 * Constructor/destructor callbacks are not supported by this port;
 * callers must pass RT_NULL for all four (asserted).
 *
 * @param name  zone name (referenced, not copied)
 * @param size  item size in bytes
 * @param align alignment mask (alignment - 1), as in FreeBSD UMA
 * @param flags creation flags (unused)
 * @return new zone descriptor, or RT_NULL on allocation failure
 */
rt_inline uma_zone_t uma_zcreate(char *name, int size, uma_ctor ctor,
                                 uma_dtor dtor, uma_init zinit, uma_fini zfini,
                                 int align, uint16_t flags)
{
    uma_zone_t zone;

    zone = rt_malloc(sizeof(struct uma_zone));
    if (zone == RT_NULL)
        return RT_NULL;

    /* callbacks are unsupported in this compatibility layer */
    RT_ASSERT(ctor == RT_NULL);
    RT_ASSERT(dtor == RT_NULL);
    RT_ASSERT(zinit == RT_NULL);
    RT_ASSERT(zfini == RT_NULL);

    zone->name = name;
    zone->size = size;
    zone->align = align;
    return zone;
}
/**
 * @brief Allocate one zeroed item from a UMA zone.
 *
 * @param zone  zone describing item size and alignment
 * @param flags M_* allocation flags (unused; allocation never blocks here)
 * @return pointer to a zeroed item, or RT_NULL on failure
 */
rt_inline void *uma_zalloc(uma_zone_t zone, int flags)
{
    void *buf = rt_malloc_align(zone->size, zone->align + 1);

    if (buf)
    {
        /* Zero the whole item. The previous code passed
         * sizeof(zone->size) (i.e. sizeof(int)) as the length,
         * leaving all but the first few bytes uninitialized. */
        rt_memset(buf, 0, zone->size);
    }
    return buf;
}
/* Return an item to its zone. The shim keeps no per-zone caches or
 * statistics, so this simply frees the aligned allocation; the zone
 * argument is accepted for API compatibility and unused. */
rt_inline void uma_zfree(uma_zone_t zone, void *item)
{
    rt_free_align(item);
}
/**
* bsd type of speed to linux type.
* Note: with switch blocks, compiler can generate the optimized version for us
*/
#include <termios.h>
/**
 * @brief Convert a termios Bxx speed constant to its numeric baud rate.
 *
 * @param speed one of the termios B* constants
 * @return baud rate in bit/s, or -1 for an unrecognized constant
 */
rt_inline long bsd_speed_to_integer(speed_t speed)
{
    /* one row per supported termios speed constant */
    static const struct
    {
        speed_t code;
        long value;
    } speed_table[] = {
        {B0, 0},             {B50, 50},           {B75, 75},
        {B110, 110},         {B134, 134},         {B150, 150},
        {B200, 200},         {B300, 300},         {B600, 600},
        {B1200, 1200},       {B1800, 1800},       {B2400, 2400},
        {B4800, 4800},       {B9600, 9600},       {B19200, 19200},
        {B38400, 38400},     {B57600, 57600},     {B115200, 115200},
        {B230400, 230400},   {B460800, 460800},   {B500000, 500000},
        {B576000, 576000},   {B921600, 921600},   {B1000000, 1000000},
        {B1152000, 1152000}, {B1500000, 1500000}, {B2000000, 2000000},
        {B2500000, 2500000}, {B3000000, 3000000}, {B3500000, 3500000},
        {B4000000, 4000000},
    };
    size_t i;

    for (i = 0; i < sizeof(speed_table) / sizeof(speed_table[0]); i++)
    {
        if (speed_table[i].code == speed)
        {
            return speed_table[i].value;
        }
    }
    return -1; /* invalid speed */
}
/* time.h */
/* Operations on timevals. */
#define timevalclear(tvp) ((tvp)->tv_sec = (tvp)->tv_usec = 0)
#define timevalisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec)
#define timevalcmp(tvp, uvp, cmp) \
(((tvp)->tv_sec == (uvp)->tv_sec) ? ((tvp)->tv_usec cmp(uvp)->tv_usec) \
: ((tvp)->tv_sec cmp(uvp)->tv_sec))
/* Fetch the current wall-clock time into *now (microsecond resolution). */
rt_inline void getmicrotime(struct timeval *now)
{
    gettimeofday(now, RT_NULL);
}
/* Normalize a timeval so that 0 <= tv_usec < 1000000, carrying the
 * excess into tv_sec. Handles one carry in each direction, which is
 * sufficient after a single add/subtract of normalized values. */
rt_inline void timevalfix(struct timeval *tv)
{
    if (tv->tv_usec < 0)
    {
        tv->tv_sec--;
        tv->tv_usec += 1000000;
    }
    if (tv->tv_usec >= 1000000)
    {
        tv->tv_sec++;
        tv->tv_usec -= 1000000;
    }
}
/* op1 += op2, result normalized in place. */
rt_inline void timevaladd(struct timeval *op1, const struct timeval *op2)
{
    op1->tv_sec += op2->tv_sec;
    op1->tv_usec += op2->tv_usec;
    timevalfix(op1);
}
/* op1 -= op2, result normalized in place. */
rt_inline void timevalsub(struct timeval *op1, const struct timeval *op2)
{
    op1->tv_sec -= op2->tv_sec;
    op1->tv_usec -= op2->tv_usec;
    timevalfix(op1);
}
/* Convert a timeval into a count of OS ticks (truncating the
 * sub-microsecond remainder of the usec part). */
rt_inline rt_tick_t tvtohz(struct timeval *tv)
{
    rt_tick_t rc;
    rc = tv->tv_sec * RT_TICK_PER_SECOND;
    rc += tv->tv_usec * RT_TICK_PER_SECOND / MICROSECOND_PER_SECOND;
    return rc;
}
/* ioctl */
#define _BSD_TIOCTL(val) ((val) << 16)
enum bsd_ioctl_cmd
{
BSD_TIOCDRAIN = 1,
BSD_TIOCFLUSH,
BSD_TIOCSTART,
BSD_TIOCSTOP,
BSD_TIOCSTAT,
BSD_TIOCGDRAINWAIT,
BSD_TIOCSDRAINWAIT,
BSD_TIOCSDTR,
BSD_TIOCCDTR,
};
#ifndef TIOCGETA /* get termios struct */
#define TIOCGETA TCGETS
#endif
#ifndef TIOCSETA /* set termios struct */
#define TIOCSETA TCSETS
#endif
#ifndef TIOCSETAW /* drain output, set */
#define TIOCSETAW TCSETSW
#endif
#ifndef TIOCSETAF /* drn out, fls in, set */
#define TIOCSETAF TCSETSF
#endif
#ifndef TIOCDRAIN /* wait till output drained */
#define TIOCDRAIN _BSD_TIOCTL(BSD_TIOCDRAIN)
#endif
#ifndef TIOCFLUSH /* flush buffers */
#define TIOCFLUSH _BSD_TIOCTL(BSD_TIOCFLUSH)
#endif
#ifndef TIOCSTART /* start output, like ^Q */
#define TIOCSTART _BSD_TIOCTL(BSD_TIOCSTART)
#endif
#ifndef TIOCSTOP /* stop output, like ^S */
#define TIOCSTOP _BSD_TIOCTL(BSD_TIOCSTOP)
#endif
#ifndef TIOCSTAT /* simulate ^T status message */
#define TIOCSTAT _BSD_TIOCTL(BSD_TIOCSTAT)
#endif
#ifndef TIOCGDRAINWAIT /* get ttywait timeout */
#define TIOCGDRAINWAIT _BSD_TIOCTL(BSD_TIOCGDRAINWAIT)
#endif
#ifndef TIOCSDRAINWAIT /* set ttywait timeout */
#define TIOCSDRAINWAIT _BSD_TIOCTL(BSD_TIOCSDRAINWAIT)
#endif
#ifndef TIOCSDTR /* set data terminal ready */
#define TIOCSDTR _BSD_TIOCTL(BSD_TIOCSDTR)
#endif
#ifndef TIOCCDTR /* clear data terminal ready */
#define TIOCCDTR _BSD_TIOCTL(BSD_TIOCCDTR)
#endif
#define ENOIOCTL ENOSYS
#define NO_PID -1
/* line discipline */
#define TTYDISC 0 /* termios tty line discipline */
#define SLIPDISC 4 /* serial IP discipline */
#define PPPDISC 5 /* PPP discipline */
#define NETGRAPHDISC 6 /* Netgraph tty node discipline */
#define H4DISC 7 /* Netgraph Bluetooth H4 discipline */
/*
* Control flags - hardware control of terminal
*/
#if __BSD_VISIBLE
#define CIGNORE 0x00000001 /* ignore control flags */
#define CCTS_OFLOW 0x00010000 /* CTS flow control of output */
#define CRTSCTS (CCTS_OFLOW | CRTS_IFLOW)
#define CRTS_IFLOW 0x00020000 /* RTS flow control of input */
#define CDTR_IFLOW 0x00040000 /* DTR flow control of input */
#define CDSR_OFLOW 0x00080000 /* DSR flow control of output */
#define CCAR_OFLOW 0x00100000 /* DCD flow control of output */
#define CNO_RTSDTR 0x00200000 /* Do not assert RTS or DTR automatically */
#else
#define CIGNORE 0 /* ignore control flags */
#define CCTS_OFLOW 0 /* CTS flow control of output */
#define CRTS_IFLOW 0 /* RTS flow control of input */
#define CDTR_IFLOW 0 /* DTR flow control of input */
#define CDSR_OFLOW 0 /* DSR flow control of output */
#define CCAR_OFLOW 0 /* DCD flow control of output */
#define CNO_RTSDTR 0 /* Do not assert RTS or DTR automatically */
#endif
#ifndef CRTSCTS
#define CRTSCTS (CCTS_OFLOW | CRTS_IFLOW)
#endif
#ifndef howmany
#define howmany(x, y) (((x) + ((y)-1)) / (y))
#endif
/* Minimal stand-in for the BSD credential structure: the porting layer
 * carries no credential data, so this is an empty placeholder passed only
 * to satisfy the bsd_fileops signatures. */
struct ucred
{
};
#define NOCRED ((struct ucred *)0) /* no credential available */
#define FSCRED ((struct ucred *)-1) /* filesystem credential */
/* convert from open() flags to/from fflags; convert O_RD/WR to FREAD/FWRITE */
#include <fcntl.h>
/* NOTE(review): mirrors FreeBSD's FFLAGS/OFLAGS: kernel fflags are the
 * open(2) access bits shifted up by one so FREAD/FWRITE are non-zero;
 * O_EXEC/O_PATH encoded flags pass through unchanged — confirm against the
 * project's fcntl.h values. */
#define FFLAGS(oflags) ((oflags)&O_EXEC ? (oflags) : (oflags) + 1)
#define OFLAGS(fflags) \
    (((fflags) & (O_EXEC | O_PATH)) != 0 ? (fflags) : (fflags)-1)
typedef int fo_rdwr_t(struct lwp_tty *tp, struct uio *uio,
struct ucred *active_cred, int flags,
struct rt_thread *td);
typedef int fo_truncate_t(struct lwp_tty *tp, off_t length,
struct ucred *active_cred, struct rt_thread *td);
typedef int fo_ioctl_t(struct lwp_tty *tp, rt_ubase_t com, void *data,
struct ucred *active_cred, int fflags, struct rt_thread *td);
typedef int fo_poll_t(struct lwp_tty *tp, struct rt_pollreq *rq, struct ucred *active_cred,
struct rt_thread *td);
typedef int fo_stat_t(struct lwp_tty *tp, struct stat *sb,
struct ucred *active_cred);
typedef int fo_close_t(struct lwp_tty *tp, struct rt_thread *td);
#ifdef USING_BSD_FO_EXT
typedef int fo_chmod_t(struct file *fp, mode_t mode, struct ucred *active_cred,
struct rt_thread *td);
typedef int fo_chown_t(struct file *fp, uid_t uid, gid_t gid,
struct ucred *active_cred, struct rt_thread *td);
typedef int fo_sendfile_t(struct file *fp, int sockfd, struct uio *hdr_uio,
struct uio *trl_uio, off_t offset, size_t nbytes,
off_t *sent, int flags, struct rt_thread *td);
typedef int fo_seek_t(struct file *fp, off_t offset, int whence,
struct rt_thread *td);
typedef int fo_kqfilter_t(struct file *fp, struct knote *kn);
typedef int fo_fill_kinfo_t(struct file *fp, struct kinfo_file *kif,
struct filedesc *fdp);
typedef int fo_mmap_t(struct file *fp, vm_map_t map, vm_offset_t *addr,
vm_size_t size, vm_prot_t prot, vm_prot_t cap_maxprot,
int flags, vm_ooffset_t foff, struct rt_thread *td);
typedef int fo_aio_queue_t(struct file *fp, struct kaiocb *job);
typedef int fo_add_seals_t(struct file *fp, int flags);
typedef int fo_get_seals_t(struct file *fp, int *flags);
typedef int fo_fallocate_t(struct file *fp, off_t offset, off_t len,
struct rt_thread *td);
typedef int fo_fspacectl_t(struct file *fp, int cmd, off_t *offset,
off_t *length, int flags, struct ucred *active_cred,
struct rt_thread *td);
typedef int fo_spare_t(struct file *fp);
#endif /* USING_BSD_FO_EXT */
typedef int fo_flags_t;
/* Table of file-operation callbacks implemented by a tty device, modeled on
 * FreeBSD's struct fileops. Only the core seven entries are used unless
 * USING_BSD_FO_EXT is defined. */
struct bsd_fileops
{
    fo_rdwr_t *fo_read;         /* read from the device */
    fo_rdwr_t *fo_write;        /* write to the device */
    fo_truncate_t *fo_truncate; /* truncate to a given length */
    fo_ioctl_t *fo_ioctl;       /* device control */
    fo_poll_t *fo_poll;         /* poll/select support */
    fo_stat_t *fo_stat;         /* fill in a struct stat */
    fo_close_t *fo_close;       /* final close of the device */
#ifdef USING_BSD_FO_EXT
    fo_chmod_t *fo_chmod;
    fo_chown_t *fo_chown;
    fo_sendfile_t *fo_sendfile;
    fo_seek_t *fo_seek;
    fo_kqfilter_t *fo_kqfilter;
    fo_fill_kinfo_t *fo_fill_kinfo;
    fo_mmap_t *fo_mmap;
    fo_aio_queue_t *fo_aio_queue;
    fo_add_seals_t *fo_add_seals;
    fo_get_seals_t *fo_get_seals;
    fo_fallocate_t *fo_fallocate;
    fo_fspacectl_t *fo_fspacectl;
    fo_spare_t *fo_spares[8]; /* Spare slots */
#endif
    fo_flags_t fo_flags; /* DFLAG_* below */
};
#define DFLAG_PASSABLE 0x01 /* may be passed via unix sockets. */
#define DFLAG_SEEKABLE 0x02 /* seekable / nonsequential */
#endif /* __LWP_TTY_BSD_PORTING_H__ */

View File

@ -0,0 +1,81 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell init ver.
*/
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed under sponsorship from Snow
* B.V., the Netherlands.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _SYS_TTYDISC_H_
#define _SYS_TTYDISC_H_
#ifndef __LWP_TERMINAL_H__
#error "can only be included through <terminal.h>"
#endif /* !__LWP_TERMINAL_H__ */
#include <rtdef.h>
struct rt_wqueue;
struct rt_thread;
struct lwp_tty;
struct uio;
/* Top half routines. */
void ttydisc_open(struct lwp_tty *tp);
void ttydisc_close(struct lwp_tty *tp);
int ttydisc_read(struct lwp_tty *tp, struct uio *uio, int ioflag);
int ttydisc_write(struct lwp_tty *tp, struct uio *uio, int ioflag);
void ttydisc_optimize(struct lwp_tty *tp);
/* Bottom half routines. */
void ttydisc_modem(struct lwp_tty *tp, int open);
#define ttydisc_can_bypass(tp) ((tp)->t_flags & TF_BYPASS)
int ttydisc_rint(struct lwp_tty *tp, char c, int flags);
size_t ttydisc_rint_simple(struct lwp_tty *tp, const void *buf, size_t len);
size_t ttydisc_rint_bypass(struct lwp_tty *tp, const void *buf, size_t len);
void ttydisc_rint_done(struct lwp_tty *tp);
size_t ttydisc_rint_poll(struct lwp_tty *tp);
size_t ttydisc_getc(struct lwp_tty *tp, void *buf, size_t len);
int ttydisc_getc_uio(struct lwp_tty *tp, struct uio *uio);
size_t ttydisc_getc_poll(struct lwp_tty *tp);
/* Error codes for ttydisc_rint(). */
#define TRE_FRAMING 0x01
#define TRE_PARITY 0x02
#define TRE_OVERRUN 0x04
#define TRE_BREAK 0x08
#endif /* !_SYS_TTYDISC_H_ */

View File

@ -0,0 +1,180 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell init ver.
*/
#include "bsd_porting.h"
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed under sponsorship from Snow
* B.V., the Netherlands.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _SYS_TTYQUEUE_H_
#define _SYS_TTYQUEUE_H_
#ifndef __LWP_TERMINAL_H__
#error "can only be included through <teminal.h>"
#endif /* !__LWP_TERMINAL_H__ */
struct lwp_tty;
struct ttyinq_block;
struct ttyoutq_block;
struct uio;
/* Data input queue.
 *
 * Backed by a linked list of ttyinq_block segments. The unsigned offsets
 * below are byte positions counted from the start of the first block. */
struct ttyinq
{
    struct ttyinq_block *ti_firstblock;   /* head of the block list */
    struct ttyinq_block *ti_startblock;
    struct ttyinq_block *ti_reprintblock;
    struct ttyinq_block *ti_lastblock;    /* block currently being written */
    unsigned int ti_begin;     /* start of unread data */
    unsigned int ti_linestart; /* start of the not-yet-canonicalized line */
    unsigned int ti_reprint;   /* reprint position (see ttyinq_reprintpos_*) */
    unsigned int ti_end;       /* write offset (end of buffered data) */
    unsigned int ti_nblocks;   /* blocks currently linked into the list */
    unsigned int ti_quota;     /* max blocks allowed (set by ttyinq_setsize) */
};
#define TTYINQ_DATASIZE 128
/* Data output queue: a simpler variant of the input queue with no
 * quoting bitmap and no canonicalization positions. */
struct ttyoutq
{
    struct ttyoutq_block *to_firstblock; /* head of the block list */
    struct ttyoutq_block *to_lastblock;  /* block currently being written */
    unsigned int to_begin;   /* start of unread data */
    unsigned int to_end;     /* write offset (end of buffered data) */
    unsigned int to_nblocks; /* blocks currently linked into the list */
    unsigned int to_quota;   /* max blocks allowed (set by ttyoutq_setsize) */
};
#define TTYOUTQ_DATASIZE (256 - sizeof(struct ttyoutq_block *))
/* Input queue handling routines. */
int ttyinq_setsize(struct ttyinq *ti, struct lwp_tty *tp, size_t len);
void ttyinq_free(struct ttyinq *ti);
int ttyinq_read_uio(struct ttyinq *ti, struct lwp_tty *tp, struct uio *uio,
size_t readlen, size_t flushlen);
size_t ttyinq_write(struct ttyinq *ti, const void *buf, size_t len, int quote);
int ttyinq_write_nofrag(struct ttyinq *ti, const void *buf, size_t len,
int quote);
void ttyinq_canonicalize(struct ttyinq *ti);
size_t ttyinq_findchar(struct ttyinq *ti, const char *breakc, size_t maxlen,
char *lastc);
void ttyinq_flush(struct ttyinq *ti);
int ttyinq_peekchar(struct ttyinq *ti, char *c, int *quote);
void ttyinq_unputchar(struct ttyinq *ti);
void ttyinq_reprintpos_set(struct ttyinq *ti);
void ttyinq_reprintpos_reset(struct ttyinq *ti);
rt_inline size_t ttyinq_getsize(struct ttyinq *ti)
{
    /* Total capacity of the blocks currently linked into the queue. */
    size_t nblocks = ti->ti_nblocks;
    return nblocks * TTYINQ_DATASIZE;
}
rt_inline size_t ttyinq_getallocatedsize(struct ttyinq *ti)
{
    /* Capacity the queue is allowed to grow to (its block quota). */
    size_t quota = ti->ti_quota;
    return quota * TTYINQ_DATASIZE;
}
rt_inline size_t ttyinq_bytesleft(struct ttyinq *ti)
{
    /* Free space: total block capacity minus the current write offset. */
    size_t capacity;

    capacity = ti->ti_nblocks * TTYINQ_DATASIZE;
    /* Make sure the usage never exceeds the length. */
    MPASS(capacity >= ti->ti_end);
    return capacity - ti->ti_end;
}
rt_inline size_t ttyinq_bytescanonicalized(struct ttyinq *ti)
{
    /* Bytes of completed (canonicalized) input, i.e. the readable region. */
    MPASS(ti->ti_begin <= ti->ti_linestart);
    return ti->ti_linestart - ti->ti_begin;
}
rt_inline size_t ttyinq_bytesline(struct ttyinq *ti)
{
    /* Bytes belonging to the current, not-yet-canonicalized line. */
    MPASS(ti->ti_linestart <= ti->ti_end);
    return ti->ti_end - ti->ti_linestart;
}
/* Input buffer iteration. */
typedef void ttyinq_line_iterator_t(void *data, char c, int flags);
void ttyinq_line_iterate_from_linestart(struct ttyinq *ti,
ttyinq_line_iterator_t *iterator,
void *data);
void ttyinq_line_iterate_from_reprintpos(struct ttyinq *ti,
ttyinq_line_iterator_t *iterator,
void *data);
/* Output queue handling routines. */
void ttyoutq_flush(struct ttyoutq *to);
int ttyoutq_setsize(struct ttyoutq *to, struct lwp_tty *tp, size_t len);
void ttyoutq_free(struct ttyoutq *to);
size_t ttyoutq_read(struct ttyoutq *to, void *buf, size_t len);
int ttyoutq_read_uio(struct ttyoutq *to, struct lwp_tty *tp, struct uio *uio);
size_t ttyoutq_write(struct ttyoutq *to, const void *buf, size_t len);
int ttyoutq_write_nofrag(struct ttyoutq *to, const void *buf, size_t len);
rt_inline size_t ttyoutq_getsize(struct ttyoutq *to)
{
    /* Total capacity of the blocks currently linked into the queue. */
    size_t nblocks = to->to_nblocks;
    return nblocks * TTYOUTQ_DATASIZE;
}
rt_inline size_t ttyoutq_getallocatedsize(struct ttyoutq *to)
{
    /* Capacity the queue is allowed to grow to (its block quota). */
    size_t quota = to->to_quota;
    return quota * TTYOUTQ_DATASIZE;
}
rt_inline size_t ttyoutq_bytesleft(struct ttyoutq *to)
{
    /* Free space: total block capacity minus the current write offset. */
    size_t capacity;

    capacity = to->to_nblocks * TTYOUTQ_DATASIZE;
    /* Make sure the usage never exceeds the length. */
    MPASS(capacity >= to->to_end);
    return capacity - to->to_end;
}
rt_inline size_t ttyoutq_bytesused(struct ttyoutq *to)
{
    /* Amount of buffered output that has not been consumed yet. */
    return to->to_end - to->to_begin;
}
#endif /* !_SYS_TTYQUEUE_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,713 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* (tty_compat.c)
* The compatible layer which interacts with process management core (lwp)
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell init ver.
*/
#define DBG_TAG "lwp.tty"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "../tty_config.h"
#include "../tty_internal.h"
#include "../terminal.h"
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 1994-1995 Søren Schmidt
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Check whether this tty is already bound to the session of process `p`
 * as its controlling terminal. Caller holds the lwp lock. */
static rt_bool_t _is_already_binding(lwp_tty_t tp, rt_lwp_t p)
{
    rt_processgroup_t pgrp = p->pgrp;

    /* lwp is already locked */
    RT_ASSERT(pgrp);

    /* Note: pgrp->session is constant after process group is created */
    return (tp->t_session && tp->t_session == pgrp->session) ? RT_TRUE
                                                             : RT_FALSE;
}
/* Check whether either side of a would-be binding is occupied: the session
 * already has a controlling tty, or this tty already belongs to a session
 * whose leader is still alive. */
static rt_bool_t _is_tty_or_sess_busy(lwp_tty_t tp, rt_lwp_t p)
{
    rt_bool_t busy;
    rt_session_t sess = p->pgrp->session;

    SESS_LOCK(sess);
    if (sess->ctty)
    {
        /* session already owns a controlling terminal */
        busy = RT_TRUE;
    }
    else if (tp->t_session == RT_NULL)
    {
        /* tty is not bound to any session */
        busy = RT_FALSE;
    }
    else
    {
        /**
         * TODO: allow TTY stolen if the sess leader is killed while resource
         * had not been collected
         */
        busy = (tp->t_session->leader == RT_NULL) ? RT_FALSE : RT_TRUE;
    }
    SESS_UNLOCK(sess);

    return busy;
}
/**
 * Block the calling thread on condvar `cv` because it touched the tty from a
 * background job. Marks the lwp job-control-stopped while the whole thread
 * group (only the group leader with no siblings, per the check below) waits.
 *
 * @param tp the tty, locked on entry (asserted, not recursed)
 * @param cv condition variable to sleep on (e.g. t_bgwait)
 * @return 0 or the cv_wait() result; -ENXIO if the device disappeared while
 *         sleeping; -ERESTART if the tty was revoked (caller should restart
 *         the system call).
 */
int lwp_tty_bg_stop(struct lwp_tty *tp, struct rt_condvar *cv)
{
    int error;
    /* snapshot so a revoke during the sleep can be detected afterwards */
    int revokecnt = tp->t_revokecnt;
    rt_lwp_t self_lwp;
    rt_thread_t header_thr;
    rt_thread_t cur_thr = rt_thread_self();
    int jobctl_stopped;
    self_lwp = cur_thr->lwp;
    RT_ASSERT(self_lwp);
    jobctl_stopped = self_lwp->jobctl_stopped;
    tty_lock_assert(tp, MA_OWNED | MA_NOTRECURSED);
    MPASS(!tty_gone(tp));
    LWP_LOCK(self_lwp);
    /* only flag the lwp stopped when we are the sole thread in its group:
     * cur_thr must be both the first and the last entry of t_grp */
    header_thr = rt_list_entry(self_lwp->t_grp.prev, struct rt_thread, sibling);
    if (!jobctl_stopped && header_thr == cur_thr &&
        cur_thr->sibling.prev == &self_lwp->t_grp)
    {
        /* update lwp status */
        jobctl_stopped = self_lwp->jobctl_stopped = RT_TRUE;
    }
    LWP_UNLOCK(self_lwp);
    /* sleep until the foreground group wakes us (releases/retakes t_mtx) */
    error = cv_wait(cv, tp->t_mtx);
    if (jobctl_stopped)
    {
        self_lwp->jobctl_stopped = RT_FALSE;
    }
    /* Bail out when the device slipped away. */
    if (tty_gone(tp))
        return -ENXIO;
    /* Restart the system call when we may have been revoked. */
    if (tp->t_revokecnt != revokecnt)
        return -ERESTART;
    return error;
}
/* process management */
/**
 * Make this tty the controlling terminal of the calling process's session
 * (TIOCSCTTY semantics). Only a session leader may bind, and only when
 * neither the session nor the tty is already bound elsewhere.
 *
 * @param tp the tty, locked on entry
 * @param td calling thread
 * @return 0 on success (or if already bound to this session); -EPERM if the
 *         caller is not a session leader or the tty/session is busy.
 */
int lwp_tty_set_ctrl_proc(lwp_tty_t tp, rt_thread_t td)
{
    int rc = -1;
    struct rt_lwp *p = td->lwp;
    /* lock order is lwp lock before tty lock, so drop and retake the tty */
    tty_unlock(tp);
    LWP_LOCK(p);
    tty_lock(tp);
    if (is_sess_leader(p))
    {
        if (_is_already_binding(tp, p))
        {
            rc = 0;
        }
        else if (_is_tty_or_sess_busy(tp, p))
        {
            rc = -EPERM;
        }
        else
        {
            /**
             * Binding controlling process
             * note: p->pgrp is protected by lwp lock;
             * pgrp->session is always constant.
             */
            tp->t_session = p->pgrp->session;
            tp->t_session->ctty = tp;
            tp->t_sessioncnt++;
            /* Assign foreground process group */
            tp->t_pgrp = p->pgrp;
            p->term_ctrlterm = RT_TRUE;
            LOG_D("%s(sid=%d)", __func__, tp->t_session->sid);
            rc = 0;
        }
    }
    else
    {
        rc = -EPERM;
    }
    LWP_UNLOCK(p);
    return rc;
}
/**
 * Set the foreground process group of the tty (TIOCSPGRP semantics).
 * The target group must exist and belong to the caller's session, and the
 * tty must be the caller's controlling terminal.
 *
 * Fixes over the previous version:
 *  - the reference taken by lwp_pgrp_find_and_inc_ref() was leaked on the
 *    -ENOTTY path and when pg was found but cur_lwp was NULL;
 *  - pg->sid was logged after lwp_pgrp_dec_ref(pg) (potential use after the
 *    last reference was dropped); the log now happens before the dec_ref.
 *
 * @param tp   the tty, locked on entry; returned locked on every path
 * @param td   calling thread
 * @param pgid target foreground process group id
 * @return 0 on success, -EPERM on permission/lookup failure, -ENOTTY when
 *         tp is not the caller's controlling terminal.
 */
int lwp_tty_assign_foreground(lwp_tty_t tp, rt_thread_t td, int pgid)
{
    struct rt_processgroup *pg;
    rt_lwp_t cur_lwp = td->lwp;

    /* pgrp lookup takes process-management locks; drop the tty lock first */
    tty_unlock(tp);
    pg = lwp_pgrp_find_and_inc_ref(pgid);
    if (pg == NULL || cur_lwp == NULL)
    {
        /* drop the reference when only the cur_lwp test failed */
        if (pg != NULL)
            lwp_pgrp_dec_ref(pg);
        tty_lock(tp);
        return -EPERM;
    }

    PGRP_LOCK(pg);
    if (pg->sid != cur_lwp->sid)
    {
        PGRP_UNLOCK(pg);
        LOG_D("%s: NoPerm current process (pid=%d, pgid=%d, sid=%d), "
              "tagget group (pgid=%d, sid=%d)", __func__,
              cur_lwp->pid, cur_lwp->pgid, cur_lwp->sid, pgid, pg->sid);
        lwp_pgrp_dec_ref(pg);
        tty_lock(tp);
        return -EPERM;
    }

    tty_lock(tp);

    /**
     * Determine if this TTY is the controlling TTY after
     * relocking the TTY.
     */
    if (!tty_is_ctty(tp, td->lwp))
    {
        PGRP_UNLOCK(pg);
        LOG_D("%s: NoCTTY current process (pid=%d, pgid=%d, sid=%d), "
              "tagget group (pgid=%d, sid=%d)", __func__,
              cur_lwp->pid, cur_lwp->pgid, cur_lwp->sid, pgid, pg->sid);
        /* previously leaked: release the reference taken above */
        lwp_pgrp_dec_ref(pg);
        return -ENOTTY;
    }

    tp->t_pgrp = pg;
    PGRP_UNLOCK(pg);
    lwp_pgrp_dec_ref(pg);

    /* Wake up the background process groups. */
    cv_broadcast(&tp->t_bgwait);
    LOG_D("%s: Foreground group %p (pgid=%d)", __func__, tp->t_pgrp,
          tp->t_pgrp ? tp->t_pgrp->pgid : -1);
    return 0;
}
/**
* Signalling processes.
*/
/**
 * Deliver signal `sig` to the leader of the session this tty is bound to,
 * if any. Also clears the output-stopped state so signal handling is not
 * blocked behind suspended output. Caller holds the tty lock (asserted).
 */
void lwp_tty_signal_sessleader(struct lwp_tty *tp, int sig)
{
    struct rt_lwp *p;
    struct rt_session *s;
    tty_assert_locked(tp);
    MPASS(sig >= 1 && sig < _LWP_NSIG);
    /* Make signals start output again. */
    tp->t_flags &= ~TF_STOPPED;
    tp->t_termios.c_lflag &= ~FLUSHO;
    /**
     * Load s.leader exactly once to avoid race where s.leader is
     * set to NULL by a concurrent invocation of killjobc() by the
     * session leader. Note that we are not holding t_session's
     * lock for the read.
     */
    if ((s = tp->t_session) != NULL &&
        (p = (void *)rt_atomic_load((rt_atomic_t *)&s->leader)) != NULL)
    {
        lwp_signal_kill(p, sig, SI_KERNEL, 0);
    }
}
/**
 * Deliver signal `sig` to the tty's foreground process group, if one is set.
 * Clears the output-stopped state first. Caller holds the tty lock
 * (asserted).
 */
void lwp_tty_signal_pgrp(struct lwp_tty *tp, int sig)
{
    tty_assert_locked(tp);
    MPASS(sig >= 1 && sig < _LWP_NSIG);
    /* Make signals start output again. */
    tp->t_flags &= ~TF_STOPPED;
    tp->t_termios.c_lflag &= ~FLUSHO;
#ifdef USING_BSD_SIGINFO
    /* BSD extension: ^T prints a status line unless NOKERNINFO is set */
    if (sig == SIGINFO && !(tp->t_termios.c_lflag & NOKERNINFO))
        tty_info(tp);
#endif /* USING_BSD_SIGINFO */
    if (tp->t_pgrp != NULL)
    {
        PGRP_LOCK(tp->t_pgrp);
        lwp_pgrp_signal_kill(tp->t_pgrp, sig, SI_KERNEL, 0);
        PGRP_UNLOCK(tp->t_pgrp);
    }
}
/* bsd_ttydev_methods.d_ioctl */
rt_inline size_t _copy_to_user(void *to, void *from, size_t n)
{
return lwp_put_to_user(to, from, n) == n ? 0 : -EFAULT;
}
rt_inline size_t _copy_from_user(void *to, void *from, size_t n)
{
return lwp_get_from_user(to, from, n) == n ? 0 : -EFAULT;
}
/* Down-convert a full termios structure into the legacy termio layout.
 * Only the first NCC control characters fit into termio. */
static void termios_to_termio(struct termios *tios, struct termio *tio)
{
    int i;

    *tio = (struct termio){ 0 };
    tio->c_iflag = tios->c_iflag;
    tio->c_oflag = tios->c_oflag;
    tio->c_cflag = tios->c_cflag;
    tio->c_lflag = tios->c_lflag;
    tio->c_line = tios->c_line;
    for (i = 0; i < NCC; i++)
        tio->c_cc[i] = tios->c_cc[i];
}
/* Expand the legacy termio layout into a full termios structure. Control
 * characters beyond the first NCC are marked disabled; c_line and the speed
 * fields are intentionally left untouched, as before. */
static void termio_to_termios(struct termio *tio, struct termios *tios)
{
    int i;

    tios->c_iflag = tio->c_iflag;
    tios->c_oflag = tio->c_oflag;
    tios->c_cflag = tio->c_cflag;
    tios->c_lflag = tio->c_lflag;
    for (i = 0; i < NCC; i++)
        tios->c_cc[i] = tio->c_cc[i];
    for (i = NCC; i < NCCS; i++)
        tios->c_cc[i] = _POSIX_VDISABLE;
}
/* Forward a command to the BSD tty device method table. `tp` and `td` are
 * taken from the enclosing scope. */
#define IOCTL(cmd, data, fflags, td) \
    bsd_ttydev_methods.d_ioctl(tp, cmd, data, fflags, td)
/**
 * Translate Linux-style tty ioctls to the BSD tty layer (modeled on
 * FreeBSD's linux_ioctl_termio). Termios/termio structures are copied
 * between user space and the kernel here; most other commands are passed
 * straight through to bsd_ttydev_methods.d_ioctl.
 *
 * @param tp     the tty
 * @param cmd    Linux ioctl command
 * @param oflags open(2)-style flags of the file, converted via FFLAGS()
 * @param args   user-space argument pointer (or an immediate value for
 *               commands such as TCFLSH/TCSBRK)
 * @param td     calling thread
 * @return 0 on success; a negative errno on failure; -ENOIOCTL to let the
 *         caller fall back to the filesystem layer.
 */
int lwp_tty_ioctl_adapter(lwp_tty_t tp, int cmd, int oflags, void *args, rt_thread_t td)
{
    long fflags = FFLAGS(oflags);
    struct termios tios;
    struct termio tio;
    int error;
    LOG_D("%s(cmd=0x%x, args=%p)", __func__, cmd, args);
    /* NOTE(review): only the low 16 bits select the command — presumably to
     * strip direction/size encoding; confirm against the callers. */
    switch (cmd & 0xffff)
    {
        case TCGETS:
            error = IOCTL(TIOCGETA, (rt_caddr_t)&tios, fflags, td);
            if (error)
                break;
            /* NOTE(review): mirrors the input speed into the output speed
             * field before copyout — verify this matches the intended
             * Linux TCGETS behavior. */
            cfsetospeed(&tios, tios.__c_ispeed);
            error = _copy_to_user(args, &tios, sizeof(tios));
            break;
        case TCSETS:
            error = _copy_from_user(&tios, args, sizeof(tios));
            if (error)
                break;
            tios.__c_ispeed = tios.__c_ospeed = cfgetospeed(&tios);
            error = (IOCTL(TIOCSETA, (rt_caddr_t)&tios, fflags, td));
            break;
        case TCSETSW:
            /* like TCSETS, but drain output first */
            error = _copy_from_user(&tios, args, sizeof(tios));
            if (error)
                break;
            error = (IOCTL(TIOCSETAW, (rt_caddr_t)&tios, fflags, td));
            break;
        case TCSETSF:
            /* like TCSETSW, but also flush pending input */
            error = _copy_from_user(&tios, args, sizeof(tios));
            if (error)
                break;
            error = (IOCTL(TIOCSETAF, (rt_caddr_t)&tios, fflags, td));
            break;
        case TCGETA:
            /* legacy termio variants of the above */
            error = IOCTL(TIOCGETA, (rt_caddr_t)&tios, fflags, td);
            if (error)
                break;
            termios_to_termio(&tios, &tio);
            error = _copy_to_user((void *)args, &tio, sizeof(tio));
            break;
        case TCSETA:
            error = _copy_from_user(&tio, (void *)args, sizeof(tio));
            if (error)
                break;
            termio_to_termios(&tio, &tios);
            error = (IOCTL(TIOCSETA, (rt_caddr_t)&tios, fflags, td));
            break;
        case TCSETAW:
            error = _copy_from_user(&tio, (void *)args, sizeof(tio));
            if (error)
                break;
            termio_to_termios(&tio, &tios);
            error = (IOCTL(TIOCSETAW, (rt_caddr_t)&tios, fflags, td));
            break;
        case TCSETAF:
            error = _copy_from_user(&tio, (void *)args, sizeof(tio));
            if (error)
                break;
            termio_to_termios(&tio, &tios);
            error = (IOCTL(TIOCSETAF, (rt_caddr_t)&tios, fflags, td));
            break;
        case TCSBRK:
            if (args != 0)
            {
                /**
                 * Linux manual: SVr4, UnixWare, Solaris, and Linux treat
                 * tcsendbreak(fd,arg) with nonzero arg like tcdrain(fd).
                 */
                error = IOCTL(TIOCDRAIN, (rt_caddr_t)&tios, fflags, td);
            }
            else
            {
                /**
                 * Linux manual: If the terminal is using asynchronous serial
                 * data transmission, and arg is zero, then send a break (a
                 * stream of zero bits) for between 0.25 and 0.5 seconds.
                 */
                LOG_D("%s: ioctl TCSBRK arg 0 not implemented", __func__);
                error = -ENOSYS;
            }
            break;
#ifdef USING_BSD_IOCTL_EXT
        /* Software flow control */
        case TCXONC: {
            switch (args->arg)
            {
                case TCOOFF:
                    args->cmd = TIOCSTOP;
                    break;
                case TCOON:
                    args->cmd = TIOCSTART;
                    break;
                case TCIOFF:
                case TCION: {
                    int c;
                    struct write_args wr;
                    error = IOCTL(TIOCGETA, (rt_caddr_t)&tios, fflags,
                                  td);
                    if (error)
                        break;
                    fdrop(fp, td);
                    c = (args->arg == TCIOFF) ? VSTOP : VSTART;
                    c = tios.c_cc[c];
                    if (c != _POSIX_VDISABLE)
                    {
                        wr.fd = args->fd;
                        wr.buf = &c;
                        wr.nbyte = sizeof(c);
                        return (sys_write(td, &wr));
                    }
                    else
                        return 0;
                }
                default:
                    fdrop(fp, td);
                    return -EINVAL;
            }
            args->arg = 0;
            error = (sys_ioctl(td, (struct ioctl_args *)args));
            break;
        }
#endif /* USING_BSD_IOCTL_EXT */
        case TCFLSH: {
            int val;
            error = 0;
            /* args carries an immediate selector here, not a pointer */
            switch ((rt_base_t)args)
            {
                case TCIFLUSH:
                    val = FREAD;
                    break;
                case TCOFLUSH:
                    val = FWRITE;
                    break;
                case TCIOFLUSH:
                    val = FREAD | FWRITE;
                    break;
                default:
                    error = -EINVAL;
                    break;
            }
            if (!error)
                error = (IOCTL(TIOCFLUSH, (rt_caddr_t)&val, fflags, td));
            break;
        }
#ifdef USING_BSD_IOCTL_EXT
        case TIOCEXCL:
            args->cmd = TIOCEXCL;
            error = (sys_ioctl(td, (struct ioctl_args *)args));
            break;
        case TIOCNXCL:
            args->cmd = TIOCNXCL;
            error = (sys_ioctl(td, (struct ioctl_args *)args));
            break;
#endif /* USING_BSD_IOCTL_EXT */
        /* Controlling terminal */
        case TIOCSCTTY:
        case TIOCNOTTY:
        /* Process group and session ID */
        case TIOCGPGRP:
        case TIOCSPGRP:
        case TIOCGSID:
        /* TIOCOUTQ */
        /* TIOCSTI */
        case TIOCGWINSZ:
        case TIOCSWINSZ:
            /* same command number on both sides: pass straight through */
            error = IOCTL(cmd, (rt_caddr_t)args, fflags, td);
            break;
#ifdef USING_BSD_IOCTL_EXT
        case TIOCMGET:
            args->cmd = TIOCMGET;
            error = (sys_ioctl(td, (struct ioctl_args *)args));
            break;
        case TIOCMBIS:
            args->cmd = TIOCMBIS;
            error = (sys_ioctl(td, (struct ioctl_args *)args));
            break;
        case TIOCMBIC:
            args->cmd = TIOCMBIC;
            error = (sys_ioctl(td, (struct ioctl_args *)args));
            break;
        case TIOCMSET:
            args->cmd = TIOCMSET;
            error = (sys_ioctl(td, (struct ioctl_args *)args));
            break;
#endif /* USING_BSD_IOCTL_EXT */
        /* TIOCGSOFTCAR */
        /* TIOCSSOFTCAR */
        case FIONREAD: /* TIOCINQ */
            error = (IOCTL(FIONREAD, args, fflags, td));
            break;
#ifdef USING_BSD_IOCTL_EXT
        /* TIOCLINUX */
        case TIOCCONS:
            args->cmd = TIOCCONS;
            error = (sys_ioctl(td, (struct ioctl_args *)args));
            break;
        case TIOCGSERIAL: {
            struct linux_serial_struct lss;
            bzero(&lss, sizeof(lss));
            lss.type = PORT_16550A;
            lss.flags = 0;
            lss.close_delay = 0;
            error = copyout(&lss, (void *)args->arg, sizeof(lss));
            break;
        }
        case TIOCSSERIAL: {
            struct linux_serial_struct lss;
            error = copyin((void *)args->arg, &lss, sizeof(lss));
            if (error)
                break;
            /* XXX - It really helps to have an implementation that
             * does nothing. NOT!
             */
            error = 0;
            break;
        }
        case TIOCPKT:
            args->cmd = TIOCPKT;
            error = (sys_ioctl(td, (struct ioctl_args *)args));
            break;
        case FIONBIO:
            args->cmd = FIONBIO;
            error = (sys_ioctl(td, (struct ioctl_args *)args));
            break;
        case TIOCSETD: {
            int line;
            switch (args->arg)
            {
                case N_TTY:
                    line = TTYDISC;
                    break;
                case N_SLIP:
                    line = SLIPDISC;
                    break;
                case N_PPP:
                    line = PPPDISC;
                    break;
                default:
                    fdrop(fp, td);
                    return -EINVAL;
            }
            error = (ioctl_emit(TIOCSETD, (rt_caddr_t)&line, fflags, td));
            break;
        }
        case TIOCGETD: {
            int linux_line;
            int bsd_line = TTYDISC;
            error =
                ioctl_emit(TIOCGETD, (rt_caddr_t)&bsd_line, fflags, td);
            if (error)
                break;
            switch (bsd_line)
            {
                case TTYDISC:
                    linux_line = N_TTY;
                    break;
                case SLIPDISC:
                    linux_line = N_SLIP;
                    break;
                case PPPDISC:
                    linux_line = N_PPP;
                    break;
                default:
                    fdrop(fp, td);
                    return -EINVAL;
            }
            error = (copyout(&linux_line, (void *)args->arg, sizeof(int)));
            break;
        }
        /* TCSBRKP */
        /* TIOCTTYGSTRUCT */
        case FIONCLEX:
            args->cmd = FIONCLEX;
            error = (sys_ioctl(td, (struct ioctl_args *)args));
            break;
        case FIOCLEX:
            args->cmd = FIOCLEX;
            error = (sys_ioctl(td, (struct ioctl_args *)args));
            break;
        case FIOASYNC:
            args->cmd = FIOASYNC;
            error = (sys_ioctl(td, (struct ioctl_args *)args));
            break;
        /* TIOCSERCONFIG */
        /* TIOCSERGWILD */
        /* TIOCSERSWILD */
        /* TIOCGLCKTRMIOS */
        /* TIOCSLCKTRMIOS */
        case TIOCSBRK:
            args->cmd = TIOCSBRK;
            error = (sys_ioctl(td, (struct ioctl_args *)args));
            break;
        case TIOCCBRK:
            args->cmd = TIOCCBRK;
            error = (sys_ioctl(td, (struct ioctl_args *)args));
            break;
        case TIOCGPTN: {
            int nb;
            error = ioctl_emit(TIOCGPTN, (rt_caddr_t)&nb, fflags, td);
            if (!error)
                error = copyout(&nb, (void *)args->arg, sizeof(int));
            break;
        }
        case TIOCGPTPEER:
            linux_msg(td, "unsupported ioctl TIOCGPTPEER");
            error = -ENOIOCTL;
            break;
        case TIOCSPTLCK:
            /*
             * Our unlockpt() does nothing. Check that fd refers
             * to a pseudo-terminal master device.
             */
            args->cmd = TIOCPTMASTER;
            error = (sys_ioctl(td, (struct ioctl_args *)args));
            break;
#endif /* USING_BSD_IOCTL_EXT */
        /**
         * those are for current implementation of devfs, and we dont want to
         * log them
         */
        case F_DUPFD:
        case F_DUPFD_CLOEXEC:
        case F_GETFD:
        case F_SETFD:
        case F_GETFL:
        case F_SETFL:
            /* fall back to fs */
            error = -ENOIOCTL;
            break;
        default:
            LOG_I("%s: unhandle commands 0x%x", __func__, cmd);
            error = -ENOSYS;
            break;
    }
    return (error);
}

View File

@ -0,0 +1,507 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell init ver.
*/
#include "../bsd_porting.h"
#include "../terminal.h"
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed under sponsorship from Snow
* B.V., the Netherlands.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* TTY input queue buffering.
*
* Unlike the output queue, the input queue has more features that are
* needed to properly implement various features offered by the TTY
* interface:
*
* - Data can be removed from the tail of the queue, which is used to
* implement backspace.
* - Once in a while, input has to be `canonicalized'. When ICANON is
* turned on, this will be done after a CR has been inserted.
* Otherwise, it should be done after any character has been inserted.
* - The input queue can store one bit per byte, called the quoting bit.
* This bit is used by TTYDISC to make backspace work on quoted
* characters.
*
* In most cases, there is probably less input than output, so unlike
* the outq, we'll stick to 128 byte blocks here.
*/
static int ttyinq_flush_secure = 1;
#define TTYINQ_QUOTESIZE (TTYINQ_DATASIZE / BMSIZE)
#define BMSIZE 32
#define GETBIT(tib, boff) \
((tib)->tib_quotes[(boff) / BMSIZE] & (1 << ((boff) % BMSIZE)))
#define SETBIT(tib, boff) \
((tib)->tib_quotes[(boff) / BMSIZE] |= (1 << ((boff) % BMSIZE)))
#define CLRBIT(tib, boff) \
((tib)->tib_quotes[(boff) / BMSIZE] &= ~(1 << ((boff) % BMSIZE)))
/* One TTYINQ_DATASIZE-byte segment of the input queue, carrying a
 * per-character quoting bitmap (see GETBIT/SETBIT/CLRBIT above). */
struct ttyinq_block
{
    struct ttyinq_block *tib_prev; /* previous block in the list */
    struct ttyinq_block *tib_next; /* next block in the list */
    uint32_t tib_quotes[TTYINQ_QUOTESIZE]; /* one quote bit per data byte */
    char tib_data[TTYINQ_DATASIZE]; /* character payload */
};
static uma_zone_t ttyinq_zone;
#define TTYINQ_INSERT_TAIL(ti, tib) \
do \
{ \
if (ti->ti_end == 0) \
{ \
tib->tib_prev = NULL; \
tib->tib_next = ti->ti_firstblock; \
ti->ti_firstblock = tib; \
} \
else \
{ \
tib->tib_prev = ti->ti_lastblock; \
tib->tib_next = ti->ti_lastblock->tib_next; \
ti->ti_lastblock->tib_next = tib; \
} \
if (tib->tib_next != NULL) \
tib->tib_next->tib_prev = tib; \
ti->ti_nblocks++; \
} while (0)
#define TTYINQ_REMOVE_HEAD(ti) \
do \
{ \
ti->ti_firstblock = ti->ti_firstblock->tib_next; \
if (ti->ti_firstblock != NULL) \
ti->ti_firstblock->tib_prev = NULL; \
ti->ti_nblocks--; \
} while (0)
#define TTYINQ_RECYCLE(ti, tib) \
do \
{ \
if (ti->ti_quota <= ti->ti_nblocks) \
uma_zfree(ttyinq_zone, tib); \
else \
TTYINQ_INSERT_TAIL(ti, tib); \
} while (0)
/**
 * Grow the input queue so it can hold at least `size` bytes, by raising the
 * block quota and allocating blocks up to it.
 *
 * @param ti the input queue
 * @param tp owning tty, locked on entry; the lock is dropped and retaken
 *           around each allocation
 * @return 0 on success, -ENXIO if the tty disappeared while unlocked.
 */
int ttyinq_setsize(struct ttyinq *ti, struct lwp_tty *tp, size_t size)
{
    struct ttyinq_block *tib;
    ti->ti_quota = howmany(size, TTYINQ_DATASIZE);
    while (ti->ti_quota > ti->ti_nblocks)
    {
        /*
         * List is getting bigger.
         * Add new blocks to the tail of the list.
         *
         * We must unlock the TTY temporarily, because we need
         * to allocate memory. This won't be a problem, because
         * in the worst case, another thread ends up here, which
         * may cause us to allocate too many blocks, but this
         * will be caught by the loop below.
         */
        tty_unlock(tp);
        tib = uma_zalloc(ttyinq_zone, M_WAITOK);
        tty_lock(tp);
        if (tty_gone(tp))
        {
            uma_zfree(ttyinq_zone, tib);
            return -ENXIO;
        }
        TTYINQ_INSERT_TAIL(ti, tib);
    }
    return 0;
}
/* Release every block of the input queue: flush buffered data, drop the
 * quota to zero so nothing is recycled, then free the whole block list. */
void ttyinq_free(struct ttyinq *ti)
{
    struct ttyinq_block *blk;

    ttyinq_flush(ti);
    ti->ti_quota = 0;

    for (blk = ti->ti_firstblock; blk != NULL; blk = ti->ti_firstblock)
    {
        TTYINQ_REMOVE_HEAD(ti);
        uma_zfree(ttyinq_zone, blk);
    }

    MPASS(ti->ti_nblocks == 0);
}
/**
 * Move up to `rlen` bytes of canonicalized input into the uio, discarding
 * the final `flen` bytes (e.g. an EOF marker) instead of copying them.
 *
 * @param ti   the input queue
 * @param tp   owning tty, locked on entry; temporarily unlocked around
 *             uiomove() calls
 * @param uio  destination user I/O descriptor
 * @param rlen total bytes to consume from the queue (<= uio->uio_resid)
 * @param flen trailing bytes of that region to flush rather than copy
 * @return 0 on success or when the readable region is exhausted; a uiomove()
 *         error; -ENXIO if the tty disappeared while unlocked.
 */
int ttyinq_read_uio(struct ttyinq *ti, struct lwp_tty *tp, struct uio *uio,
                    size_t rlen, size_t flen)
{
    MPASS(rlen <= uio->uio_resid);
    while (rlen > 0)
    {
        int error;
        struct ttyinq_block *tib;
        size_t cbegin, cend, clen;
        /* See if there still is data. */
        if (ti->ti_begin == ti->ti_linestart)
            return 0;
        tib = ti->ti_firstblock;
        if (tib == NULL)
            return 0;
        /*
         * The end address should be the lowest of these three:
         * - The write pointer
         * - The blocksize - we can't read beyond the block
         * - The end address if we could perform the full read
         */
        cbegin = ti->ti_begin;
        cend = MIN(MIN(ti->ti_linestart, ti->ti_begin + rlen), TTYINQ_DATASIZE);
        clen = cend - cbegin;
        MPASS(clen >= flen);
        rlen -= clen;
        /*
         * We can prevent buffering in some cases:
         * - We need to read the block until the end.
         * - We don't need to read the block until the end, but
         *   there is no data beyond it, which allows us to move
         *   the write pointer to a new block.
         */
        if (cend == TTYINQ_DATASIZE || cend == ti->ti_end)
        {
            /*
             * Fast path: zero copy. Remove the first block,
             * so we can unlock the TTY temporarily.
             */
            TTYINQ_REMOVE_HEAD(ti);
            ti->ti_begin = 0;
            /*
             * Because we remove the first block, we must
             * fix up the block offsets.
             */
#define CORRECT_BLOCK(t)            \
    do                              \
    {                               \
        if (t <= TTYINQ_DATASIZE)   \
            t = 0;                  \
        else                        \
            t -= TTYINQ_DATASIZE;   \
    } while (0)
            CORRECT_BLOCK(ti->ti_linestart);
            CORRECT_BLOCK(ti->ti_reprint);
            CORRECT_BLOCK(ti->ti_end);
#undef CORRECT_BLOCK
            /*
             * Temporary unlock and copy the data to
             * userspace. We may need to flush trailing
             * bytes, like EOF characters.
             */
            tty_unlock(tp);
            error = uiomove(tib->tib_data + cbegin, clen - flen, uio);
            tty_lock(tp);
            /* Block can now be readded to the list. */
            TTYINQ_RECYCLE(ti, tib);
        }
        else
        {
            char ob[TTYINQ_DATASIZE - 1];
            /*
             * Slow path: store data in a temporary buffer.
             */
            memcpy(ob, tib->tib_data + cbegin, clen - flen);
            ti->ti_begin += clen;
            MPASS(ti->ti_begin < TTYINQ_DATASIZE);
            /* Temporary unlock and copy the data to userspace. */
            tty_unlock(tp);
            error = uiomove(ob, clen - flen, uio);
            tty_lock(tp);
        }
        if (error != 0)
            return error;
        if (tty_gone(tp))
            return -ENXIO;
    }
    return 0;
}
/*
 * Mark (value != 0) or clear (value == 0) the quoting bits for
 * `length' characters starting at `offset' inside one input block.
 */
rt_inline void ttyinq_set_quotes(struct ttyinq_block *tib, size_t offset,
                                 size_t length, int value)
{
    size_t pos = offset;
    size_t left = length;

    if (value)
    {
        /* Flag every character in the region as quoted. */
        while (left > 0)
        {
            SETBIT(tib, pos);
            pos++;
            left--;
        }
    }
    else
    {
        /* Flag every character in the region as unquoted. */
        while (left > 0)
        {
            CLRBIT(tib, pos);
            pos++;
            left--;
        }
    }
}
/*
 * Append up to `nbytes' bytes from `buf' to the input queue, setting
 * each stored character's quoting bit according to `quote'. The copy
 * stops early when the queue runs out of blocks (the watermark).
 *
 * Returns the number of bytes actually buffered.
 */
size_t ttyinq_write(struct ttyinq *ti, const void *buf, size_t nbytes,
                    int quote)
{
    const char *cbuf = buf;
    struct ttyinq_block *tib;
    unsigned int boff;
    size_t l;
    while (nbytes > 0)
    {
        /* Offset of the write pointer inside its block. */
        boff = ti->ti_end % TTYINQ_DATASIZE;
        if (ti->ti_end == 0)
        {
            /* First time we're being used or drained. */
            MPASS(ti->ti_begin == 0);
            tib = ti->ti_firstblock;
            if (tib == NULL)
            {
                /* Queue has no blocks. */
                break;
            }
            ti->ti_lastblock = tib;
        }
        else if (boff == 0)
        {
            /* We reached the end of this block on last write. */
            tib = ti->ti_lastblock->tib_next;
            if (tib == NULL)
            {
                /* We've reached the watermark. */
                break;
            }
            ti->ti_lastblock = tib;
        }
        else
        {
            /* Continue filling the current (partial) block. */
            tib = ti->ti_lastblock;
        }
        /* Don't copy more than was requested. */
        l = MIN(nbytes, TTYINQ_DATASIZE - boff);
        MPASS(l > 0);
        memcpy(tib->tib_data + boff, cbuf, l);
        /* Set the quoting bits for the proper region. */
        ttyinq_set_quotes(tib, boff, l, quote);
        cbuf += l;
        nbytes -= l;
        ti->ti_end += l;
    }
    return (cbuf - (const char *)buf);
}
/*
 * All-or-nothing variant of ttyinq_write(): refuse the write when the
 * queue cannot hold the whole buffer, so input is never fragmented.
 *
 * Returns 0 on success, -1 when there is not enough room.
 */
int ttyinq_write_nofrag(struct ttyinq *ti, const void *buf, size_t nbytes,
                        int quote)
{
    size_t written __unused;

    if (ttyinq_bytesleft(ti) < nbytes)
        return -1;

    /* Room was verified above, so the write must consume everything. */
    written = ttyinq_write(ti, buf, nbytes, quote);
    MPASS(written == nbytes);

    return 0;
}
/*
 * Terminate the current line: everything buffered so far becomes
 * readable, and the line-start/reprint positions move to the end.
 */
void ttyinq_canonicalize(struct ttyinq *ti)
{
    ti->ti_linestart = ti->ti_end;
    ti->ti_reprint = ti->ti_end;
    ti->ti_startblock = ti->ti_lastblock;
    ti->ti_reprintblock = ti->ti_lastblock;
}
/*
 * Scan the first block of the input queue for an unquoted character
 * that appears in `breakc', examining at most `maxlen' characters.
 *
 * On a match, store the character in `*lastc' and return the count of
 * characters up to and including it; otherwise return how many
 * characters of this block can be processed. The scan never crosses
 * the first block boundary.
 */
size_t ttyinq_findchar(struct ttyinq *ti, const char *breakc, size_t maxlen,
                       char *lastc)
{
    struct ttyinq_block *tib = ti->ti_firstblock;
    unsigned int pos;
    unsigned int limit;

    /* Stop at the line start, the block end, or the caller's limit. */
    limit = MIN(MIN(TTYINQ_DATASIZE, ti->ti_linestart), ti->ti_begin + maxlen);
    MPASS(maxlen > 0);

    if (tib == NULL)
        return 0;

    for (pos = ti->ti_begin; pos < limit; pos++)
    {
        if (!GETBIT(tib, pos) && strchr(breakc, tib->tib_data[pos]) != NULL)
        {
            *lastc = tib->tib_data[pos];
            return (pos - ti->ti_begin + 1);
        }
    }

    /* Not found - just process the entire block. */
    return (limit - ti->ti_begin);
}
/*
 * Discard all buffered input by resetting the queue offsets. When
 * secure flushing is enabled the payload of every block is wiped as
 * well, so sensitive input (e.g. passwords) does not linger in memory.
 */
void ttyinq_flush(struct ttyinq *ti)
{
    struct ttyinq_block *blk;

    ti->ti_begin = 0;
    ti->ti_linestart = 0;
    ti->ti_reprint = 0;
    ti->ti_end = 0;

    if (ttyinq_flush_secure)
    {
        for (blk = ti->ti_firstblock; blk != NULL; blk = blk->tib_next)
            memset(&blk->tib_data, 0, sizeof blk->tib_data);
    }
}
/*
 * Peek at the last character of the current (unfinished) line without
 * consuming it.
 *
 * Returns -1 when the line is empty; otherwise stores the character in
 * `*c', its quoting bit in `*quote', and returns 0.
 */
int ttyinq_peekchar(struct ttyinq *ti, char *c, int *quote)
{
    struct ttyinq_block *tib = ti->ti_lastblock;
    unsigned int boff;

    if (ti->ti_linestart == ti->ti_end)
        return -1;

    MPASS(ti->ti_end > 0);
    boff = (ti->ti_end - 1) % TTYINQ_DATASIZE;

    *c = tib->tib_data[boff];
    *quote = GETBIT(tib, boff);
    return 0;
}
/*
 * Remove the last character of the current line (used e.g. while
 * processing an erase), rolling the write pointer back one position.
 */
void ttyinq_unputchar(struct ttyinq *ti)
{
    MPASS(ti->ti_linestart < ti->ti_end);

    ti->ti_end--;
    if (ti->ti_end % TTYINQ_DATASIZE == 0)
    {
        /* Roll back to the previous block. */
        ti->ti_lastblock = ti->ti_lastblock->tib_prev;
        /*
         * The previous block may only be missing when we just removed
         * the very first character in the queue.
         */
        MPASS((ti->ti_lastblock == NULL) == (ti->ti_end == 0));
    }
}
/* Remember the current write position as the reprint position. */
void ttyinq_reprintpos_set(struct ttyinq *ti)
{
    ti->ti_reprint = ti->ti_end;
    ti->ti_reprintblock = ti->ti_lastblock;
}
/* Move the reprint position back to the start of the current line. */
void ttyinq_reprintpos_reset(struct ttyinq *ti)
{
    ti->ti_reprint = ti->ti_linestart;
    ti->ti_reprintblock = ti->ti_startblock;
}
/*
 * Walk the characters from `offset' up to the queue end, invoking
 * `iterator' for each one together with its quoting bit. `tib' must be
 * the block containing `offset'; when iterating from the very head of
 * the queue the first block is looked up here instead.
 */
static void ttyinq_line_iterate(struct ttyinq *ti,
                                ttyinq_line_iterator_t *iterator, void *data,
                                unsigned int offset, struct ttyinq_block *tib)
{
    unsigned int pos;
    unsigned int boff;

    /* Use the proper block when we're at the queue head. */
    if (offset == 0)
        tib = ti->ti_firstblock;

    for (pos = offset; pos < ti->ti_end; pos++)
    {
        boff = pos % TTYINQ_DATASIZE;
        MPASS(tib != NULL);

        /* Hand the character and its quoting bit to the callback. */
        iterator(data, tib->tib_data[boff], GETBIT(tib, boff));

        /* Advance to the next block after consuming its last byte. */
        if (boff == TTYINQ_DATASIZE - 1)
            tib = tib->tib_next;
    }
}
/* Iterate the current line starting at its first character. */
void ttyinq_line_iterate_from_linestart(struct ttyinq *ti,
                                        ttyinq_line_iterator_t *iterator,
                                        void *data)
{
    ttyinq_line_iterate(ti, iterator, data,
                        ti->ti_linestart, ti->ti_startblock);
}
/* Iterate the current line starting at the stored reprint position. */
void ttyinq_line_iterate_from_reprintpos(struct ttyinq *ti,
                                         ttyinq_line_iterator_t *iterator,
                                         void *data)
{
    ttyinq_line_iterate(ti, iterator, data,
                        ti->ti_reprint, ti->ti_reprintblock);
}
/*
 * One-time module initialization: create the UMA zone from which all
 * input queue blocks are allocated.
 */
static int ttyinq_startup(void)
{
    ttyinq_zone = uma_zcreate("ttyinq", sizeof(struct ttyinq_block), NULL, NULL,
                              NULL, NULL, UMA_ALIGN_PTR, 0);
    return 0;
}
INIT_PREV_EXPORT(ttyinq_startup);
#if 0
SYSINIT(ttyinq, SI_SUB_DRIVERS, SI_ORDER_FIRST, ttyinq_startup, NULL);
#endif

View File

@ -0,0 +1,370 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell init ver.
*/
#include "../bsd_porting.h"
#include "../terminal.h"
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed under sponsorship from Snow
* B.V., the Netherlands.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* TTY output queue buffering.
*
* The previous design of the TTY layer offered the so-called clists.
* These clists were used for both the input queues and the output
* queue. We don't use certain features on the output side, like quoting
* bits for parity marking and such. This mechanism is similar to the
* old clists, but only contains the features we need to buffer the
* output.
*/
/*
 * A single fixed-size chunk of the TTY output queue. Blocks are chained
 * in a singly-linked list; unlike the input queue there are no quoting
 * bitmaps, since output needs no parity/quote marking.
 */
struct ttyoutq_block
{
    struct ttyoutq_block *tob_next;  /* Next block in the queue. */
    char tob_data[TTYOUTQ_DATASIZE]; /* Payload bytes. */
};

/* UMA zone from which all output queue blocks are allocated. */
static uma_zone_t ttyoutq_zone;
/*
 * Link a block onto the tail of the output queue. When the queue is
 * fully drained (to_end == 0) the block is pushed to the front of the
 * list so it is immediately usable; otherwise it is linked after the
 * last block currently in use.
 */
#define TTYOUTQ_INSERT_TAIL(to, tob)                      \
    do                                                    \
    {                                                     \
        if (to->to_end == 0)                              \
        {                                                 \
            tob->tob_next = to->to_firstblock;            \
            to->to_firstblock = tob;                      \
        }                                                 \
        else                                              \
        {                                                 \
            tob->tob_next = to->to_lastblock->tob_next;   \
            to->to_lastblock->tob_next = tob;             \
        }                                                 \
        to->to_nblocks++;                                 \
    } while (0)
/* Unlink the first block from the queue (caller keeps the pointer). */
#define TTYOUTQ_REMOVE_HEAD(to)                           \
    do                                                    \
    {                                                     \
        to->to_firstblock = to->to_firstblock->tob_next;  \
        to->to_nblocks--;                                 \
    } while (0)
/*
 * Return a detached block either to the queue (when still under quota)
 * or to the allocator (when the queue was shrunk below its block count).
 */
#define TTYOUTQ_RECYCLE(to, tob)                          \
    do                                                    \
    {                                                     \
        if (to->to_quota <= to->to_nblocks)               \
            uma_zfree(ttyoutq_zone, tob);                 \
        else                                              \
            TTYOUTQ_INSERT_TAIL(to, tob);                 \
    } while (0)
/*
 * Discard all buffered output by resetting the read and write offsets.
 * The blocks themselves stay on the queue for reuse.
 */
void ttyoutq_flush(struct ttyoutq *to)
{
    to->to_begin = 0;
    to->to_end = 0;
}
/*
 * Grow the output queue so it can buffer at least `size' bytes.
 *
 * The TTY lock is dropped around each allocation since it may sleep;
 * if the TTY was abandoned while unlocked the fresh block is released
 * and -ENXIO returned. Over-allocation caused by racing callers is
 * tolerated and trimmed later by the recycle logic.
 */
int ttyoutq_setsize(struct ttyoutq *to, struct lwp_tty *tp, size_t size)
{
    struct ttyoutq_block *blk;

    to->to_quota = howmany(size, TTYOUTQ_DATASIZE);

    while (to->to_quota > to->to_nblocks)
    {
        /* The list must grow: allocate with the TTY unlocked. */
        tty_unlock(tp);
        blk = uma_zalloc(ttyoutq_zone, M_WAITOK);
        tty_lock(tp);

        if (tty_gone(tp))
        {
            uma_zfree(ttyoutq_zone, blk);
            return -ENXIO;
        }

        TTYOUTQ_INSERT_TAIL(to, blk);
    }
    return 0;
}
/*
 * Release every block held by the output queue.
 *
 * The queue is flushed first and its quota dropped to zero, then each
 * block is detached from the head of the list and returned to the zone.
 */
void ttyoutq_free(struct ttyoutq *to)
{
    struct ttyoutq_block *blk;

    ttyoutq_flush(to);
    to->to_quota = 0;

    for (blk = to->to_firstblock; blk != NULL; blk = to->to_firstblock)
    {
        TTYOUTQ_REMOVE_HEAD(to);
        uma_zfree(ttyoutq_zone, blk);
    }

    MPASS(to->to_nblocks == 0);
}
/*
 * Copy up to `len' bytes from the head of the output queue into `buf'.
 * Fully-consumed blocks are removed and recycled; a partially-read
 * block just advances the read offset.
 *
 * Returns the number of bytes copied out.
 */
size_t ttyoutq_read(struct ttyoutq *to, void *buf, size_t len)
{
    char *cbuf = buf;
    while (len > 0)
    {
        struct ttyoutq_block *tob;
        size_t cbegin, cend, clen;
        /* See if there still is data. */
        if (to->to_begin == to->to_end)
            break;
        tob = to->to_firstblock;
        if (tob == NULL)
            break;
        /*
         * The end address should be the lowest of these three:
         * - The write pointer
         * - The blocksize - we can't read beyond the block
         * - The end address if we could perform the full read
         */
        cbegin = to->to_begin;
        cend = MIN(MIN(to->to_end, to->to_begin + len), TTYOUTQ_DATASIZE);
        clen = cend - cbegin;
        /* Copy the data out of the buffers. */
        memcpy(cbuf, tob->tob_data + cbegin, clen);
        cbuf += clen;
        len -= clen;
        if (cend == to->to_end)
        {
            /* Read the complete queue. */
            to->to_begin = 0;
            to->to_end = 0;
        }
        else if (cend == TTYOUTQ_DATASIZE)
        {
            /* Read the block until the end. */
            TTYOUTQ_REMOVE_HEAD(to);
            to->to_begin = 0;
            to->to_end -= TTYOUTQ_DATASIZE;
            TTYOUTQ_RECYCLE(to, tob);
        }
        else
        {
            /* Read the block partially. */
            to->to_begin += clen;
        }
    }
    return cbuf - (char *)buf;
}
/*
* An optimized version of ttyoutq_read() which can be used in pseudo
* TTY drivers to directly copy data from the outq to userspace, instead
* of buffering it.
*
* We can only copy data directly if we need to read the entire block
* back to the user, because we temporarily remove the block from the
* queue. Otherwise we need to copy it to a temporary buffer first, to
* make sure data remains in the correct order.
*/
/*
 * Drain the output queue directly into the userspace buffer described
 * by `uio'. See the block comment above for the zero-copy strategy.
 *
 * The TTY lock is dropped around each uiomove(); returns 0 on success
 * or the uiomove error.
 */
int ttyoutq_read_uio(struct ttyoutq *to, struct lwp_tty *tp, struct uio *uio)
{
    while (uio->uio_resid > 0)
    {
        int error;
        struct ttyoutq_block *tob;
        size_t cbegin, cend, clen;
        /* See if there still is data. */
        if (to->to_begin == to->to_end)
            return 0;
        tob = to->to_firstblock;
        if (tob == NULL)
            return 0;
        /*
         * The end address should be the lowest of these three:
         * - The write pointer
         * - The blocksize - we can't read beyond the block
         * - The end address if we could perform the full read
         */
        cbegin = to->to_begin;
        cend = MIN(MIN(to->to_end, to->to_begin + uio->uio_resid),
                   TTYOUTQ_DATASIZE);
        clen = cend - cbegin;
        /*
         * We can prevent buffering in some cases:
         * - We need to read the block until the end.
         * - We don't need to read the block until the end, but
         *   there is no data beyond it, which allows us to move
         *   the write pointer to a new block.
         */
        if (cend == TTYOUTQ_DATASIZE || cend == to->to_end)
        {
            /*
             * Fast path: zero copy. Remove the first block,
             * so we can unlock the TTY temporarily.
             */
            TTYOUTQ_REMOVE_HEAD(to);
            to->to_begin = 0;
            if (to->to_end <= TTYOUTQ_DATASIZE)
                to->to_end = 0;
            else
                to->to_end -= TTYOUTQ_DATASIZE;
            /* Temporary unlock and copy the data to userspace. */
            tty_unlock(tp);
            error = uiomove(tob->tob_data + cbegin, clen, uio);
            tty_lock(tp);
            /* Block can now be readded to the list. */
            TTYOUTQ_RECYCLE(to, tob);
        }
        else
        {
            char ob[TTYOUTQ_DATASIZE - 1];
            /*
             * Slow path: store data in a temporary buffer.
             * (clen < TTYOUTQ_DATASIZE here, so `ob' is large enough.)
             */
            memcpy(ob, tob->tob_data + cbegin, clen);
            to->to_begin += clen;
            MPASS(to->to_begin < TTYOUTQ_DATASIZE);
            /* Temporary unlock and copy the data to userspace. */
            tty_unlock(tp);
            error = uiomove(ob, clen, uio);
            tty_lock(tp);
        }
        if (error != 0)
            return error;
    }
    return 0;
}
/*
 * Append up to `nbytes' bytes from `buf' to the output queue. The copy
 * stops early when the queue runs out of blocks (the watermark).
 *
 * Returns the number of bytes actually buffered.
 */
size_t ttyoutq_write(struct ttyoutq *to, const void *buf, size_t nbytes)
{
    const char *cbuf = buf;
    struct ttyoutq_block *tob;
    unsigned int boff;
    size_t l;
    while (nbytes > 0)
    {
        /* Offset of the write pointer inside its block. */
        boff = to->to_end % TTYOUTQ_DATASIZE;
        if (to->to_end == 0)
        {
            /* First time we're being used or drained. */
            MPASS(to->to_begin == 0);
            tob = to->to_firstblock;
            if (tob == NULL)
            {
                /* Queue has no blocks. */
                break;
            }
            to->to_lastblock = tob;
        }
        else if (boff == 0)
        {
            /* We reached the end of this block on last write. */
            tob = to->to_lastblock->tob_next;
            if (tob == NULL)
            {
                /* We've reached the watermark. */
                break;
            }
            to->to_lastblock = tob;
        }
        else
        {
            /* Continue filling the current (partial) block. */
            tob = to->to_lastblock;
        }
        /* Don't copy more than was requested. */
        l = MIN(nbytes, TTYOUTQ_DATASIZE - boff);
        MPASS(l > 0);
        memcpy(tob->tob_data + boff, cbuf, l);
        cbuf += l;
        nbytes -= l;
        to->to_end += l;
    }
    return (cbuf - (const char *)buf);
}
/*
 * All-or-nothing variant of ttyoutq_write(): refuse the write when the
 * queue cannot hold the whole buffer, so output is never fragmented.
 *
 * Returns 0 on success, -1 when there is not enough room.
 */
int ttyoutq_write_nofrag(struct ttyoutq *to, const void *buf, size_t nbytes)
{
    size_t written __unused;

    if (ttyoutq_bytesleft(to) < nbytes)
        return -1;

    /* Room was verified above, so the write must consume everything. */
    written = ttyoutq_write(to, buf, nbytes);
    MPASS(written == nbytes);

    return 0;
}
/*
 * One-time module initialization: create the UMA zone from which all
 * output queue blocks are allocated.
 */
static int ttyoutq_startup(void)
{
    ttyoutq_zone = uma_zcreate("ttyoutq", sizeof(struct ttyoutq_block), NULL,
                               NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
    return 0;
}
INIT_PREV_EXPORT(ttyoutq_startup);
#if 0
SYSINIT(ttyoutq, SI_SUB_DRIVERS, SI_ORDER_FIRST, ttyoutq_startup, NULL);
#endif

View File

@ -0,0 +1,837 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-12-07 Shell init ver.
*/
#include <ipc/condvar.h>
#include <rid_bitmap.h>
#include <terminal/terminal.h>
#include <terminal/tty_internal.h>
#include <ptyfs.h>
#include <rtthread.h>
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed under sponsorship from Snow
* B.V., the Netherlands.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#define PTS_EXTERNAL
/*
* Per-PTS structure.
*
* List of locks
* (t) locked by tty_lock()
* (c) const until freeing
*/
/*
 * Per-slave (pts) driver state; see the locking legend above:
 * (t) = protected by tty_lock(), (c) = constant until free.
 */
struct pts_softc
{
    int pts_unit;           /* (c) Device unit number. */
    unsigned int pts_flags; /* (t) Device flags. */
#define PTS_PKT 0x1      /* Packet mode. */
#define PTS_FINISHED 0x2 /* Return errors on read()/write(). */
#define PTS_PTLOCKED 0x4 /* ioctl %TIOCSPTLCK/%TIOCGPTLCK */
    char pts_pkt; /* (t) Unread packet mode data. */
    struct rt_condvar pts_inwait;  /* (t) Blocking write() on master. */
    struct rt_wqueue pts_inpoll;   /* (t) Select queue for write(). */
    struct rt_condvar pts_outwait; /* (t) Blocking read() on master. */
    struct rt_wqueue pts_outpoll;  /* (t) Select queue for read(). */
    struct ucred *pts_cred;        /* (c) Resource limit. */
    rt_device_t pts_master;        /** (c) Master device.
                                    *  (Note: in rtsmart kernel, we support
                                    *  multi-instance ptmx )
                                    */
};
/**
* Controller-side file operations.
* (P)seudo-(T)erminal (M)ultiple(X)er
*/
static int ptsdev_read(struct lwp_tty *tp, struct uio *uio,
struct ucred *active_cred, int oflags,
struct rt_thread *td)
{
struct pts_softc *psc = tty_softc(tp);
int error = 0;
char pkt;
if (uio->uio_resid == 0)
return (0);
tty_lock(tp);
for (;;)
{
/*
* Implement packet mode. When packet mode is turned on,
* the first byte contains a bitmask of events that
* occurred (start, stop, flush, window size, etc).
*/
if (psc->pts_flags & PTS_PKT && psc->pts_pkt)
{
pkt = psc->pts_pkt;
psc->pts_pkt = 0;
tty_unlock(tp);
error = uiomove(&pkt, 1, uio);
return (error);
}
/*
* Transmit regular data.
*
* XXX: We shouldn't use ttydisc_getc_poll()! Even
* though in this implementation, there is likely going
* to be data, we should just call ttydisc_getc_uio()
* and use its return value to sleep.
*/
if (ttydisc_getc_poll(tp))
{
if (psc->pts_flags & PTS_PKT)
{
/*
* XXX: Small race. Fortunately PTY
* consumers aren't multithreaded.
*/
tty_unlock(tp);
pkt = TIOCPKT_DATA;
error = uiomove(&pkt, 1, uio);
if (error)
return (error);
tty_lock(tp);
}
error = ttydisc_getc_uio(tp, uio);
break;
}
/* Maybe the device isn't used anyway. */
if (psc->pts_flags & PTS_FINISHED)
break;
/* Wait for more data. */
if (oflags & O_NONBLOCK)
{
error = EWOULDBLOCK;
break;
}
error = cv_wait_sig(&psc->pts_outwait, tp->t_mtx);
if (error != 0)
break;
}
tty_unlock(tp);
return (error);
}
/*
 * Master-side write: feed data into the slave's input queue through
 * the line discipline. Data is staged through a small on-stack buffer;
 * when the input queue fills up the caller either blocks on pts_inwait
 * or, for non-blocking files, gets -EWOULDBLOCK. On return, uio_resid
 * is adjusted so it does not account bytes never handed to the TTY.
 */
static int ptsdev_write(struct lwp_tty *tp, struct uio *uio,
                        struct ucred *active_cred, int oflags,
                        struct rt_thread *td)
{
    struct pts_softc *psc = tty_softc(tp);
    char ib[256], *ibstart;
    size_t iblen, rintlen;
    int error = 0;
    if (uio->uio_resid == 0)
        return (0);
    for (;;)
    {
        /* Stage the next chunk of user data into the local buffer. */
        ibstart = ib;
        iblen = MIN(uio->uio_resid, sizeof ib);
        error = uiomove(ib, iblen, uio);
        tty_lock(tp);
        if (error != 0)
        {
            iblen = 0;
            goto done;
        }
        /*
         * When possible, avoid the slow path. rint_bypass()
         * copies all input to the input queue at once.
         */
        MPASS(iblen > 0);
        do
        {
            rintlen = ttydisc_rint_simple(tp, ibstart, iblen);
            ibstart += rintlen;
            iblen -= rintlen;
            if (iblen == 0)
            {
                /* All data written. */
                break;
            }
            /* Maybe the device isn't used anyway. */
            if (psc->pts_flags & PTS_FINISHED)
            {
                error = -EIO;
                goto done;
            }
            /* Wait for more data. */
            if (oflags & O_NONBLOCK)
            {
                error = -EWOULDBLOCK;
                goto done;
            }
            /* Wake up users on the slave side. */
            ttydisc_rint_done(tp);
            error = cv_wait_sig(&psc->pts_inwait, tp->t_mtx);
            if (error != 0)
                goto done;
        } while (iblen > 0);
        if (uio->uio_resid == 0)
            break;
        tty_unlock(tp);
    }
done:
    ttydisc_rint_done(tp);
    tty_unlock(tp);
    /*
     * Don't account for the part of the buffer that we couldn't
     * pass to the TTY.
     */
    uio->uio_resid += iblen;
    return (error);
}
/*
 * Master-side ioctl handler. Commands that must behave differently on
 * the master (TIOCGPGRP/TIOCGSID without foreground check, TIOCGETA,
 * packet mode, pts unit lookup, ...) are handled here; everything else
 * is forwarded to the generic tty_ioctl() on the slave.
 *
 * Returns 0 or a negative errno.
 */
static int ptsdev_ioctl(struct lwp_tty *tp, rt_ubase_t cmd, void *data,
                        struct ucred *active_cred, int fflags,
                        struct rt_thread *td)
{
    struct pts_softc *psc = tty_softc(tp);
    int error = 0, sig;
    switch (cmd)
    {
#ifdef USING_BSD_IOCTL_EXT
        case FIODTYPE:
            *(int *)data = D_TTY;
            return (0);
#endif
        case FIONBIO:
            /* This device supports non-blocking operation. */
            return (0);
        case FIONREAD:
            tty_lock(tp);
            if (psc->pts_flags & PTS_FINISHED)
            {
                /* Force read() to be called. */
                *(int *)data = 1;
            }
            else
            {
                *(int *)data = ttydisc_getc_poll(tp);
            }
            tty_unlock(tp);
            return (0);
#ifdef USING_BSD_IOCTL_EXT
        case FIODGNAME:
#ifdef COMPAT_FREEBSD32
        case FIODGNAME_32:
#endif
        {
            struct fiodgname_arg *fgn;
            const char *p;
            int i;
            /* Reverse device name lookups, for ptsname() and ttyname(). */
            fgn = data;
            p = tty_devname(tp);
            i = strlen(p) + 1;
            if (i > fgn->len)
                return -EINVAL;
            return (copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i));
        }
#endif
        /*
         * We need to implement TIOCGPGRP and TIOCGSID here again. When
         * called on the pseudo-terminal master, it should not check if
         * the terminal is the foreground terminal of the calling
         * process.
         *
         * TIOCGETA is also implemented here. Various Linux PTY routines
         * often call isatty(), which is implemented by tcgetattr().
         */
        case TIOCGETA:
            /* Obtain terminal flags through tcgetattr(). */
            tty_lock(tp);
            *(struct termios *)data = tp->t_termios;
            tty_unlock(tp);
            return (0);
        case TIOCSETAF:
        case TIOCSETAW:
            /*
             * We must make sure we turn tcsetattr() calls of TCSAFLUSH and
             * TCSADRAIN into something different. If an application would
             * call TCSAFLUSH or TCSADRAIN on the master descriptor, it may
             * deadlock waiting for all data to be read.
             */
            cmd = TIOCSETA;
            break;
        case TIOCGPTN:
            /*
             * Get the device unit number.
             */
            if (psc->pts_unit < 0)
                return -ENOTTY;
            *(unsigned int *)data = psc->pts_unit;
            return (0);
        case TIOCGPGRP:
            /* Get the foreground process group ID. */
            tty_lock(tp);
            if (tp->t_pgrp != NULL)
                *(int *)data = tp->t_pgrp->pgid;
            else
                *(int *)data = NO_PID;
            tty_unlock(tp);
            return (0);
        case TIOCGSID:
            /* Get the session leader process ID. */
            tty_lock(tp);
            if (tp->t_session == NULL)
                error = -ENOTTY;
            else
                *(int *)data = tp->t_session->sid;
            tty_unlock(tp);
            return (error);
#ifdef USING_BSD_IOCTL_EXT
        case TIOCPTMASTER:
            /* Yes, we are a pseudo-terminal master. */
            return (0);
#endif /* USING_BSD_IOCTL_EXT */
        case TIOCSIG:
            /* Signal the foreground process group. */
            sig = *(int *)data;
            if (sig < 1 || sig >= _LWP_NSIG)
                return -EINVAL;
            tty_lock(tp);
            lwp_tty_signal_pgrp(tp, sig);
            tty_unlock(tp);
            return (0);
        case TIOCPKT:
            /* Enable/disable packet mode. */
            tty_lock(tp);
            if (*(int *)data)
                psc->pts_flags |= PTS_PKT;
            else
                psc->pts_flags &= ~PTS_PKT;
            tty_unlock(tp);
            return (0);
    }
    /* Just redirect this ioctl to the slave device. */
    tty_lock(tp);
    error = tty_ioctl(tp, cmd, data, fflags, td);
    tty_unlock(tp);
    if (error == -ENOIOCTL)
        error = -ENOTTY;
    return error;
}
/*
 * Master-side poll: readable when the slave produced output (or a
 * packet-mode event is pending), writable when the slave's input queue
 * has room. Note the in/out naming is mirrored relative to the slave.
 */
static int ptsdev_poll(struct lwp_tty *tp, struct rt_pollreq *req,
                       struct ucred *active_cred, struct rt_thread *td)
{
    struct pts_softc *psc = tty_softc(tp);
    int revents = 0;
    int events = req->_key;
    tty_lock(tp);
    if (psc->pts_flags & PTS_FINISHED)
    {
        /* Slave device is not opened. */
        tty_unlock(tp);
        return ((events & (POLLIN | POLLRDNORM)) | POLLHUP);
    }
    if (events & (POLLIN | POLLRDNORM))
    {
        /* See if we can getc something. */
        if (ttydisc_getc_poll(tp) || (psc->pts_flags & PTS_PKT && psc->pts_pkt))
            revents |= events & (POLLIN | POLLRDNORM);
    }
    if (events & (POLLOUT | POLLWRNORM))
    {
        /* See if we can rint something. */
        if (ttydisc_rint_poll(tp))
            revents |= events & (POLLOUT | POLLWRNORM);
    }
    /*
     * No need to check for POLLHUP here. This device cannot be used
     * as a callout device, which means we always have a carrier,
     * because the master is.
     */
    if (revents == 0)
    {
        /*
         * This code might look misleading, but the naming of
         * poll events on this side is the opposite of the slave
         * device.
         */
        if (events & (POLLIN | POLLRDNORM))
            rt_poll_add(&psc->pts_outpoll, req);
        if (events & (POLLOUT | POLLWRNORM))
            rt_poll_add(&psc->pts_inpoll, req);
    }
    tty_unlock(tp);
    return (revents);
}
#if USING_BSD_KQUEUE
/*
* kqueue support.
*/
/* Detach a read knote from the master-side out-poll note list. */
static void pts_kqops_read_detach(struct knote *kn)
{
    struct file *fp = kn->kn_fp;
    struct lwp_tty *tp = fp->f_data;
    struct pts_softc *psc = tty_softc(tp);
    knlist_remove(&psc->pts_outpoll.si_note, kn, 0);
}
/*
 * Read filter: fire when the slave side is gone (EOF) or the line
 * discipline has data available for the master.
 */
static int pts_kqops_read_event(struct knote *kn, long hint)
{
    struct file *fp = kn->kn_fp;
    struct lwp_tty *tp = fp->f_data;
    struct pts_softc *psc = tty_softc(tp);
    if (psc->pts_flags & PTS_FINISHED)
    {
        kn->kn_flags |= EV_EOF;
        return (1);
    }
    else
    {
        kn->kn_data = ttydisc_getc_poll(tp);
        return (kn->kn_data > 0);
    }
}
/* Detach a write knote from the master-side in-poll note list. */
static void pts_kqops_write_detach(struct knote *kn)
{
    struct file *fp = kn->kn_fp;
    struct lwp_tty *tp = fp->f_data;
    struct pts_softc *psc = tty_softc(tp);
    knlist_remove(&psc->pts_inpoll.si_note, kn, 0);
}
/*
 * Write filter: fire when the slave side is gone (EOF) or the slave's
 * input queue can accept more data from the master.
 */
static int pts_kqops_write_event(struct knote *kn, long hint)
{
    struct file *fp = kn->kn_fp;
    struct lwp_tty *tp = fp->f_data;
    struct pts_softc *psc = tty_softc(tp);
    if (psc->pts_flags & PTS_FINISHED)
    {
        kn->kn_flags |= EV_EOF;
        return (1);
    }
    else
    {
        kn->kn_data = ttydisc_rint_poll(tp);
        return (kn->kn_data > 0);
    }
}
/* kqueue filter tables for EVFILT_READ / EVFILT_WRITE on the master. */
static struct filterops pts_kqops_read = {
    .f_isfd = 1,
    .f_detach = pts_kqops_read_detach,
    .f_event = pts_kqops_read_event,
};
static struct filterops pts_kqops_write = {
    .f_isfd = 1,
    .f_detach = pts_kqops_write_detach,
    .f_event = pts_kqops_write_event,
};
/*
 * Attach a knote to the master: read events track the out-poll queue,
 * write events the in-poll queue. Unknown filters yield EINVAL.
 */
static int ptsdev_kqfilter(struct file *fp, struct knote *kn)
{
    struct lwp_tty *tp = fp->f_data;
    struct pts_softc *psc = tty_softc(tp);
    int error = 0;
    tty_lock(tp);
    switch (kn->kn_filter)
    {
        case EVFILT_READ:
            kn->kn_fop = &pts_kqops_read;
            knlist_add(&psc->pts_outpoll.si_note, kn, 1);
            break;
        case EVFILT_WRITE:
            kn->kn_fop = &pts_kqops_write;
            knlist_add(&psc->pts_inpoll.si_note, kn, 1);
            break;
        default:
            error = EINVAL;
            break;
    }
    tty_unlock(tp);
    return (error);
}
#endif
#if USING_BSD_STAT
/*
 * fstat() on the master: fill in st_rdev and the device times/owner so
 * Linux-style consumers that stat the master work as expected.
 */
static int ptsdev_stat(struct file *fp, struct stat *sb,
                       struct ucred *active_cred)
{
    struct lwp_tty *tp = fp->f_data;
#ifdef PTS_EXTERNAL
    struct pts_softc *psc = tty_softc(tp);
#endif /* PTS_EXTERNAL */
    struct cdev *dev = tp->t_dev;
    /*
     * According to POSIX, we must implement an fstat(). This also
     * makes this implementation compatible with Linux binaries,
     * because Linux calls fstat() on the pseudo-terminal master to
     * obtain st_rdev.
     *
     * XXX: POSIX also mentions we must fill in st_dev, but how?
     */
    bzero(sb, sizeof *sb);
#ifdef PTS_EXTERNAL
    if (psc->pts_cdev != NULL)
        sb->st_ino = sb->st_rdev = dev2udev(psc->pts_cdev);
    else
#endif /* PTS_EXTERNAL */
        sb->st_ino = sb->st_rdev = tty_udev(tp);
    sb->st_atim = dev->si_atime;
    sb->st_ctim = dev->si_ctime;
    sb->st_mtim = dev->si_mtime;
    sb->st_uid = dev->si_uid;
    sb->st_gid = dev->si_gid;
    sb->st_mode = dev->si_mode | S_IFCHR;
    return (0);
}
#endif
/*
 * Close of the master: mark the TTY gone and tear it down; the softc
 * itself is released later via the tsw_free driver hook.
 */
static int ptsdev_close(struct lwp_tty *tp, struct rt_thread *td)
{
    /* Deallocate TTY device. */
    tty_lock(tp);
    tty_rel_gone(tp);
#ifdef USING_BSD_VNODE
    /* TODO: consider the vnode operation on DFS */
    /*
     * Open of /dev/ptmx or /dev/ptyXX changes the type of file
     * from DTYPE_VNODE to DTYPE_PTS. vn_open() increases vnode
     * use count, we need to decrement it, and possibly do other
     * required cleanup.
     */
    if (fp->f_vnode != NULL)
        return (vnops.fo_close(fp, td));
#endif /* USING_BSD_VNODE */
    return 0;
}
#ifdef USING_BSD_KINFO
/* Export pts file info (device number, name) for kinfo consumers. */
static int ptsdev_fill_kinfo(struct file *fp, struct kinfo_file *kif,
                             struct filedesc *fdp)
{
    struct lwp_tty *tp;
    kif->kf_type = KF_TYPE_PTS;
    tp = fp->f_data;
    kif->kf_un.kf_pts.kf_pts_dev = tty_udev(tp);
    kif->kf_un.kf_pts.kf_pts_dev_freebsd11 =
        kif->kf_un.kf_pts.kf_pts_dev; /* truncate */
    strlcpy(kif->kf_path, tty_devname(tp), sizeof(kif->kf_path));
    return (0);
}
#endif
/*
 * File operations vector for the pseudo-terminal master side.
 * Entries left commented out are BSD features not (yet) ported.
 */
struct bsd_fileops bsd_ptsdev_methods = {
    .fo_read = ptsdev_read,
    .fo_write = ptsdev_write,
    // .fo_truncate = invfo_truncate,
    .fo_ioctl = ptsdev_ioctl,
    .fo_poll = ptsdev_poll,
    // .fo_kqfilter = ptsdev_kqfilter,
    // .fo_stat = ptsdev_stat,
    .fo_close = ptsdev_close,
    // .fo_chmod = invfo_chmod,
    // .fo_chown = invfo_chown,
    // .fo_sendfile = invfo_sendfile,
    // .fo_fill_kinfo = ptsdev_fill_kinfo,
    .fo_flags = DFLAG_PASSABLE,
};
/*
* Driver-side hooks.
*/
/*
 * Driver hook: output from the slave became available; wake blocked
 * readers and pollers on the master side.
 */
static void ptsdrv_outwakeup(struct lwp_tty *tp)
{
    struct pts_softc *psc = tty_softc(tp);

    cv_broadcast(&psc->pts_outwait);
    rt_wqueue_wakeup_all(&psc->pts_outpoll, (void *)POLLIN);
}
/*
 * Driver hook: the slave's input queue gained room; wake blocked
 * writers and pollers on the master side.
 */
static void ptsdrv_inwakeup(struct lwp_tty *tp)
{
    struct pts_softc *psc = tty_softc(tp);

    cv_broadcast(&psc->pts_inwait);
    rt_wqueue_wakeup_all(&psc->pts_inpoll, (void *)POLLOUT);
}
/*
 * Driver hook: slave open. Refused with -EIO while the device is
 * locked via ioctl(TIOCSPTLCK); otherwise the FINISHED flag is
 * cleared so reads/writes proceed again.
 */
static int ptsdrv_open(struct lwp_tty *tp)
{
    struct pts_softc *psc = tty_softc(tp);

    if (psc->pts_flags & PTS_PTLOCKED)
        return -EIO;

    psc->pts_flags &= ~PTS_FINISHED;
    return 0;
}
/*
 * Driver hook: slave close. Mark the device finished and wake every
 * blocked reader/writer so they observe the state change.
 */
static void ptsdrv_close(struct lwp_tty *tp)
{
    struct pts_softc *psc = tty_softc(tp);

    psc->pts_flags |= PTS_FINISHED;
    ptsdrv_outwakeup(tp);
    ptsdrv_inwakeup(tp);
}
/*
 * Driver hook: record a packet-mode event for the master to pick up.
 * Events come in mutually exclusive pairs (START/STOP, NOSTOP/DOSTOP),
 * so recording one member of a pair clears the other first.
 */
static void ptsdrv_pktnotify(struct lwp_tty *tp, char event)
{
    struct pts_softc *psc = tty_softc(tp);

    /* Clear the flag that conflicts with the incoming event. */
    if (event == TIOCPKT_STOP)
        psc->pts_pkt &= ~TIOCPKT_START;
    else if (event == TIOCPKT_START)
        psc->pts_pkt &= ~TIOCPKT_STOP;
    else if (event == TIOCPKT_NOSTOP)
        psc->pts_pkt &= ~TIOCPKT_DOSTOP;
    else if (event == TIOCPKT_DOSTOP)
        psc->pts_pkt &= ~TIOCPKT_NOSTOP;

    psc->pts_pkt |= event;

    /**
     * Note: on smart, we don't wakeup master until it's willing to accept
     * packet event. Because on poll, we setup POLLIN for PTS_PKT only. So There
     * is a chance when we wakeup ipc but we can't wakeup user again. Since
     * current wakeup will remove the wakequeue node on the meanwhile
     */
    if (psc->pts_flags & PTS_PKT)
        ptsdrv_outwakeup(tp);
}
/*
 * Driver hook: final teardown of the softc once the TTY is released.
 * Returns the unit number to ptyfs, wakes any remaining pollers with
 * POLLHUP, and frees the softc memory.
 */
static void ptsdrv_free(void *softc)
{
    struct pts_softc *psc = softc;
    /* Make device number available again. */
    if (psc->pts_unit >= 0)
        ptyfs_unregister_pts(psc->pts_master, psc->pts_unit);
#ifdef USING_BSD_UCRED
    chgptscnt(psc->pts_cred->cr_ruidinfo, -1, 0);
    racct_sub_cred(psc->pts_cred, RACCT_NPTS, 1);
    crfree(psc->pts_cred);
#endif
    rt_wqueue_wakeup_all(&psc->pts_inpoll, (void *)POLLHUP);
    rt_wqueue_wakeup_all(&psc->pts_outpoll, (void *)POLLHUP);
    rt_free(psc);
}
/* Driver vector binding the pts hooks above into the TTY framework. */
static struct lwp_ttydevsw pts_class = {
    .tsw_flags = TF_NOPREFIX,
    .tsw_outwakeup = ptsdrv_outwakeup,
    .tsw_inwakeup = ptsdrv_inwakeup,
    .tsw_open = ptsdrv_open,
    .tsw_close = ptsdrv_close,
    .tsw_pktnotify = ptsdrv_pktnotify,
    .tsw_free = ptsdrv_free,
};
/*
 * Allocate and register a new pseudo-terminal pair for the given ptmx
 * master file: create the softc and TTY, obtain a unit number from
 * ptyfs, and expose the slave as <ptyfs-root>/<unit>.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EAGAIN when no
 * unit is available.
 *
 * Fixes vs. the original:
 *  - the lwp_tty_create() failure path freed `psc' BEFORE detaching
 *    the condvars embedded in it (use-after-free); detach now happens
 *    first;
 *  - the rt_calloc() result is checked before use.
 */
int pts_alloc(int fflags, struct rt_thread *td, struct dfs_file *ptm_file)
{
    int unit;
    struct lwp_tty *tp;
    struct pts_softc *psc;
    char name_buf[DIRENT_NAME_MAX];
    const char *rootpath;
    rt_device_t ptmx_device = ptm_file->vnode->data;
#ifdef USING_BSD_UCRED
    struct rt_lwp *p = td->lwp;
    int ok, error;
    struct ucred *cred = td->td_ucred;
#endif
    /* Resource limiting. */
#ifdef USING_BSD_UCRED
    LWP_LOCK(p);
    error = racct_add(p, RACCT_NPTS, 1);
    if (error != 0)
    {
        LWP_UNLOCK(p);
        return -EAGAIN;
    }
    ok = chgptscnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_NPTS));
    if (!ok)
    {
        racct_sub(p, RACCT_NPTS, 1);
        LWP_UNLOCK(p);
        return -EAGAIN;
    }
    LWP_UNLOCK(p);
#endif
    /* Allocate TTY and softc. */
    psc = rt_calloc(1, sizeof(struct pts_softc));
    if (!psc)
        return -ENOMEM;
    cv_init(&psc->pts_inwait, "ptsin");
    cv_init(&psc->pts_outwait, "ptsout");
    rt_wqueue_init(&psc->pts_inpoll);
    rt_wqueue_init(&psc->pts_outpoll);
    psc->pts_master = ptmx_device;
#ifdef USING_BSD_UCRED
    psc->pts_cred = crhold(cred);
#else
    psc->pts_cred = 0;
#endif
    tp = lwp_tty_create(&pts_class, psc);
    if (!tp)
    {
        /* Detach the condvars before releasing the memory backing them. */
        rt_condvar_detach(&psc->pts_inwait);
        rt_condvar_detach(&psc->pts_outwait);
        rt_free(psc);
        return -ENOMEM;
    }
    /* Try to allocate a new pts uint*/
    unit = ptyfs_register_pts(ptmx_device, &tp->parent);
    if (unit < 0)
    {
#ifdef USING_BSD_UCRED
        racct_sub(p, RACCT_NPTS, 1);
        chgptscnt(cred->cr_ruidinfo, -1, 0);
#endif
        /*
         * NOTE(review): psc->pts_unit is still 0 (calloc) here; if
         * lwp_tty_delete() ends up in ptsdrv_free(), unit 0 would be
         * unregistered spuriously — confirm lwp_tty_delete() semantics.
         */
        lwp_tty_delete(tp);
        return -EAGAIN;
    }
    psc->pts_unit = unit;
    /* Expose the slave device as well. */
#ifdef USING_BSD_UCRED
    tty_makedev(tp, td->td_ucred, "pts/%u", psc->pts_unit);
#else
    rootpath = ptyfs_get_rootpath(ptmx_device);
    RT_ASSERT(rootpath[strlen(rootpath) - 1] != '/');
    snprintf(name_buf, DIRENT_NAME_MAX, "%s/%d", rootpath, psc->pts_unit);
    /* setup the pts */
    lwp_tty_register(tp, name_buf);
    /* now this file operating on new pty */
    ptm_file->data = tp;
#endif
    return 0;
}
/* Lock or unlock the slave against opening (ioctl TIOCSPTLCK). */
void pts_set_lock(lwp_tty_t pts, rt_bool_t is_lock)
{
    struct pts_softc *psc = tty_softc(pts);

    if (is_lock)
        psc->pts_flags |= PTS_PTLOCKED;
    else
        psc->pts_flags &= ~PTS_PTLOCKED;
}
/* Query the slave-open lock state (ioctl TIOCGPTLCK). */
rt_bool_t pts_is_locked(lwp_tty_t pts)
{
    struct pts_softc *psc = tty_softc(pts);

    return (psc->pts_flags & PTS_PTLOCKED) != 0;
}
/* Query TIOCPKT packet mode: returns 1 when enabled, 0 otherwise. */
int pts_get_pktmode(lwp_tty_t pts)
{
    struct pts_softc *softc = tty_softc(pts);

    return (softc->pts_flags & PTS_PKT) != 0;
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,414 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell init ver.
*/
#ifndef __LWP_TERMINAL_H__
#define __LWP_TERMINAL_H__
#include "bsd_ttyqueue.h"
#include "bsd_ttydisc.h"
#ifdef USING_BSD_HOOK
#include "bsd_ttyhook.h"
#endif
#include <lwp.h>
#include <rtdef.h>
/* include kernel header for termios base definitions */
#include <termios.h>
/* for _POSIX_VDISABLE */
#include <unistd.h>
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed under sponsorship from Snow
* B.V., the Netherlands.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
struct lwp_tty;
/*
* Driver routines that are called from the line discipline to adjust
* hardware parameters and such.
*/
typedef int tsw_open_t(struct lwp_tty *tp);
typedef void tsw_close_t(struct lwp_tty *tp);
typedef void tsw_outwakeup_t(struct lwp_tty *tp);
typedef void tsw_inwakeup_t(struct lwp_tty *tp);
typedef int tsw_ioctl_t(struct lwp_tty *tp, rt_ubase_t cmd, rt_caddr_t data,
struct rt_thread *td);
typedef int tsw_cioctl_t(struct lwp_tty *tp, int unit, rt_ubase_t cmd, rt_caddr_t data,
struct rt_thread *td);
typedef int tsw_param_t(struct lwp_tty *tp, struct termios *t);
typedef int tsw_modem_t(struct lwp_tty *tp, int sigon, int sigoff);
typedef int tsw_mmap_t(struct lwp_tty *tp, vm_ooffset_t offset,
vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr);
typedef void tsw_pktnotify_t(struct lwp_tty *tp, char event);
typedef void tsw_free_t(void *softc);
typedef rt_bool_t tsw_busy_t(struct lwp_tty *tp);
struct lwp_ttydevsw
{
unsigned int tsw_flags; /* Default TTY flags. */
tsw_open_t *tsw_open; /* Device opening. */
tsw_close_t *tsw_close; /* Device closure. */
tsw_outwakeup_t *tsw_outwakeup; /* Output available. */
tsw_inwakeup_t *tsw_inwakeup; /* Input can be stored again. */
tsw_ioctl_t *tsw_ioctl; /* ioctl() hooks. */
tsw_cioctl_t *tsw_cioctl; /* ioctl() on control devices. */
tsw_param_t *tsw_param; /* TIOCSETA device parameter setting. */
tsw_modem_t *tsw_modem; /* Modem sigon/sigoff. */
tsw_mmap_t *tsw_mmap; /* mmap() hooks. */
tsw_pktnotify_t *tsw_pktnotify; /* TIOCPKT events. */
tsw_free_t *tsw_free; /* Destructor. */
tsw_busy_t *tsw_busy; /* Draining output. */
void *tsw_spare[3]; /* For future use. */
};
typedef struct lwp_ttydevsw *lwp_ttydevsw_t;
struct lwp_tty
{
struct rt_device parent; /* inherit from Class:RT_Device */
struct rt_mutex *t_mtx; /* TTY lock. */
struct rt_mutex t_mtxobj; /* Per-TTY lock (when not borrowing). */
rt_list_t t_list; /* (l) TTY list entry. */
int t_drainwait; /* (t) TIOCDRAIN timeout seconds. */
unsigned int t_flags; /* (t) Terminal option flags. */
/* Keep flags in sync with db_show_tty and pstat(8). */
#define TF_NOPREFIX 0x00001 /* Don't prepend "tty" to device name. */
#define TF_INITLOCK 0x00002 /* Create init/lock state devices. */
#define TF_CALLOUT 0x00004 /* Create "cua" devices. */
#define TF_OPENED_IN 0x00008 /* "tty" node is in use. */
#define TF_OPENED_OUT 0x00010 /* "cua" node is in use. */
#define TF_OPENED_CONS 0x00020 /* Device in use as console. */
#define TF_OPENED (TF_OPENED_IN | TF_OPENED_OUT | TF_OPENED_CONS)
#define TF_GONE 0x00040 /* Device node is gone. */
#define TF_OPENCLOSE 0x00080 /* Device is in open()/close(). */
#define TF_ASYNC 0x00100 /* Asynchronous I/O enabled. */
#define TF_LITERAL 0x00200 /* Accept the next character literally. */
#define TF_HIWAT_IN 0x00400 /* We've reached the input watermark. */
#define TF_HIWAT_OUT 0x00800 /* We've reached the output watermark. */
#define TF_HIWAT (TF_HIWAT_IN | TF_HIWAT_OUT)
#define TF_STOPPED 0x01000 /* Output flow control - stopped. */
#define TF_EXCLUDE 0x02000 /* Exclusive access. */
#define TF_BYPASS 0x04000 /* Optimized input path. */
#define TF_ZOMBIE 0x08000 /* Modem disconnect received. */
#define TF_HOOK 0x10000 /* TTY has hook attached. */
#define TF_BUSY_IN 0x20000 /* Process busy in read() -- not supported. */
#define TF_BUSY_OUT 0x40000 /* Process busy in write(). */
#define TF_BUSY (TF_BUSY_IN | TF_BUSY_OUT)
unsigned int t_revokecnt; /* (t) revoke() count. */
/* Buffering mechanisms. */
struct ttyinq t_inq; /* (t) Input queue. */
size_t t_inlow; /* (t) Input low watermark. */
struct ttyoutq t_outq; /* (t) Output queue. */
size_t t_outlow; /* (t) Output low watermark. */
/* Sleeping mechanisms. */
struct rt_condvar t_inwait; /* (t) Input wait queue. */
struct rt_condvar t_outwait; /* (t) Output wait queue. */
struct rt_condvar t_outserwait; /* (t) Serial output wait queue. */
struct rt_condvar t_bgwait; /* (t) Background wait queue. */
struct rt_condvar t_dcdwait; /* (t) Carrier Detect wait queue. */
struct rt_wqueue t_inpoll; /* (t) Input poll queue. */
struct rt_wqueue t_outpoll; /* (t) Output poll queue. */
#ifdef USING_BSD_AIO
struct sigio *t_sigio; /* (t) Asynchronous I/O. */
#endif
struct termios t_termios; /* (t) I/O processing flags. */
struct winsize t_winsize; /* (t) Window size. */
unsigned int t_column; /* (t) Current cursor position. */
unsigned int t_writepos; /* (t) Where input was interrupted. */
int t_compatflags; /* (t) COMPAT_43TTY flags. */
/* Init/lock-state devices. */
struct termios t_termios_init_in; /* tty%s.init. */
struct termios t_termios_lock_in; /* tty%s.lock. */
#ifdef USING_BSD_INIT_LOCK_DEVICE
struct termios t_termios_init_out; /* cua%s.init. */
struct termios t_termios_lock_out; /* cua%s.lock. */
#endif /* USING_BSD_INIT_LOCK_DEVICE */
struct lwp_ttydevsw *t_devsw; /* (c) Driver hooks. */
#ifdef USING_BSD_HOOK
struct lwp_ttyhook *t_hook; /* (t) Capture/inject hook. */
#endif
/* Process signal delivery. */
struct rt_processgroup *t_pgrp; /* (t) Foreground process group. */
struct rt_session *t_session; /* (t) Associated session. */
unsigned int t_sessioncnt; /* (t) Backpointing sessions. */
void *t_devswsoftc; /* (c) Soft config, for drivers. */
#ifdef USING_BSD_HOOK
void *t_hooksoftc; /* (t) Soft config, for hooks. */
#endif
#ifdef USING_BSD_CHAR_DEVICE
struct cdev *t_dev; /* (c) Primary character device. */
#endif /* USING_BSD_CHAR_DEVICE */
#ifdef USING_BSD_SIGINFO
size_t t_prbufsz; /* (t) SIGINFO buffer size. */
char t_prbuf[]; /* (t) SIGINFO buffer. */
#endif /* USING_BSD_SIGINFO */
};
typedef struct lwp_tty *lwp_tty_t;
/* Allocation and deallocation. */
void tty_rel_pgrp(struct lwp_tty *tp, struct rt_processgroup *pgrp);
void tty_rel_sess(struct lwp_tty *tp, struct rt_session *sess);
void tty_rel_gone(struct lwp_tty *tp);
/* tty locking mechanism */
#define tty_getlock(tp) ((tp)->t_mtx)

/*
 * Take/release the TTY lock. The previous definition of tty_lock carried a
 * trailing ';' in its expansion, which silently breaks constructs such as
 * `if (cond) tty_lock(tp); else ...`; the statement terminator is now
 * supplied by the caller, as for any expression-like macro.
 */
#define tty_lock(tp) rt_mutex_take(tty_getlock(tp), RT_WAITING_FOREVER)
#define tty_unlock(tp) rt_mutex_release(tty_getlock(tp))

/* true iff the calling thread currently owns the TTY lock */
#define tty_lock_owned(tp) \
    (rt_mutex_get_owner(tty_getlock(tp)) == rt_thread_self())
/* true iff the lock is held exactly once (i.e. not taken recursively) */
#define tty_lock_notrecused(tp) (rt_mutex_get_hold(tty_getlock(tp)) == 1)
#define tty_assert_locked(tp) RT_ASSERT(tty_lock_owned(tp))
/* BSD-compat assertion: only MA_OWNED | MA_NOTRECURSED is supported */
#define tty_lock_assert(tp, option) \
    RT_ASSERT(((option) == (MA_OWNED | MA_NOTRECURSED)) && \
              (tty_lock_owned(tp) && tty_lock_notrecused(tp)))
/* System messages. */
int tty_checkoutq(struct lwp_tty *tp);
int tty_putchar(struct lwp_tty *tp, char c);
int tty_putstrn(struct lwp_tty *tp, const char *p, size_t n);
int tty_ioctl(struct lwp_tty *tp, rt_ubase_t cmd, void *data, int fflag,
struct rt_thread *td);
int tty_ioctl_compat(struct lwp_tty *tp, rt_ubase_t cmd, rt_caddr_t data, int fflag,
struct rt_thread *td);
void tty_set_winsize(struct lwp_tty *tp, const struct winsize *wsz);
void tty_init_console(struct lwp_tty *tp, speed_t speed);
void tty_flush(struct lwp_tty *tp, int flags);
void tty_hiwat_in_block(struct lwp_tty *tp);
void tty_hiwat_in_unblock(struct lwp_tty *tp);
dev_t tty_udev(struct lwp_tty *tp);
/* tesing on tty */
#define tty_opened(tp) ((tp)->t_flags & TF_OPENED)
#define tty_gone(tp) ((tp)->t_flags & TF_GONE)
#define tty_softc(tp) ((tp)->t_devswsoftc)
#define tty_devname(tp) ((tp)->parent.parent.name)
/**
* @brief TTY registeration on device subsystem
*
* @warning It's the duty of the caller to ensure that the name is not
* identical to any existed registered devices.
*
* @param terminal the target tty device
* @param name name of the device (must be exclusive)
* @return rt_err_t RT_EOK on success
*/
rt_err_t lwp_tty_register(lwp_tty_t terminal, const char *name);
/**
* @brief TTY allocation and deallocation. TTY devices can be deallocated when
* the driver doesn't use it anymore, when the TTY isn't a session's
* controlling TTY and when the device node isn't opened through devfs.
*
* @param handle device handle of tty
* @param softc device configuration binding on tty
* @param prefix device name prefix
* @param cutom_mtx the lock provided to protect tty
* @return lwp_tty_t NULL on failure
*/
lwp_tty_t lwp_tty_create_ext(lwp_ttydevsw_t handle, void *softc,
rt_mutex_t custom_mtx);
/**
* @brief Handful version of lwp_tty_create_ext
*
* @param softc device configuration binding on tty
* @param cutom_mtx the lock provided to protect tty
* @param prefix device name prefix
* @return lwp_tty_t NULL on failure
*/
lwp_tty_t lwp_tty_create(lwp_ttydevsw_t handle, void *softc);
void lwp_tty_delete(lwp_tty_t tp);
void lwp_tty_signal_sessleader(struct lwp_tty *tp, int sig);
void lwp_tty_signal_pgrp(struct lwp_tty *tp, int sig);
/**
* @brief Create a new pseudo-terminal multiplexer
*
* @param root_path path of root mount point of ptyfs
* @return rt_device_t new device object if succeed, otherwise NULL
*/
rt_err_t lwp_ptmx_init(rt_device_t ptmx_device, const char *root_path);
#define LWP_CONSOLE_LOWEST_PRIOR 0
#define LWP_CONSOLE_HIGHEST_PRIO INT_MAX
/**
* @brief Register an alternative backend tty device as console
*/
rt_err_t lwp_console_register_backend(struct rt_device *bakdev, int prio);
/* Driver-hook dispatch: open the backing device. Caller must hold the TTY
 * lock, and the TTY must not be revoked (TF_GONE). */
rt_inline int ttydevsw_open(struct lwp_tty *tp)
{
    tty_assert_locked(tp);
    MPASS(!tty_gone(tp));
    return (tp->t_devsw->tsw_open(tp));
}
/* Driver-hook dispatch: close the backing device. TTY lock held, not gone. */
rt_inline void ttydevsw_close(struct lwp_tty *tp)
{
    tty_assert_locked(tp);
    MPASS(!tty_gone(tp));
    tp->t_devsw->tsw_close(tp);
}
/* Driver-hook dispatch: notify the driver that output data is available.
 * Skipped when the output queue has nothing to hand out. */
rt_inline void ttydevsw_outwakeup(struct lwp_tty *tp)
{
    tty_assert_locked(tp);
    MPASS(!tty_gone(tp));
    /* Prevent spurious wakeups. */
    if (ttydisc_getc_poll(tp) == 0)
        return;
    tp->t_devsw->tsw_outwakeup(tp);
}
/* Driver-hook dispatch: notify the driver that input can be accepted again.
 * Suppressed while input flow control is asserted (TF_HIWAT_IN). */
rt_inline void ttydevsw_inwakeup(struct lwp_tty *tp)
{
    tty_assert_locked(tp);
    MPASS(!tty_gone(tp));
    /* Prevent spurious wakeups. */
    if (tp->t_flags & TF_HIWAT_IN)
        return;
    tp->t_devsw->tsw_inwakeup(tp);
}
/* Driver-hook dispatch: forward an ioctl to the device driver. */
rt_inline int ttydevsw_ioctl(struct lwp_tty *tp, rt_ubase_t cmd, rt_caddr_t data,
                             struct rt_thread *td)
{
    tty_assert_locked(tp);
    MPASS(!tty_gone(tp));
    return (tp->t_devsw->tsw_ioctl(tp, cmd, data, td));
}
/* Driver-hook dispatch: forward an ioctl issued on a control device
 * (identified by @unit) to the device driver. */
rt_inline int ttydevsw_cioctl(struct lwp_tty *tp, int unit, rt_ubase_t cmd,
                              rt_caddr_t data, struct rt_thread *td)
{
    tty_assert_locked(tp);
    MPASS(!tty_gone(tp));
    return (tp->t_devsw->tsw_cioctl(tp, unit, cmd, data, td));
}
/* Driver-hook dispatch: apply termios hardware parameters (TIOCSETA path).
 * Note: no lock assertion here, mirroring the FreeBSD original. */
rt_inline int ttydevsw_param(struct lwp_tty *tp, struct termios *t)
{
    MPASS(!tty_gone(tp));
    return (tp->t_devsw->tsw_param(tp, t));
}
/* Driver-hook dispatch: raise (@sigon) / drop (@sigoff) modem signals. */
rt_inline int ttydevsw_modem(struct lwp_tty *tp, int sigon, int sigoff)
{
    MPASS(!tty_gone(tp));
    return (tp->t_devsw->tsw_modem(tp, sigon, sigoff));
}
/* Driver-hook dispatch: translate an mmap request to a physical address. */
rt_inline int ttydevsw_mmap(struct lwp_tty *tp, vm_ooffset_t offset,
                            vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr)
{
    MPASS(!tty_gone(tp));
    return (tp->t_devsw->tsw_mmap(tp, offset, paddr, nprot, memattr));
}
/* Driver-hook dispatch: report a TIOCPKT event byte to the driver. */
rt_inline void ttydevsw_pktnotify(struct lwp_tty *tp, char event)
{
    tty_assert_locked(tp);
    MPASS(!tty_gone(tp));
    tp->t_devsw->tsw_pktnotify(tp, event);
}
/* Driver-hook dispatch: destroy the driver soft state. Unlike the other
 * hooks, this one requires the TTY to already be gone (TF_GONE set). */
rt_inline void ttydevsw_free(struct lwp_tty *tp)
{
    MPASS(tty_gone(tp));
    tp->t_devsw->tsw_free(tty_softc(tp));
}
/* Driver-hook dispatch: ask whether the device is still draining output. */
rt_inline rt_bool_t ttydevsw_busy(struct lwp_tty *tp)
{
    tty_assert_locked(tp);
    MPASS(!tty_gone(tp));
    return (tp->t_devsw->tsw_busy(tp));
}
/* Line discipline: number of canonicalized input bytes ready for read(). */
rt_inline size_t ttydisc_read_poll(struct lwp_tty *tp)
{
    tty_assert_locked(tp);
    return ttyinq_bytescanonicalized(&tp->t_inq);
}
/* Line discipline: free space remaining in the output queue for write(). */
rt_inline size_t ttydisc_write_poll(struct lwp_tty *tp)
{
    tty_assert_locked(tp);
    return ttyoutq_bytesleft(&tp->t_outq);
}
#endif /* __LWP_TERMINAL_H__ */

View File

@ -0,0 +1,136 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell init ver.
*/
#ifndef __TTY_CONFIG_H__
#define __TTY_CONFIG_H__
/* default buffer size of tty siginfo */
#define LWP_TTY_PRBUF_SIZE 256
/*
* System wide defaults for terminal state.
*/
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 1982, 1986, 1993
* The Regents of the University of California. All rights reserved.
* (c) UNIX System Laboratories, Inc.
* All or some portions of this file are derived from material licensed
* to the University of California by American Telephone and Telegraph
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
* the permission of UNIX System Laboratories, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ttydefaults.h 8.4 (Berkeley) 1/21/94
*/
/*
* Defaults on "first" open.
*/
#define TTYDEF_IFLAG (BRKINT | ICRNL | IMAXBEL | IXON | IXANY | IUTF8)
#define TTYDEF_OFLAG (OPOST | ONLCR)
#define TTYDEF_LFLAG_NOECHO (ICANON | ISIG | IEXTEN)
#define TTYDEF_LFLAG_ECHO \
(TTYDEF_LFLAG_NOECHO | ECHO | ECHOE | ECHOKE | ECHOCTL)
#define TTYDEF_LFLAG TTYDEF_LFLAG_ECHO
#define TTYDEF_CFLAG (CREAD | CS8 | HUPCL)
#define TTYDEF_SPEED (B9600)
/*
* Control Character Defaults
*/
/*
* XXX: A lot of code uses lowercase characters, but control-character
* conversion is actually only valid when applied to uppercase
* characters. We just treat lowercase characters as if they were
* inserted as uppercase.
*/
#define _CONTROL(c) \
((c) >= 'a' && (c) <= 'z' ? ((c) - 'a' + 1) : (((c) - 'A' + 1) & 0x7f))
#define CEOF _CONTROL('D')
#define CEOL 0xff /* XXX avoid _POSIX_VDISABLE */
#define CERASE 0x7f
#define CERASE2 _CONTROL('H')
#define CINTR _CONTROL('C')
#define CSTATUS _CONTROL('T')
#define CKILL _CONTROL('U')
#define CMIN 1
#define CQUIT _CONTROL('\\')
#define CSUSP _CONTROL('Z')
#define CTIME 0
#define CDSUSP _CONTROL('Y')
#define CSTART _CONTROL('Q')
#define CSTOP _CONTROL('S')
#define CLNEXT _CONTROL('V')
#define CDISCARD _CONTROL('O')
#define CWERASE _CONTROL('W')
#define CREPRINT _CONTROL('R')
#define CEOT CEOF
/* compat */
#define CBRK CEOL
#define CRPRNT CREPRINT
#define CFLUSH CDISCARD
/* PROTECTED INCLUSION ENDS HERE */
#endif /* !__TTY_CONFIG_H__ */
/*
 * #define TTY_CONF_INCLUDE_CCHARS to include an array of default control
 * characters.
 */
#ifdef TTY_CONF_INCLUDE_CCHARS
#include <rtdef.h>
#include <termios.h>
#include <unistd.h>
/* Default control-character table applied on "first" open (consumed by
 * tty_init_termios). Entries not listed here (e.g. VTIME) rely on static
 * zero-initialization, which matches CTIME == 0.
 * NOTE(review): the #undef _CONTROL sits inside the initializer list; legal
 * for a preprocessor directive, but consider moving it past the brace. */
static const cc_t tty_ctrl_charset[NCCS] = {
    [VINTR] = CINTR,
    [VQUIT] = CQUIT,
    [VERASE] = CERASE,
    [VKILL] = CKILL,
    [VEOF] = CEOF,
    [VSTART] = CSTART,
    [VSTOP] = CSTOP,
    [VSUSP] = CSUSP,
    [VREPRINT] = CREPRINT,
    [VDISCARD] = CDISCARD,
    [VWERASE] = CWERASE,
    [VLNEXT] = CLNEXT,
    [VMIN] = CMIN
#undef _CONTROL
};
#undef TTY_CONF_INCLUDE_CCHARS
#endif /* TTY_CONF_INCLUDE_CCHARS */

View File

@ -0,0 +1,128 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-12-11 Shell init ver.
*/
#define DBG_TAG "lwp.tty"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "tty_config.h"
#include "tty_internal.h"
#include "bsd_porting.h"
#include "terminal.h"
#include <fcntl.h>
#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops cons_rtdev_ops;
#endif
struct backend_entry
{
rt_list_t bakend_list_node;
int prio;
rt_device_t bakdev;
};
static rt_list_t _bakend_list;
/**
 * Insert a console backend entry into _bakend_list, keeping the list sorted
 * by descending priority so the head entry is always the preferred backend
 * (as assumed by _cons_readlink).
 */
static void _bent_enqueue(struct backend_entry *bent)
{
    struct backend_entry *idx;

    rt_list_for_each_entry(idx, &_bakend_list, bakend_list_node)
    {
        /* stop at the first entry with a strictly lower priority */
        if (idx->prio < bent->prio)
        {
            rt_list_insert_before(&idx->bakend_list_node, &bent->bakend_list_node);
            return;
        }
    }

    /*
     * No lower-priority entry found: bent has the lowest priority so far and
     * belongs at the tail. (The previous code inserted it right after the
     * list head, i.e. prepended it, letting a low-priority backend shadow
     * the highest-priority one at the head.)
     */
    rt_list_insert_before(&_bakend_list, &bent->bakend_list_node);
}
rt_err_t lwp_console_register_backend(struct rt_device *bakdev, int prio)
{
rt_err_t ret = RT_EOK;
struct backend_entry *bent;
bent = rt_malloc(sizeof(struct backend_entry));
if (bent)
{
rt_list_init(&bent->bakend_list_node);
bent->prio = prio;
bent->bakdev = bakdev;
_bent_enqueue(bent);
}
else
{
ret = -RT_ENOMEM;
}
return ret;
}
static struct rt_device _cons_rtdev;
/* The "console" node is never opened directly; it is resolved to a backend
 * device through its readlink hook (_cons_readlink). */
static int fops_open(struct dfs_file *file)
{
    return -EINVAL;
}
static struct dfs_file_ops _cons_fops = {
.open = fops_open,
};
/**
 * Resolve the "console" device to its highest-priority backend by copying
 * the backend's device name into @buf.
 *
 * @param dev the console device (unused)
 * @param buf output buffer; always NUL-terminated on success
 * @param len capacity of @buf in bytes
 * @return 0 on success, -EIO when no backend is registered or len == 0
 */
static rt_err_t _cons_readlink(struct rt_device *dev, char *buf, int len)
{
    int rc = -EIO;
    struct backend_entry *bent;

    if (len > 0 && !rt_list_isempty(&_bakend_list))
    {
        /* head entry is the highest-priority backend (list kept sorted) */
        bent = rt_list_first_entry(&_bakend_list, struct backend_entry, bakend_list_node);
        if (bent)
        {
            int cap;

            RT_ASSERT(bent->bakdev);
            /* reserve one byte for the terminator: plain strncpy does not
             * guarantee NUL-termination, and device names may occupy the
             * full RT_NAME_MAX field */
            cap = MIN(len - 1, RT_NAME_MAX);
            strncpy(buf, bent->bakdev->parent.name, cap);
            buf[cap] = '\0';
            LOG_D("%s: backend device %s", __func__, buf);
            rc = 0;
        }
    }
    if (rc != 0)
    {
        LOG_W("%s: No backend device", __func__);
    }
    return rc;
}
/* Board-init hook: create the system-level "console" pseudo device whose
 * readlink resolves to the current best backend. */
static int _cons_init(void)
{
    rt_err_t rc;
    rt_list_init(&_bakend_list);
    /* setup system level device */
    _cons_rtdev.type = RT_Device_Class_Char;
    /* NOTE(review): cons_rtdev_ops is only declared under
     * RT_USING_DEVICE_OPS; confirm this file is never built without it */
    _cons_rtdev.ops = &cons_rtdev_ops;
    rc = rt_device_register(&_cons_rtdev, "console", RT_DEVICE_FLAG_DYNAMIC);
    if (rc == RT_EOK)
    {
        /* attach hooks only after the device exists in the namespace */
        _cons_rtdev.readlink = &_cons_readlink;
        _cons_rtdev.fops = &_cons_fops;
    }
    return rc;
}
INIT_DEVICE_EXPORT(_cons_init);

View File

@ -0,0 +1,106 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-30 Shell init ver.
*/
#define DBG_TAG "lwp.ctty"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#define TTY_CONF_INCLUDE_CCHARS
#include "tty_config.h"
#include "tty_internal.h"
#include "terminal.h"
/* /dev/tty is never opened directly; it resolves to the session's
 * controlling terminal through the readlink hook (ctty_readlink). */
static int fops_open(struct dfs_file *file)
{
    return -EINVAL;
}
/**
 * Resolve /dev/tty to the calling process's controlling terminal by copying
 * its device name into @buf.
 *
 * The lwp -> pgrp -> session -> ctty chain is read optimistically, then
 * revalidated under the tty lock before the name is copied.
 *
 * @param dev the /dev/tty device (unused)
 * @param buf output buffer; NUL-terminated on success
 * @param len capacity of @buf in bytes
 * @return RT_EOK on success, -ENXIO when there is no controlling terminal
 *         or len == 0
 */
static rt_err_t ctty_readlink(struct rt_device *dev, char *buf, int len)
{
    int rc = -ENXIO;
    lwp_tty_t tp;
    rt_session_t sess;
    rt_processgroup_t pgrp;
    rt_lwp_t lwp;

    lwp = lwp_self();
    if (lwp)
    {
        pgrp = lwp->pgrp;
        if (pgrp)
        {
            sess = pgrp->session;
            if (sess)
            {
                tp = sess->ctty;
                if (tp)
                {
                    tty_lock(tp);
                    /* revalidate the whole chain now that we hold the lock */
                    if (lwp->pgrp == pgrp && pgrp->session == sess && sess->ctty == tp)
                    {
                        if (len > 0)
                        {
                            rt_strncpy(buf, tp->parent.parent.name, len);
                            /* rt_strncpy does not guarantee termination */
                            buf[len - 1] = '\0';
                            rc = RT_EOK;
                        }
                    }
                    tty_unlock(tp);
                }
            }
        }
    }
    return rc;
}
static struct dfs_file_ops ctty_file_ops = {
.open = fops_open,
};
/* character device for tty */
#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops tty_dev_ops = {
/* IO directly through device is not allowed */
};
#else
#error Must enable RT_USING_DEVICE_OPS in Kconfig
#endif
/* Common setup for the /dev/tty pseudo device: a char device whose ops table
 * deliberately forbids direct device-layer I/O. */
rt_inline void device_setup(rt_device_t ctty)
{
    ctty->type = RT_Device_Class_Char;
#ifdef RT_USING_DEVICE_OPS
    ctty->ops = &tty_dev_ops;
#else
#error Must enable RT_USING_DEVICE_OPS in Kconfig
#endif
}
/* Register the controlling-terminal pseudo device ("tty") with DFS; its
 * readlink hook redirects opens to the session's real terminal. */
static int lwp_ctty_register(rt_device_t ctty)
{
    rt_err_t err;

    device_setup(ctty);

    err = rt_device_register(ctty, "tty", RT_DEVICE_FLAG_DYNAMIC);
    if (err != RT_EOK)
        return err;

    /* attach hooks once the device is visible in the namespace */
    ctty->readlink = &ctty_readlink;
    ctty->fops = &ctty_file_ops;
    return RT_EOK;
}
static struct rt_device ctty;
/* Board-init hook: register the singleton /dev/tty device. */
static int lwp_ctty_init(void)
{
    return lwp_ctty_register(&ctty);
}
INIT_DEVICE_EXPORT(lwp_ctty_init);

View File

@ -0,0 +1,456 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell init ver.
*/
#define DBG_TAG "lwp.tty"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#define TTY_CONF_INCLUDE_CCHARS
#include "tty_config.h"
#include "tty_internal.h"
#include "terminal.h"
/* configure option: timeout of tty drain wait */
static int tty_drainwait = 5 * 60;
#define TTY_NAME_PREFIX "tty"
/**
 * Build a heap-allocated device name "tty<name>".
 *
 * @param name suffix appended to the TTY_NAME_PREFIX
 * @return rt_malloc()-ed string owned by the caller (release with rt_free),
 *         or RT_NULL on allocation failure
 */
static char *alloc_device_name(const char *name)
{
    char *tty_dev_name;
    size_t name_buf_len = (sizeof(TTY_NAME_PREFIX) - 1) /* raw prefix */
                          + rt_strlen(name)             /* custom name */
                          + 1;                          /* tailing \0 */
    tty_dev_name = rt_malloc(name_buf_len);
    if (tty_dev_name)
        /* bound the write to the computed capacity (was unbounded sprintf) */
        snprintf(tty_dev_name, name_buf_len, "%s%s", TTY_NAME_PREFIX, name);
    return tty_dev_name;
}
/* character device for tty */
#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops tty_dev_ops = {
/* IO directly through device is not allowed */
};
#else
#error Must enable RT_USING_DEVICE_OPS in Kconfig
#endif
/**
 * DFS open hook for a tty device node.
 *
 * Only the opener that sees vnode ref_count == 1 reaches the BSD cdev open
 * path; additional opens through the same vnode succeed immediately.
 * NOTE(review): assumes ref_count already accounts for the current open —
 * confirm against the dfs layer.
 */
static int tty_fops_open(struct dfs_file *file)
{
    int rc;
    lwp_tty_t tp;
    rt_device_t device;
    int devtype = 0; /* unused by the BSD open path */
    if (file->vnode && file->vnode->data)
    {
        if (file->vnode->ref_count != 1)
        {
            /* already opened through this vnode: nothing more to do */
            rc = 0;
        }
        else
        {
            /* vnode->data holds the rt_device embedded in struct lwp_tty */
            device = (rt_device_t)file->vnode->data;
            tp = rt_container_of(device, struct lwp_tty, parent);
            rc = bsd_ttydev_methods.d_open(tp, file->flags, devtype,
                                           rt_thread_self());
        }
    }
    else
    {
        rc = -EINVAL;
    }
    return rc;
}
/**
 * DFS close hook for a tty device node.
 *
 * Mirrors tty_fops_open: only the last closer (vnode ref_count == 1) runs
 * the BSD cdev close path; earlier closes succeed without side effects.
 */
static int tty_fops_close(struct dfs_file *file)
{
    int rc;
    lwp_tty_t tp;
    rt_device_t device;
    int fflags = FFLAGS(file->flags);
    int devtype = 0; /* unused by the BSD close path */
    if (file->vnode && file->vnode->data)
    {
        if (file->vnode->ref_count != 1)
        {
            /* other references remain: defer the real close */
            rc = 0;
        }
        else
        {
            device = (rt_device_t)file->vnode->data;
            tp = rt_container_of(device, struct lwp_tty, parent);
            rc = bsd_ttydev_methods.d_close(tp, fflags, devtype, rt_thread_self());
        }
    }
    else
    {
        rc = -EINVAL;
    }
    return rc;
}
/* DFS ioctl hook: forward the request to the tty ioctl adapter. */
static int tty_fops_ioctl(struct dfs_file *file, int cmd, void *arg)
{
    rt_device_t device;
    lwp_tty_t tp;

    /* a backing device must be attached to the vnode */
    if (!file->vnode || !file->vnode->data)
        return -EINVAL;

    device = (rt_device_t)file->vnode->data;
    tp = rt_container_of(device, struct lwp_tty, parent);
    return lwp_tty_ioctl_adapter(tp, cmd, file->flags, arg, rt_thread_self());
}
/**
 * DFS read hook: wrap the request in a BSD uio and dispatch to the cdev
 * read path.
 *
 * @return number of bytes read, or a negative error code
 */
static ssize_t tty_fops_read(struct dfs_file *file, void *buf, size_t count,
                             off_t *pos)
{
    ssize_t rc = 0;
    int error;
    struct uio uio;
    struct iovec iov;
    rt_device_t device;
    struct lwp_tty *tp;
    int ioflags;
    int oflags = file->flags;
    if (file->vnode && file->vnode->data)
    {
        device = (rt_device_t)file->vnode->data;
        tp = rt_container_of(device, struct lwp_tty, parent);

        /* setup ioflags */
        ioflags = 0;
        if (oflags & O_NONBLOCK)
            ioflags |= IO_NDELAY;

        /* setup uio parameters */
        iov.iov_base = (void *)buf;
        iov.iov_len = count;
        uio.uio_offset = file->fpos;
        uio.uio_resid = count;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_rw = UIO_READ;

        rc = count;
        error = bsd_ttydev_methods.d_read(tp, &uio, ioflags);
        /* bytes transferred = requested - residual */
        rc -= uio.uio_resid;
        if (error)
        {
            /* fixed: this is the read path (message previously said "write") */
            LOG_D("%s: failed to read %d bytes of data. error code %d",
                  __func__, uio.uio_resid, error);
            rc = error;
        }
        /* reset file context */
        file->fpos = uio.uio_offset;
    }
    /* only peek at buf when data was actually transferred (rc may be a
     * negative error code, in which case buf holds nothing meaningful) */
    if (rc > 0)
        LOG_D("%s(len=%d, buf=%c \"%d\")", __func__, rc, *((char *)buf),
              *((char *)buf));
    return rc;
}
/**
 * DFS write hook: wrap the request in a BSD uio and dispatch to the cdev
 * write path.
 *
 * @return number of bytes written, or a negative error code
 */
static ssize_t tty_fops_write(struct dfs_file *file, const void *buf,
                              size_t count, off_t *pos)
{
    ssize_t rc = 0;
    int error;
    struct uio uio;
    struct iovec iov;
    rt_device_t device;
    struct lwp_tty *tp;
    int ioflags;
    int oflags = file->flags;
    if (file->vnode && file->vnode->data)
    {
        device = (rt_device_t)file->vnode->data;
        tp = rt_container_of(device, struct lwp_tty, parent);
        /* setup ioflags */
        ioflags = 0;
        if (oflags & O_NONBLOCK)
            ioflags |= IO_NDELAY;
        /* setup uio parameters */
        iov.iov_base = (void *)buf;
        iov.iov_len = count;
        uio.uio_offset = file->fpos;
        uio.uio_resid = count;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_rw = UIO_WRITE;
        rc = count;
        error = bsd_ttydev_methods.d_write(tp, &uio, ioflags);
        if (error)
        {
            rc = error;
            LOG_D("%s: failed to write %d bytes of data. error code %d",
                  __func__, uio.uio_resid, error);
        }
        else
        {
            /* bytes transferred = requested - residual */
            rc -= uio.uio_resid;
        }
        /* reset file context */
        file->fpos = uio.uio_offset;
    }
    return rc;
}
/* flush is not supported on tty nodes */
static int tty_fops_flush(struct dfs_file *file)
{
    return -EINVAL;
}
/* tty devices are not seekable */
static off_t tty_fops_lseek(struct dfs_file *file, off_t offset, int wherece)
{
    return -EINVAL;
}
static int tty_fops_truncate(struct dfs_file *file, off_t offset)
{
    /**
     * regarding to POSIX.1, TRUNC is not supported for tty device.
     * return 0 always to make filesystem happy
     */
    return 0;
}
/* DFS poll hook: dispatch to the BSD cdev poll path. Returns the revents
 * mask, or -1 when no device is attached to the vnode. */
static int tty_fops_poll(struct dfs_file *file, struct rt_pollreq *req)
{
    int rc;
    rt_device_t device;
    struct lwp_tty *tp;
    if (file->vnode && file->vnode->data)
    {
        device = (rt_device_t)file->vnode->data;
        tp = rt_container_of(device, struct lwp_tty, parent);
        rc = bsd_ttydev_methods.d_poll(tp, req, rt_thread_self());
    }
    else
    {
        rc = -1;
    }
    return rc;
}
/* mmap through the generic tty node is not supported */
static int tty_fops_mmap(struct dfs_file *file, struct lwp_avl_struct *mmap)
{
    return -EINVAL;
}
/* POSIX record locks are not supported on tty nodes */
static int tty_fops_lock(struct dfs_file *file, struct file_lock *flock)
{
    return -EINVAL;
}
/* BSD flock() is not supported on tty nodes */
static int tty_fops_flock(struct dfs_file *file, int operation, struct file_lock *flock)
{
    return -EINVAL;
}
static struct dfs_file_ops tty_file_ops = {
.open = tty_fops_open,
.close = tty_fops_close,
.ioctl = tty_fops_ioctl,
.read = tty_fops_read,
.write = tty_fops_write,
.flush = tty_fops_flush,
.lseek = tty_fops_lseek,
.truncate = tty_fops_truncate,
.poll = tty_fops_poll,
.mmap = tty_fops_mmap,
.lock = tty_fops_lock,
.flock = tty_fops_flock,
};
/* Common setup for a tty's embedded rt_device: a char device whose ops table
 * deliberately forbids direct device-layer I/O (all I/O goes through fops). */
rt_inline void device_setup(lwp_tty_t terminal)
{
    terminal->parent.type = RT_Device_Class_Char;
#ifdef RT_USING_DEVICE_OPS
    terminal->parent.ops = &tty_dev_ops;
#else
#error Must enable RT_USING_DEVICE_OPS in Kconfig
#endif
}
/* register TTY device */
rt_err_t lwp_tty_register(lwp_tty_t terminal, const char *name)
{
    rt_err_t rc = -RT_ENOMEM;
    const char *tty_name;
    char *alloc_name;
    if (terminal->t_devsw->tsw_flags & TF_NOPREFIX)
    {
        /* driver asked for the raw name, no "tty" prefix */
        alloc_name = RT_NULL;
        tty_name = name;
    }
    else
    {
        /* build "tty<name>"; on allocation failure tty_name stays NULL and
         * we fall through to return -RT_ENOMEM */
        alloc_name = alloc_device_name(name);
        tty_name = alloc_name;
    }
    if (tty_name)
    {
        device_setup(terminal);
        /* rt_device_register copies the name, so alloc_name can be freed
         * right after (rt_free(NULL) is a no-op for the no-prefix case) */
        rc = rt_device_register(&terminal->parent, tty_name, 0);
        if (rc == RT_EOK)
        {
            terminal->parent.fops = &tty_file_ops;
            LOG_D("%s() /dev/%s device registered", __func__, tty_name);
        }
        rt_free(alloc_name);
    }
    return rc;
}
/* Seed the tty's "first open" termios state from the system-wide defaults
 * (tty_config.h) and the default control-character table. */
static void tty_init_termios(lwp_tty_t tp)
{
    struct termios *t = &tp->t_termios_init_in;
    t->c_cflag = TTYDEF_CFLAG;
    t->c_iflag = TTYDEF_IFLAG;
    t->c_lflag = TTYDEF_LFLAG;
    t->c_oflag = TTYDEF_OFLAG;
    t->__c_ispeed = TTYDEF_SPEED;
    t->__c_ospeed = TTYDEF_SPEED;
    /*
     * Copy the whole control-character table. Use the array's byte size:
     * the previous expression (sizeof / sizeof[0]) yields the ELEMENT COUNT
     * and was only accidentally correct because sizeof(cc_t) == 1.
     */
    memcpy(&t->c_cc, tty_ctrl_charset, sizeof(tty_ctrl_charset));
#ifdef USING_BSD_INIT_LOCK_DEVICE
    tp->t_termios_init_out = *t;
#endif /* USING_BSD_INIT_LOCK_DEVICE */
}
/**
 * Allocate and initialize a TTY object.
 *
 * @param handle driver hook table; missing hooks are filled with BSD
 *               defaults by bsd_devsw_init()
 * @param softc driver soft state bound to the tty (retrievable via
 *              tty_softc())
 * @param custom_mtx optional driver-provided lock; when NULL the embedded
 *                   per-TTY mutex is initialized and used instead
 * @return the new tty, or NULL on allocation failure
 */
lwp_tty_t lwp_tty_create_ext(lwp_ttydevsw_t handle, void *softc,
                             rt_mutex_t custom_mtx)
{
    lwp_tty_t tp;
    /* one allocation covers the tty plus the optional SIGINFO print buffer */
    tp = rt_calloc(1, sizeof(struct lwp_tty)
#ifdef USING_BSD_SIGINFO
                          + LWP_TTY_PRBUF_SIZE
#endif
    );
    if (!tp)
        return tp;
    bsd_devsw_init(handle);
#ifdef USING_BSD_SIGINFO
    tp->t_prbufsz = LWP_TTY_PRBUF_SIZE;
#endif
    tp->t_devsw = handle;
    tp->t_devswsoftc = softc;
    tp->t_flags = handle->tsw_flags;
    tp->t_drainwait = tty_drainwait;
    tty_init_termios(tp);
    /* sleeping/wakeup primitives used by the line discipline */
    cv_init(&tp->t_inwait, "ttyin");
    cv_init(&tp->t_outwait, "ttyout");
    cv_init(&tp->t_outserwait, "ttyosr");
    cv_init(&tp->t_bgwait, "ttybg");
    cv_init(&tp->t_dcdwait, "ttydcd");
    rt_wqueue_init(&tp->t_inpoll);
    rt_wqueue_init(&tp->t_outpoll);
    /* Allow drivers to use a custom mutex to lock the TTY. */
    if (custom_mtx != NULL)
    {
        tp->t_mtx = custom_mtx;
    }
    else
    {
        tp->t_mtx = &tp->t_mtxobj;
        rt_mutex_init(&tp->t_mtxobj, "ttydev", RT_IPC_FLAG_PRIO);
    }
#ifdef USING_BSD_POLL
    knlist_init_mtx(&tp->t_inpoll.si_note, tp->t_mtx);
    knlist_init_mtx(&tp->t_outpoll.si_note, tp->t_mtx);
#endif
    return tp;
}
/* Convenience wrapper around lwp_tty_create_ext() using the built-in lock. */
lwp_tty_t lwp_tty_create(lwp_ttydevsw_t handle, void *softc)
{
    return lwp_tty_create_ext(handle, softc, NULL);
}
/**
 * Tear down and free a TTY created by lwp_tty_create{,_ext}().
 *
 * NOTE(review): ttydevsw_free() asserts TF_GONE; callers are presumably
 * expected to have run tty_rel_gone() first — confirm. Also note the
 * device is unregistered only after the driver softc is freed.
 */
void lwp_tty_delete(lwp_tty_t tp)
{
    /*
     * ttyydev_leave() usually frees the i/o queues earlier, but it is
     * not always called between queue allocation and here. The queues
     * may be allocated by ioctls on a pty control device without the
     * corresponding pty slave device ever being open, or after it is
     * closed.
     */
    ttyinq_free(&tp->t_inq);
    ttyoutq_free(&tp->t_outq);
    /* kick out any pollers before the waitqueues disappear */
    rt_wqueue_wakeup_all(&tp->t_inpoll, (void *)POLLHUP);
    rt_wqueue_wakeup_all(&tp->t_outpoll, (void *)POLLHUP);
#ifdef USING_BSD_POLL
    knlist_destroy(&tp->t_inpoll.si_note);
    knlist_destroy(&tp->t_outpoll.si_note);
#endif
    cv_destroy(&tp->t_inwait);
    cv_destroy(&tp->t_outwait);
    cv_destroy(&tp->t_bgwait);
    cv_destroy(&tp->t_dcdwait);
    cv_destroy(&tp->t_outserwait);
    /* only detach the mutex we own; custom locks belong to the driver */
    if (tp->t_mtx == &tp->t_mtxobj)
        rt_mutex_detach(&tp->t_mtxobj);
    ttydevsw_free(tp);
    rt_device_unregister(&tp->parent);
    rt_free(tp);
}
/*
 * Report on state of foreground process group.
 */
void tty_info(struct lwp_tty *tp)
{
    /* TODO: implement BSD SIGINFO-style status reporting; currently a stub */
    return;
}

View File

@ -0,0 +1,67 @@
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-11-13     Shell        init ver.
 */
#ifndef __LWP_TTY_INTERNAL_H__
#define __LWP_TTY_INTERNAL_H__

#include "lwp.h"
#include "terminal.h"

/* BSD character-device / file operation tables implemented by this port */
extern struct cdevsw bsd_ttydev_methods;
extern struct bsd_fileops bsd_ptsdev_methods;

/* bsd devsw porting */
void bsd_devsw_init(struct lwp_ttydevsw *tsw);

/**
 * Do not assert RTS or DTR automatically. If CNO_RTSDTR is set then the RTS and
 * DTR lines will not be asserted when the device is opened. As a result, this
 * flag is only useful on initial-state devices.
 *
 * Note: this feature is not used on the smart system, so this flag is always 0.
 */
#define CNO_RTSDTR 0

/* Waking up readers/writers. */
int tty_wait(struct lwp_tty *tp, struct rt_condvar *cv);
int tty_wait_background(struct lwp_tty *tp, struct rt_thread *td, int sig);
int tty_timedwait(struct lwp_tty *tp, struct rt_condvar *cv, rt_tick_t timeout);
void tty_wakeup(struct lwp_tty *tp, int flags);

/* report state of the foreground process group (implemented as TODO stub) */
void tty_info(struct lwp_tty *tp);

/* pseudo-terminal (pts) helpers */
void pts_set_lock(lwp_tty_t pts, rt_bool_t is_lock);
rt_bool_t pts_is_locked(lwp_tty_t pts);
int pts_get_pktmode(lwp_tty_t pts);
int pts_alloc(int fflags, struct rt_thread *td, struct dfs_file *ptm_file);

/* ioctl dispatch and job-control plumbing */
int lwp_tty_ioctl_adapter(lwp_tty_t tp, int cmd, int oflags, void *args, rt_thread_t td);
int lwp_tty_set_ctrl_proc(lwp_tty_t tp, rt_thread_t td);
int lwp_tty_assign_foreground(lwp_tty_t tp, rt_thread_t td, int pgid);
int lwp_tty_bg_stop(struct lwp_tty *tp, struct rt_condvar *cv);

/* True when process p is a session leader (its pid equals its sid). */
rt_inline rt_bool_t is_sess_leader(rt_lwp_t p)
{
    /**
     * Note: a session leader never loses its session, so once this is
     * true it stays true
     */
    return p->pid == p->sid;
}

/* True when tp is the controlling terminal of process p. */
rt_inline int tty_is_ctty(struct lwp_tty *tp, struct rt_lwp *p)
{
    tty_assert_locked(tp);
    return p->pgrp->session == tp->t_session && p->term_ctrlterm;
}

#endif /* __LWP_TTY_INTERNAL_H__ */

View File

@ -0,0 +1,342 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-12-07 Shell init ver.
*/
#define DBG_TAG "lwp.tty"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "tty_config.h"
#include "tty_internal.h"
#include "bsd_porting.h"
#include "terminal.h"
#include <fcntl.h>
static struct dfs_file_ops ptm_fops;
/* Open handler of /dev/ptmx: allocate a fresh pts pair for the caller. */
static int ptm_fops_open(struct dfs_file *file)
{
    rt_uint32_t open_flags = file->flags;
    int err;

    /* the backing vnode must exist and carry the device data */
    if (!file->vnode || !file->vnode->data)
        return -EINVAL;

    /* filter out illegal flags: only these may be set by the caller */
    if (open_flags & ~(O_RDWR | O_NOCTTY | O_CLOEXEC | O_LARGEFILE))
        return -EINVAL;

    err = pts_alloc(FFLAGS(open_flags & O_ACCMODE), rt_thread_self(), file);
    if (err == 0)
    {
        /* detach this open file from the default devfs operations */
        file->vnode->fops = &ptm_fops;
    }
    return err;
}
static int ptm_fops_close(struct dfs_file *file)
{
int rc;
lwp_tty_t tp;
rt_device_t device;
if (file->data)
{
device = (rt_device_t)file->data;
tp = rt_container_of(device, struct lwp_tty, parent);
rc = bsd_ptsdev_methods.fo_close(tp, rt_thread_self());
}
else
{
rc = -EINVAL;
}
return rc;
}
/* Read from the pty master via the BSD uio interface.
 * Returns bytes transferred, or a negative errno from fo_read(). */
static ssize_t ptm_fops_read(struct dfs_file *file, void *buf, size_t count,
                             off_t *pos)
{
    struct lwp_tty *tty;
    rt_device_t dev;
    struct uio io;
    struct iovec vec;
    ssize_t nread = 0;
    int err;

    if (file->data)
    {
        dev = (rt_device_t)file->data;
        tty = rt_container_of(dev, struct lwp_tty, parent);

        /* describe the destination buffer to the uio machinery */
        vec.iov_base = (void *)buf;
        vec.iov_len = count;
        io.uio_offset = file->fpos;
        io.uio_resid = count;
        io.uio_iov = &vec;
        io.uio_iovcnt = 1;
        io.uio_rw = UIO_READ;

        err = bsd_ptsdev_methods.fo_read(tty, &io, 0, file->flags,
                                         rt_thread_self());
        /* bytes consumed = requested - residual */
        nread = (ssize_t)count - io.uio_resid;
        if (err)
        {
            nread = err;
        }
        /* propagate the (possibly advanced) offset back to the file */
        file->fpos = io.uio_offset;
    }
    return nread;
}
/* Write to the pty master via the BSD uio interface.
 * Returns bytes transferred, or a negative errno from fo_write(). */
static ssize_t ptm_fops_write(struct dfs_file *file, const void *buf,
                              size_t count, off_t *pos)
{
    struct lwp_tty *tty;
    rt_device_t dev;
    struct uio io;
    struct iovec vec;
    ssize_t nwritten = 0;
    int err;

    if (file->data)
    {
        dev = (rt_device_t)file->data;
        tty = rt_container_of(dev, struct lwp_tty, parent);

        /* describe the source buffer to the uio machinery */
        vec.iov_base = (void *)buf;
        vec.iov_len = count;
        io.uio_offset = file->fpos;
        io.uio_resid = count;
        io.uio_iov = &vec;
        io.uio_iovcnt = 1;
        io.uio_rw = UIO_WRITE;

        err = bsd_ptsdev_methods.fo_write(tty, &io, 0, file->flags,
                                          rt_thread_self());
        if (err)
        {
            nwritten = err;
        }
        else
        {
            /* bytes consumed = requested - residual */
            nwritten = (ssize_t)count - io.uio_resid;
        }
        /* propagate the (possibly advanced) offset back to the file */
        file->fpos = io.uio_offset;
    }
    return nwritten;
}
/* ioctl on the pty master: handle the Linux pty-specific requests here,
 * delegate everything else to the BSD pts layer. */
static int ptm_fops_ioctl(struct dfs_file *file, int cmd, void *arg)
{
    rt_ubase_t request = (unsigned int)cmd;
    rt_device_t dev;
    lwp_tty_t tty;
    int value;
    int rc;

    if (!file->data)
        return -EINVAL;

    dev = (rt_device_t)file->data;
    tty = rt_container_of(dev, struct lwp_tty, parent);

    switch (request)
    {
    case TIOCSPTLCK:
        /* lock/unlock the slave side of the pty */
        if (lwp_get_from_user(&value, arg, sizeof(int)) != sizeof(int))
            return -EFAULT;
        pts_set_lock(tty, value);
        rc = 0;
        break;
    case TIOCGPTLCK:
        /* query the slave-side lock state */
        value = pts_is_locked(tty);
        if (lwp_put_to_user(arg, &value, sizeof(int)) != sizeof(int))
            return -EFAULT;
        rc = 0;
        break;
    case TIOCGPKT:
        /* query packet mode */
        value = pts_get_pktmode(tty);
        if (lwp_put_to_user(arg, &value, sizeof(int)) != sizeof(int))
            return -EFAULT;
        rc = 0;
        break;
    default:
        rc = bsd_ptsdev_methods.fo_ioctl(tty, request, arg, 0,
                                         FFLAGS(file->flags),
                                         rt_thread_self());
        break;
    }
    return rc;
}
/* flush is meaningless on the pty master; always reject */
static int ptm_fops_flush(struct dfs_file *file)
{
    return -EINVAL;
}
/* terminals are not seekable */
static off_t ptm_fops_lseek(struct dfs_file *file, off_t offset, int wherece)
{
    return -EINVAL;
}
/* truncation does not apply to a character device */
static int ptm_fops_truncate(struct dfs_file *file, off_t offset)
{
    return -EINVAL;
}
static int ptm_fops_poll(struct dfs_file *file, struct rt_pollreq *req)
{
int rc;
rt_device_t device;
struct lwp_tty *tp;
if (file->data)
{
device = (rt_device_t)file->data;
tp = rt_container_of(device, struct lwp_tty, parent);
rc = bsd_ptsdev_methods.fo_poll(tp, req, 0, rt_thread_self());
}
else
{
rc = -1;
}
return rc;
}
/* memory-mapping a terminal is not supported */
static int ptm_fops_mmap(struct dfs_file *file, struct lwp_avl_struct *mmap)
{
    return -EINVAL;
}
/* POSIX record locks are not supported on the pty master */
static int ptm_fops_lock(struct dfs_file *file, struct file_lock *flock)
{
    return -EINVAL;
}
/* flock()-style locks are not supported on the pty master */
static int ptm_fops_flock(struct dfs_file *file, int operation, struct file_lock *flock)
{
    return -EINVAL;
}
/* file operations of the ptmx (pty master) device exposed through devfs */
static struct dfs_file_ops ptm_fops = {
    .open = ptm_fops_open,
    .close = ptm_fops_close,
    .ioctl = ptm_fops_ioctl,
    .read = ptm_fops_read,
    .write = ptm_fops_write,
    .flush = ptm_fops_flush,
    .lseek = ptm_fops_lseek,
    .truncate = ptm_fops_truncate,
    .poll = ptm_fops_poll,
    .mmap = ptm_fops_mmap,
    .lock = ptm_fops_lock,
    .flock = ptm_fops_flock,
};
/**
 * @brief Register a ptmx device under the given devfs root.
 *
 * @param ptmx_device device object registered as "<root_path>/ptmx"
 * @param root_path   mount-point prefix, e.g. "/dev/pts"
 * @return RT_EOK on success; -RT_ENOMEM when the name buffer cannot be
 *         allocated; otherwise the error from rt_device_register()
 */
rt_err_t lwp_ptmx_init(rt_device_t ptmx_device, const char *root_path)
{
    char *device_name;
    int root_len;
    const char *dev_rel_path;
    rt_err_t rc;

    root_len = strlen(root_path);
    dev_rel_path = "/ptmx";
    /* sizeof("/ptmx") also accounts for the terminating NUL */
    device_name = rt_malloc(root_len + sizeof("/ptmx"));
    if (device_name)
    {
        /* Register device */
        sprintf(device_name, "%s%s", root_path, dev_rel_path);
        /* BUG FIX: the result of rt_device_register() used to be ignored,
         * so a name clash or invalid name was reported as success */
        rc = rt_device_register(ptmx_device, device_name, 0);
        if (rc == RT_EOK)
        {
            /* Setup fops */
            ptmx_device->fops = &ptm_fops;
        }
        rt_free(device_name);
    }
    else
    {
        rc = -RT_ENOMEM;
    }
    return rc;
}
/* system level ptmx */
static struct rt_device sysptmx;
static struct dfs_file_ops sysptmx_file_ops;
/* Resolve the system-level /dev/ptmx symlink to the per-root pts/ptmx.
 * Always reports success (0); the target is a fixed relative path. */
static rt_err_t sysptmx_readlink(struct rt_device *dev, char *buf, int len)
{
    /* TODO: support multi-root ? */
    strncpy(buf, "pts/ptmx", len);
    /* BUG FIX: strncpy() does not NUL-terminate on truncation (CERT
     * STR32-C); terminate explicitly so callers never read past buf */
    if (len > 0)
        buf[len - 1] = '\0';
    return 0;
}
static int _sys_ptmx_init(void)
{
rt_err_t rc;
rt_device_t sysptmx_rtdev = &sysptmx;
/* setup system level device */
sysptmx_rtdev->type = RT_Device_Class_Char;
sysptmx_rtdev->ops = RT_NULL;
rc = rt_device_register(sysptmx_rtdev, "ptmx", RT_DEVICE_FLAG_DYNAMIC);
if (rc == RT_EOK)
{
sysptmx_rtdev->readlink = &sysptmx_readlink;
sysptmx_rtdev->fops = &sysptmx_file_ops;
}
return rc;
}
INIT_DEVICE_EXPORT(_sys_ptmx_init);

View File

@ -0,0 +1,4 @@
menuconfig RT_USING_VDSO
bool "vDSO"
default y
depends on RT_USING_SMART && ARCH_ARMV8

View File

@ -0,0 +1,48 @@
import os
import rtconfig
import subprocess
from building import *

Import('RTT_ROOT')

group = []
cwd = GetCurrentDir()
CPPPATH = [cwd, cwd + "/kernel"]

# vDSO support is optional; bail out early when disabled
if not GetDepend(['RT_USING_VDSO']):
    Return('group')

# Non-aarch64 targets only build the generic C sources
if rtconfig.ARCH != "aarch64":
    src = Glob('*.c')
    group = DefineGroup('VDSO', src, depend = ['RT_USING_SMART','RT_USING_VDSO'], CPPPATH = CPPPATH)
    Return('group')

list = os.listdir(cwd)
src = Glob('kernel/*.c')
src +=Glob('kernel/*.S')

# Generate the linker script from its .S template on first build
if not os.path.exists(cwd + "/user/vdso.lds"):
    Preprocessing("user/vdso.lds.S", ".lds", CPPPATH=[cwd])

#aarch64 vdso xmake
# vdso_file = os.path.join(cwd, 'usr', 'xmake.lua')
# command = ["xmake", "-F", vdso_file]
# clean = ["xmake", "clean"]

# Build (or clean) the user-side vDSO shared object with a nested scons run
vdso_file = os.path.join(cwd, 'user', 'SConstruct')
command = ["scons", "-f", vdso_file]
clean = ["scons", "-f", vdso_file, "--clean"]
if not GetOption('clean'):
    result = subprocess.run(command)
else:
    result = subprocess.run(clean)
if result.returncode == 0:
    print("Command executed successfully")
else:
    # abort the whole build when the vDSO image cannot be produced
    print("Command failed with exit code:", result.returncode)
    exit(1)

group = DefineGroup('VDSO', src, depend = ['RT_USING_SMART','RT_USING_VDSO'], CPPPATH = CPPPATH)
Return('group')

View File

@ -0,0 +1,39 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-07-04 rcitach init ver.
*/
#ifndef _VDSO_H
#define _VDSO_H
#include <lwp.h>
#include <mmu.h>
#include <vdso_config.h>
#include <vdso_datapage.h>
#ifdef __cplusplus
extern "C" {
#endif
extern char __vdso_text_start[];
extern char __vdso_text_end[];
#define ELF_HEAD "\177ELF"
#define ELF_HEAD_LEN 4
#define MAX_PAGES 5
#define __page_aligned_data __attribute__((section(".data.vdso.datapage"))) __attribute__((aligned(VDSO_PAGE_SIZE)))
int arch_setup_additional_pages(struct rt_lwp *lwp);
void rt_vdso_update_glob_time(void);
#ifdef __cplusplus
}
#endif
#endif /* _VDSO_H */

View File

@ -0,0 +1,48 @@
/*
 * Copyright (c) 2006-2024 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-07-04     rcitach      init ver.
 */
#ifndef _VDSO_KDATA_H
#define _VDSO_KDATA_H

#include <rtatomic.h>
#include <vdso_datapage.h>

#ifdef __cplusplus
extern "C" {
#endif

/* kernel-side pointer to the shared vDSO data pages (one per clock base) */
extern struct vdso_data *vdso_data;

rt_inline
struct vdso_data *_get_k_vdso_data(void)
{
    return vdso_data;
}
#define get_k_vdso_data _get_k_vdso_data

/*
 * Seqcount write side: each call bumps the sequence of both clock bases
 * by one, so 'seq' is odd between write_begin and write_end. Userspace
 * readers spin while the count is odd and retry when it changed across
 * the read.
 */
rt_inline
void rt_vdso_write_begin(struct vdso_data *vd)
{
    rt_atomic_add(&vd[CS_HRES_COARSE].seq, 1);
    rt_atomic_add(&vd[CS_RAW].seq, 1);
}

rt_inline
void rt_vdso_write_end(struct vdso_data *vd)
{
    rt_atomic_add(&vd[CS_HRES_COARSE].seq, 1);
    rt_atomic_add(&vd[CS_RAW].seq, 1);
}

#ifdef __cplusplus
}
#endif

#endif /* _VDSO_KDATA_H */

View File

@ -0,0 +1,21 @@
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-07-04     rcitach      init ver.
 */

/* Embed the prebuilt vDSO shared object (VDSO_PATH) into the kernel image
 * as page-aligned read-only data so it can be mapped directly into user
 * address spaces between __vdso_text_start and __vdso_text_end. */
#include <vdso_config.h>

    .globl __vdso_text_start, __vdso_text_end
    .section .rodata
    .balign VDSO_PAGE_SIZE
__vdso_text_start:
    .incbin VDSO_PATH
    .balign VDSO_PAGE_SIZE
__vdso_text_end:
    .previous

View File

@ -0,0 +1,39 @@
import os
import sys
import subprocess

# argv[2] is the path of this SConstruct, passed by the parent build via
# "scons -f <path>"; derive the vdso user/ and root directories from it
arguments = sys.argv[2]
vdso_usr = os.path.dirname(arguments)
vdso_root = os.path.dirname(vdso_usr)

# cross-toolchain location and prefix come from the environment
EXEC_PATH = os.getenv('RTT_EXEC_PATH') or '/usr/bin'
PREFIX = os.getenv('RTT_CC_PREFIX') or 'aarch64-none-elf-'
CC = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
CPP = PREFIX + 'cpp'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'

DEVICE = ' -march=armv8-a -mtune=cortex-a53 -ftree-vectorize -ffast-math -funwind-tables -fno-strict-aliasing'
CXXFLAGS = DEVICE + ' -Wall -fdiagnostics-color=always'
AFLAGS = ' -x assembler-with-cpp'
# -fPIC: the vDSO is a shared object mapped at an arbitrary user address
CFLAGS = DEVICE + ' -Wall -Wno-cpp -std=gnu99 -fdiagnostics-color=always -fPIC -O2'
# link with the dedicated vdso.lds linker script
LFLAGS = DEVICE + ' -Bsymbolic -Wl,--gc-sections,-u,system_vectors -T {path}/vdso.lds'.format(path=vdso_usr)
CFLAGS += " -I {path} -I{path}/user".format(path=vdso_root)

env = Environment(tools=['gcc', 'link'],
                  AS = AS, ASFLAGS = AFLAGS,
                  CC = CC, CFLAGS = CFLAGS,
                  CXX = CXX, CXXFLAGS = CXXFLAGS,
                  AR = AR,
                  LINK = LINK, LINKFLAGS = LFLAGS)
env.PrependENVPath('PATH', EXEC_PATH)

# build user/vdso_sys.c into user/build/librtos_vdso.so
src = os.path.join(vdso_usr,'vdso_sys.c')
target_name = 'librtos_vdso.so'
target = os.path.join(vdso_usr, "build", target_name)
shared_lib = env.SharedLibrary(target=target, source=src)
env.Default(shared_lib)

View File

@ -0,0 +1,60 @@
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-07-04     rcitach      init ver.
 */
#include <vdso_config.h>

OUTPUT_FORMAT("elf64-littleaarch64", "elf64-littleaarch64", "elf64-littleaarch64")
OUTPUT_ARCH(aarch64)

SECTIONS
{
    /* the shared data pages sit immediately below the vDSO text mapping */
    PROVIDE(_vdso_data = . - __VVAR_PAGES * VDSO_PAGE_SIZE);
    . = SIZEOF_HEADERS;

    .hash : { *(.hash) } :text
    .gnu.hash : { *(.gnu.hash) }
    .dynsym : { *(.dynsym) }
    .dynstr : { *(.dynstr) }
    .gnu.version : { *(.gnu.version) }
    .gnu.version_d : { *(.gnu.version_d) }
    .gnu.version_r : { *(.gnu.version_r) }

    .dynamic : { *(.dynamic) } :text :dynamic

    .rela.dyn : ALIGN(8) { *(.rela .rela*) }

    .rodata : {
        *(.rodata*)
        *(.got)
        *(.got.plt)
        *(.plt)
        *(.plt.*)
        *(.iplt)
        *(.igot .igot.plt)
    } :text

    /* the vDSO is mapped read-only: discard any writable data */
    /DISCARD/ : {
        *(.data .data.* .sdata*)
        *(.bss .sbss .dynbss .dynsbss)
    }
}

PHDRS
{
    text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* R-X */
    dynamic PT_DYNAMIC FLAGS(4);         /* R-- */
}

/* export only the clock_gettime entry, Linux-compatible version tag */
VERSION
{
    LINUX_2.6.39 {
    global:
        __kernel_clock_gettime;
    local: *;
    };
}

View File

@ -0,0 +1,95 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-07-04 rcitach init ver.
*/
#include <stdio.h>
#include <time.h>
#include <errno.h>
#include <stdbool.h>
#include <vdso_sys.h>
#ifndef rt_vdso_cycles_ready
/* Arch hook: report whether the counter value is usable; default: always. */
static inline bool rt_vdso_cycles_ready(uint64_t cycles)
{
    return true;
}
#endif
#ifndef rt_vdso_get_ns
/* Convert elapsed counter ticks since 'last' into nanoseconds using the
 * hardware counter frequency. */
static inline
uint64_t rt_vdso_get_ns(uint64_t cycles, uint64_t last)
{
    return (cycles - last) * NSEC_PER_SEC / __arch_get_hw_frq();
}
#endif
/* Read a clock from the shared data page without entering the kernel.
 * Returns 0 and fills *ts, or -1 when the counter is not ready. */
static int
__rt_vdso_getcoarse(struct timespec *ts, clockid_t clock, const struct vdso_data *vdns)
{
    const struct vdso_data *vd;
    const struct timespec *vdso_ts;
    uint32_t seq;
    uint64_t sec, last, ns, cycles;

    /* select the clock base: CS_RAW only for CLOCK_MONOTONIC_RAW */
    if (clock != CLOCK_MONOTONIC_RAW)
        vd = &vdns[CS_HRES_COARSE];
    else
        vd = &vdns[CS_RAW];
    vdso_ts = &vd->basetime[clock];

    /* seqcount read loop: retry if the kernel updated the page mid-read */
    do {
        seq = rt_vdso_read_begin(vd);
        cycles = __arch_get_hw_counter(vd->clock_mode, vd);
        if (unlikely(!rt_vdso_cycles_ready(cycles)))
            return -1;
        ns = vdso_ts->tv_nsec;
        last = vd->cycle_last;
        /* extrapolate from the last kernel-published timestamp */
        ns += rt_vdso_get_ns(cycles, last);
        sec = vdso_ts->tv_sec;
    } while (unlikely(rt_vdso_read_retry(vd, seq)));

    /* normalize: carry whole seconds out of the nanosecond count */
    ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
    ts->tv_nsec = ns;
    return 0;
}
/* Dispatch a clock id to the appropriate base time. */
static inline int
__vdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
                            struct timespec *ts)
{
    u_int32_t msk;

    /* reject out-of-range clock ids */
    if (unlikely((u_int32_t) clock >= MAX_CLOCKS))
        return -1;
    msk = 1U << clock;
    if (likely(msk & VDSO_REALTIME))
        return __rt_vdso_getcoarse(ts,CLOCK_REALTIME,vd);
    else if (msk & VDSO_MONOTIME)
        return __rt_vdso_getcoarse(ts,CLOCK_MONOTONIC,vd);
    else
        /* NOTE(review): returns positive ENOENT while other failures use
         * -1; the Linux vDSO convention is -ENOENT — confirm with callers */
        return ENOENT;
}
/* Thin wrapper kept for parity with the Linux vDSO code structure. */
static __maybe_unused int
rt_vdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock,
                           struct timespec *ts)
{
    return __vdso_clock_gettime_common(vd, clock, ts);
}
/* Entry point exported to userspace (see the linker version script). */
int
__kernel_clock_gettime(clockid_t clock, struct timespec *ts)
{
    const struct vdso_data *data = __arch_get_vdso_data();
    return rt_vdso_clock_gettime_data(data, clock, ts);
}

View File

@ -0,0 +1,153 @@
/*
 * Copyright (c) 2006-2024 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-07-04     rcitach      init ver.
 */
#ifndef ASM_VDSO_SYS_H
#define ASM_VDSO_SYS_H

#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <vdso_config.h>
#include <vdso_datapage.h>

#define __always_unused __attribute__((__unused__))
#define __maybe_unused __attribute__((__unused__))

#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Create an address dependency on 'val' so the counter read cannot be
 * reordered past the dependent load (aarch64 ordering idiom). */
#define arch_counter_enforce_ordering(val) do { \
    uint64_t tmp, _val = (val); \
    \
    asm volatile( \
    " eor %0, %1, %1\n" \
    " add %0, sp, %0\n" \
    " ldr xzr, [%0]" \
    : "=r" (tmp) : "r" (_val)); \
} while (0)

/* Read the virtual counter (CNTVCT_EL0) with ordering enforced */
static inline uint64_t __arch_get_hw_counter()
{
    uint64_t res;
    __asm__ volatile("mrs %0, CNTVCT_EL0":"=r"(res));
    arch_counter_enforce_ordering(res);
    return res;
}

/* Read the counter frequency from CNTFRQ_EL0 */
static inline uint64_t __arch_get_hw_frq()
{
    uint64_t res;
    __asm__ volatile("mrs %0, CNTFRQ_EL0":"=r"(res));
    arch_counter_enforce_ordering(res);
    return res;
}

/* Division by repeated subtraction: returns dividend / divisor and stores
 * the remainder; efficient when the quotient is expected to be small. */
static inline uint32_t
__iter_div_u64_rem(uint64_t dividend, uint32_t divisor, uint64_t *remainder)
{
    uint32_t ret = 0;
    while (dividend >= divisor) {
        /* The following asm() prevents the compiler from
           optimising this loop into a modulo operation. */
        __asm__("" : "+rm"(dividend));
        dividend -= divisor;
        ret++;
    }
    *remainder = dividend;
    return ret;
}

#define __RT_STRINGIFY(x...) #x
#define RT_STRINGIFY(x...) __RT_STRINGIFY(x)
/* Emit a hardware barrier instruction with a compiler barrier attached */
#define rt_hw_barrier(cmd, ...) \
    __asm__ volatile (RT_STRINGIFY(cmd) " "RT_STRINGIFY(__VA_ARGS__):::"memory")
#define rt_hw_isb() rt_hw_barrier(isb)
#define rt_hw_dmb() rt_hw_barrier(dmb, ish)
#define rt_hw_wmb() rt_hw_barrier(dmb, ishst)
#define rt_hw_rmb() rt_hw_barrier(dmb, ishld)
#define rt_hw_dsb() rt_hw_barrier(dsb, ish)

#ifndef barrier
/* The "volatile" is due to gcc bugs */
# define barrier() __asm__ __volatile__("": : :"memory")
#endif

/* polite spin-wait hint while the writer holds the seqcount */
static inline void cpu_relax(void)
{
    __asm__ volatile("yield" ::: "memory");
}

#define __READ_ONCE_SIZE \
({ \
    switch (size) { \
    case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
    case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
    case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
    case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
    default: \
        barrier(); \
        __builtin_memcpy((void *)res, (const void *)p, size); \
        barrier(); \
    } \
})

static inline
void __read_once_size(const volatile void *p, void *res, int size)
{
    __READ_ONCE_SIZE;
}

/* single, non-tearing load of x (port of Linux READ_ONCE) */
#define __READ_ONCE(x, check) \
({ \
    union { typeof(x) __val; char __c[1]; } __u; \
    if (check) \
        __read_once_size(&(x), __u.__c, sizeof(x)); \
    smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
    __u.__val; \
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/* data pages provided by the kernel; the linker script places _vdso_data
 * just below the vDSO text mapping */
extern struct vdso_data _vdso_data[CS_BASES] __attribute__((visibility("hidden")));
static inline struct vdso_data *__arch_get_vdso_data(void)
{
    return _vdso_data;
}

/* Seqcount read side: wait until the sequence is even (no write in
 * progress), then return it so the caller can detect concurrent updates. */
static inline uint32_t rt_vdso_read_begin(const struct vdso_data *vd)
{
    uint32_t seq;
    while (unlikely((seq = READ_ONCE(vd->seq)) & 1))
        cpu_relax();
    rt_hw_rmb();
    return seq;
}

/* Returns non-zero when the data changed since rt_vdso_read_begin(). */
static inline uint32_t rt_vdso_read_retry(const struct vdso_data *vd,
                                          uint32_t start)
{
    uint32_t seq;
    rt_hw_rmb();
    seq = READ_ONCE(vd->seq);
    return seq != start;
}
#endif

View File

@ -0,0 +1,58 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-07-04 rcitach init ver.
*/
-- Standalone cross toolchain used to build the user-side vDSO image
toolchain("aarch64-smart-musleabi")
    set_kind("standalone")

    local exec_path = os.getenv("RTT_EXEC_PATH") or "/opt/aarch64-linux-musleabi/bin/"
    local sdkdir = exec_path .. "/../"
    local incdir = os.curdir() .. "/../include"
    local device = '-march=armv8-a -mtune=cortex-a53 -ftree-vectorize -ffast-math -funwind-tables -fno-strict-aliasing'

    set_bindir(exec_path)
    set_sdkdir(sdkdir)
    set_toolset("sh", "aarch64-linux-musleabi-gcc")

    on_load(function(toolchain)
        toolchain:load_cross_toolchain()
        toolchain:add("cxflags", device)
        toolchain:add("cxflags", "-Wall -Wno-cpp -std=gnu99")
        toolchain:add("cxflags", "-fdiagnostics-color=always")
        toolchain:add("cxflags", "-O2")
        toolchain:add("cxflags", "-I" .. incdir)
        -- shared-object link flags: dead-strip plus the custom vdso.lds script
        toolchain:add("shflags", device)
        toolchain:add("shflags", "-Wl,--gc-sections")
        toolchain:add("shflags", "-u,system_vectors")
        toolchain:add("shflags", "-T vdso.lds")
    end)
toolchain_end()
set_config("plat", "cross")
set_config("target_os", "rt-smart")
set_config("arch", "aarch64")
-- Preprocess the vdso.lds.S linker-script template into a plain .lds file
rule("vdso_lds")
    set_extensions(".lds.S")
    on_buildcmd_file(function (target, batchcmds, sourcefile, opt)
        local incdir = os.curdir() .. "/../include"
        local targetfile = path.basename(sourcefile)
        -- BUG FIX: the env var is RTT_CC_PREFIX; the previous lookup of
        -- "RTT_CC_PREFIX=" (trailing '=') could never match, so the
        -- toolchain-prefix override was silently ignored
        local prefix = os.getenv("RTT_CC_PREFIX") or "aarch64-linux-musleabi-"
        batchcmds:vrunv(prefix .. "gcc", {"-E", "-P", sourcefile, "-o", targetfile, "-I", incdir})
    end)
-- Build the user-side vDSO (vdso_sys.c + preprocessed linker script)
-- into build/librtos_vdso.so
target("rtos_vdso")
    set_toolchains("aarch64-smart-musleabi")
    add_rules("vdso_lds")
    set_kind("shared")
    add_files("vdso.lds.S")
    add_files("vdso_sys.c")
    set_targetdir("build")
target_end()

View File

@ -0,0 +1,38 @@
/*
 * Copyright (c) 2006-2024 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-07-04     rcitach      init ver.
 */
#ifndef __ASM_VDSO_H
#define __ASM_VDSO_H

#ifdef __cplusplus
extern "C" {
#endif

/* number of shared data pages mapped below the vDSO text */
#define __VVAR_PAGES 2

#define VDSO_PAGE_SHIFT 12
#define VDSO_PAGE_SIZE (1 << VDSO_PAGE_SHIFT)

#define BIT_MASK(nr) ((1) << (nr))

#ifndef read_barrier_depends
#define read_barrier_depends() do { } while (0)
#endif
#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends() read_barrier_depends()
#endif

/* location of the prebuilt user-side vDSO image embedded by vdso.S */
#define VDSO_PATH "../user/build/librtos_vdso.so"

#ifdef __cplusplus
}
#endif

#endif /* __ASM_VDSO_H */

View File

@ -0,0 +1,75 @@
/*
 * Copyright (c) 2006-2024 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-07-04     rcitach      init ver.
 */
#ifndef _VDSO_DATAPAGE_H
#define _VDSO_DATAPAGE_H

#include <time.h>
#include <sys/types.h>
#include "vdso_config.h"

#ifdef __cplusplus
extern "C" {
#endif

typedef signed char __s8;
typedef signed short __s16;
typedef signed int __s32;
typedef signed long __s64;
typedef unsigned char __u8;
typedef unsigned short __u16;
typedef unsigned int __u32;
typedef unsigned long __u64;

#define MAX_CLOCKS 16
#define VDSO_BASES (CLOCK_TAI + 1)
/* clock ids served from the realtime base */
#define VDSO_REALTIME (BIT_MASK(CLOCK_REALTIME) | \
                       BIT_MASK(CLOCK_REALTIME_COARSE))
/* clock ids served from the monotonic base */
#define VDSO_MONOTIME (BIT_MASK(CLOCK_MONOTONIC) | \
                       BIT_MASK(CLOCK_MONOTONIC_COARSE) | \
                       BIT_MASK(CLOCK_MONOTONIC_RAW) | \
                       BIT_MASK(CLOCK_BOOTTIME))

/* indices of the two vdso_data instances (clock bases) */
#define CS_HRES_COARSE 0
#define CS_RAW 1
#define CS_BASES (CS_RAW + 1)

/* 2018-01-30 14:44:50 = RTC_TIME_INIT(2018, 1, 30, 14, 44, 50) */
#define RTC_VDSOTIME_INIT(year, month, day, hour, minute, second) \
    {.tm_year = year - 1900, .tm_mon = month - 1, .tm_mday = day, .tm_hour = hour, .tm_min = minute, .tm_sec = second}
#ifndef SOFT_RTC_VDSOTIME_DEFAULT
#define SOFT_RTC_VDSOTIME_DEFAULT RTC_VDSOTIME_INIT(2018, 1, 1, 0, 0 ,0)
#endif

/* kernel -> user time snapshot shared via the vDSO data page,
 * guarded by the 'seq' sequence counter */
struct vdso_data {
    uint32_t seq;               /* odd while an update is in progress */
    uint32_t clock_mode;
    uint64_t realtime_initdata;
    uint64_t cycle_last;        /* counter value at the last update */
    struct timespec basetime[VDSO_BASES];
};
typedef struct vdso_data *vdso_data_t;

#define MSEC_PER_SEC 1000L
#define USEC_PER_MSEC 1000L
#define NSEC_PER_USEC 1000L
#define NSEC_PER_MSEC 1000000L
#define USEC_PER_SEC 1000000L
#define NSEC_PER_SEC 1000000000L
#define FSEC_PER_SEC 1000000000000000LL

#ifdef __cplusplus
}
#endif

#endif /* _VDSO_DATAPAGE_H */

View File

@ -0,0 +1,23 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-07-04 rcitach init ver.
*/
#include <rtthread.h>
#include <lwp_user_mm.h>
#include "vdso.h"

/* Weak default: architecture code overrides this to map the vDSO pages
 * into the new lwp's address space; -RT_ERROR means no vDSO support on
 * this architecture. */
rt_weak int arch_setup_additional_pages(struct rt_lwp *lwp)
{
    return -RT_ERROR;
}

/* Weak default: nothing to publish when no vDSO support is present. */
rt_weak void rt_vdso_update_glob_time(void)
{
}