原始版本

This commit is contained in:
冯佳
2025-06-19 21:56:46 +08:00
parent fe98e5f010
commit a4841450cf
4152 changed files with 1910684 additions and 0 deletions

View File

@ -0,0 +1,27 @@
# RT-Thread building script for bridge
#
# Selects the libcpu sub-directories for the configured RISC-V CPU:
# the shared 32-bit or 64-bit common code, plus the vendor/CPU port.
import os
from building import *
Import('rtconfig')

# CPUs that build against the 64-bit common code base.
common64_arch = ['virt64', 'c906', 'c908']
cwd = GetCurrentDir()
group = []
# Entries next to this script; used to detect a CPU port that lives
# directly under cwd when no VENDOR level is configured.
# (Renamed from `list`, which shadowed the Python builtin.)
cpu_dirs = os.listdir(cwd)

# add common code files
if rtconfig.CPU in common64_arch:
    group += SConscript(os.path.join('common64', 'SConscript'))
else:
    group += SConscript(os.path.join('common', 'SConscript'))
    group += SConscript(os.path.join('vector', 'SConscript'))

# cpu porting code files
if 'VENDOR' in vars(rtconfig) and rtconfig.VENDOR != '':
    group = group + SConscript(os.path.join(rtconfig.VENDOR, rtconfig.CPU, 'SConscript'))
elif rtconfig.CPU in cpu_dirs:
    group = group + SConscript(os.path.join(rtconfig.CPU, 'SConscript'))

Return('group')

View File

@ -0,0 +1,15 @@
# RT-Thread building script for component
from building import *
cwd = GetCurrentDir()
src = Glob('*.c') + Glob('*.cpp') + Glob('*_gcc.S')
CPPPATH = [cwd]
ASFLAGS = ' -I ' + cwd
if not GetDepend('RT_USING_HW_ATOMIC'):
SrcRemove(src, 'atomic_riscv.c')
group = DefineGroup('libcpu', src, depend = [''], CPPPATH = CPPPATH, ASFLAGS = ASFLAGS)
Return('group')

View File

@ -0,0 +1,159 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-03-14 WangShun first version
*/
#include <rtthread.h>
/* Atomically swap *ptr with val; returns the previous value of *ptr.
 * Uses the AMO swap instruction sized to the native XLEN. */
rt_atomic_t rt_hw_atomic_exchange(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoswap.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoswap.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
    return result;
}
/* Atomically add val to *ptr; returns the value of *ptr before the add. */
rt_atomic_t rt_hw_atomic_add(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoadd.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoadd.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
    return result;
}
/* Atomically subtract val from *ptr; returns the value before the subtract.
 * RISC-V has no amosub, so this negates val and reuses amoadd. */
rt_atomic_t rt_hw_atomic_sub(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
    val = -val;
#if __riscv_xlen == 32
    asm volatile ("amoadd.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoadd.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
    return result;
}
/* Atomically XOR val into *ptr; returns the value before the operation. */
rt_atomic_t rt_hw_atomic_xor(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoxor.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoxor.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
    return result;
}
/* Atomically AND val into *ptr; returns the value before the operation. */
rt_atomic_t rt_hw_atomic_and(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoand.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoand.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
    return result;
}
/* Atomically OR val into *ptr; returns the value before the operation. */
rt_atomic_t rt_hw_atomic_or(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoor.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoor.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
    return result;
}
/* Atomically read *ptr.  Implemented as amoxor with x0 (x ^ 0 == x), which
 * returns the current value without modifying memory. */
rt_atomic_t rt_hw_atomic_load(volatile rt_atomic_t *ptr)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoxor.w %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoxor.d %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
#endif
    return result;
}
/* Atomically write val to *ptr.  Uses amoswap and discards the old value. */
void rt_hw_atomic_store(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoswap.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoswap.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
}
/* Atomically set *ptr to a non-zero flag (OR with 1) and return the previous
 * value; non-zero result means the flag was already set. */
rt_atomic_t rt_hw_atomic_flag_test_and_set(volatile rt_atomic_t *ptr)
{
    rt_atomic_t result = 0;
    rt_atomic_t temp = 1;
#if __riscv_xlen == 32
    asm volatile ("amoor.w %0, %1, (%2)" : "=r"(result) : "r"(temp), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoor.d %0, %1, (%2)" : "=r"(result) : "r"(temp), "r"(ptr) : "memory");
#endif
    return result;
}
/* Atomically clear *ptr (AND with x0 == 0); the previous value is read into
 * result but deliberately discarded. */
void rt_hw_atomic_flag_clear(volatile rt_atomic_t *ptr)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoand.w %0, x0, (%1)" : "=r"(result) :"r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoand.d %0, x0, (%1)" : "=r"(result) :"r"(ptr) : "memory");
#endif
}
/**
 * Strong atomic compare-and-swap.
 *
 * Compares *ptr with *old; if equal, stores desired to *ptr and returns 1.
 * Otherwise the value observed in *ptr is written back to *old and 0 is
 * returned.
 *
 * Fix: the original code used %[tmp] both as the expected value and as the
 * sc.w/sc.d status destination.  After a failed store-conditional, the retry
 * iteration compared the freshly loaded value against the sc status code
 * instead of the expected value, so the CAS could mis-report its outcome
 * under contention.  A dedicated scratch register now receives the sc status
 * and the expected value in %[tmp] stays intact across retries.
 */
rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_atomic_t *old, rt_atomic_t desired)
{
    rt_atomic_t tmp = *old;    /* expected value, preserved across retries */
    rt_atomic_t result = 0;    /* 1 = exchanged, 0 = comparison failed */
    rt_atomic_t status = 0;    /* sc.w/sc.d status (0 = store succeeded) */
#if __riscv_xlen == 32
    asm volatile(
        "   fence iorw, ow\n"
        "1: lr.w.aq %[result], (%[ptr])\n"
        "   bne     %[result], %[tmp], 2f\n"
        "   sc.w.rl %[status], %[desired], (%[ptr])\n"
        "   bnez    %[status], 1b\n"
        "   li      %[result], 1\n"
        "   j       3f\n"
        "2: sw      %[result], (%[old])\n"
        "   li      %[result], 0\n"
        "3:\n"
        : [result]"+r" (result), [tmp]"+r" (tmp), [status]"+r" (status), [ptr]"+r" (ptr)
        : [desired]"r" (desired), [old]"r"(old)
        : "memory");
#elif __riscv_xlen == 64
    asm volatile(
        "   fence iorw, ow\n"
        "1: lr.d.aq %[result], (%[ptr])\n"
        "   bne     %[result], %[tmp], 2f\n"
        "   sc.d.rl %[status], %[desired], (%[ptr])\n"
        "   bnez    %[status], 1b\n"
        "   li      %[result], 1\n"
        "   j       3f\n"
        "2: sd      %[result], (%[old])\n"
        "   li      %[result], 0\n"
        "3:\n"
        : [result]"+r" (result), [tmp]"+r" (tmp), [status]"+r" (status), [ptr]"+r" (ptr)
        : [desired]"r" (desired), [old]"r"(old)
        : "memory");
#endif
    return result;
}

View File

@ -0,0 +1,319 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/28 Bernard The unify RISC-V porting implementation
* 2018/12/27 Jesven Add SMP support
* 2020/11/20 BalanceTWK Add FPU support
* 2022/12/28 WangShun Add macro to distinguish whether FPU is supported
* 2023/03/19 Flyingcys Add riscv_32e support
*/
#define __ASSEMBLY__
#include "cpuport.h"

/* SMP builds alias the UP-named IRQ entry points to the per-hart versions. */
#ifdef RT_USING_SMP
#define rt_hw_interrupt_disable rt_hw_local_irq_disable
#define rt_hw_interrupt_enable rt_hw_local_irq_enable
#endif

/*
 * rt_base_t rt_hw_interrupt_disable(void);
 * Clears mstatus.MIE (bit 3) and returns the previous mstatus value in a0.
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    csrrci a0, mstatus, 8
    ret

/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * Writes back the mstatus value returned by rt_hw_interrupt_disable.
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    csrw mstatus, a0
    ret

/*
 * #ifdef RT_USING_SMP
 * void rt_hw_context_switch_to(rt_ubase_t to, struct rt_thread *to_thread);
 * #else
 * void rt_hw_context_switch_to(rt_ubase_t to);
 * #endif
 * a0 --> to          (address of the target thread's saved SP)
 * a1 --> to_thread   (SMP only)
 *
 * First switch after scheduler start: sets up the interrupt stack pointer
 * in mscratch, then restores the target thread's context.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    /* __rt_rvstack marks the top of the interrupt stack (provided by the
     * linker script, see the porting guide). */
    la t0, __rt_rvstack
#ifdef SOC_RISCV_FAMILY_CH32
/*
 * if it is an assembly entry code, the SP offset value is determined by the assembly code,
 * but the C code is determined by the compiler, so we subtract 512 here as a reservation.
 * When entering the interrupt function of C code, the compiler automatically presses the stack
 * into the task stack. We can only change the SP value used by the calling function after switching
 * the interrupt stack.This problem can be solved by modifying the interrupt to the assembly entry,
 * and there is no need to reserve 512 bytes. You only need to switch the interrupt stack at the
 * beginning of the interrupt function
 */
    addi t0, t0, -512 // for ch32
#endif /* SOC_RISCV_FAMILY_CH32 */
    csrw mscratch,t0
    /* load the target thread's stack pointer */
    LOAD sp, (a0)
#ifdef RT_USING_SMP
    mv a0, a1
    call rt_cpus_lock_status_restore
#endif
    /* stack slot 2 holds the saved mstatus image */
    LOAD a0, 2 * REGBYTES(sp)
    csrw mstatus, a0
    j rt_hw_context_switch_exit

/*
 * #ifdef RT_USING_SMP
 * void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
 * #else
 * void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to);
 * #endif
 *
 * a0 --> from        (address of the current thread's SP slot)
 * a1 --> to          (address of the target thread's SP slot)
 * a2 --> to_thread   (SMP only)
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
    /* saved from thread context
     * stack slot 0: resume pc (ra is reused as the return address)
     * stack slot 1: ra
     * stack slot 2: mstatus MPIE image (derived from current MIE)
     * stack slot i: x(i) for i >= 4
     */
#ifdef ARCH_RISCV_FPU
    /* save the full FP register file f0-f31 */
    addi sp, sp, -32 * FREGBYTES
    FSTORE f0, 0 * FREGBYTES(sp)
    FSTORE f1, 1 * FREGBYTES(sp)
    FSTORE f2, 2 * FREGBYTES(sp)
    FSTORE f3, 3 * FREGBYTES(sp)
    FSTORE f4, 4 * FREGBYTES(sp)
    FSTORE f5, 5 * FREGBYTES(sp)
    FSTORE f6, 6 * FREGBYTES(sp)
    FSTORE f7, 7 * FREGBYTES(sp)
    FSTORE f8, 8 * FREGBYTES(sp)
    FSTORE f9, 9 * FREGBYTES(sp)
    FSTORE f10, 10 * FREGBYTES(sp)
    FSTORE f11, 11 * FREGBYTES(sp)
    FSTORE f12, 12 * FREGBYTES(sp)
    FSTORE f13, 13 * FREGBYTES(sp)
    FSTORE f14, 14 * FREGBYTES(sp)
    FSTORE f15, 15 * FREGBYTES(sp)
    FSTORE f16, 16 * FREGBYTES(sp)
    FSTORE f17, 17 * FREGBYTES(sp)
    FSTORE f18, 18 * FREGBYTES(sp)
    FSTORE f19, 19 * FREGBYTES(sp)
    FSTORE f20, 20 * FREGBYTES(sp)
    FSTORE f21, 21 * FREGBYTES(sp)
    FSTORE f22, 22 * FREGBYTES(sp)
    FSTORE f23, 23 * FREGBYTES(sp)
    FSTORE f24, 24 * FREGBYTES(sp)
    FSTORE f25, 25 * FREGBYTES(sp)
    FSTORE f26, 26 * FREGBYTES(sp)
    FSTORE f27, 27 * FREGBYTES(sp)
    FSTORE f28, 28 * FREGBYTES(sp)
    FSTORE f29, 29 * FREGBYTES(sp)
    FSTORE f30, 30 * FREGBYTES(sp)
    FSTORE f31, 31 * FREGBYTES(sp)
#endif
    /* RV32E only has x0-x15, so its frame is half the size */
#ifndef __riscv_32e
    addi sp, sp, -32 * REGBYTES
#else
    addi sp, sp, -16 * REGBYTES
#endif
    /* publish the saved SP into the from-thread's SP slot */
    STORE sp, (a0)
    /* ra doubles as the resume pc (slot 0) and the return address (slot 1) */
    STORE x1, 0 * REGBYTES(sp)
    STORE x1, 1 * REGBYTES(sp)
    /* translate current mstatus.MIE into an MPIE image for mret:
     * MIE set -> store 0x80 (MPIE), MIE clear -> store 0 */
    csrr a0, mstatus
    andi a0, a0, 8
    beqz a0, save_mpie
    li a0, 0x80
save_mpie:
    STORE a0, 2 * REGBYTES(sp)
    /* x2 is sp (saved above), x3/gp is not banked per-thread */
    STORE x4, 4 * REGBYTES(sp)
    STORE x5, 5 * REGBYTES(sp)
    STORE x6, 6 * REGBYTES(sp)
    STORE x7, 7 * REGBYTES(sp)
    STORE x8, 8 * REGBYTES(sp)
    STORE x9, 9 * REGBYTES(sp)
    STORE x10, 10 * REGBYTES(sp)
    STORE x11, 11 * REGBYTES(sp)
    STORE x12, 12 * REGBYTES(sp)
    STORE x13, 13 * REGBYTES(sp)
    STORE x14, 14 * REGBYTES(sp)
    STORE x15, 15 * REGBYTES(sp)
#ifndef __riscv_32e
    STORE x16, 16 * REGBYTES(sp)
    STORE x17, 17 * REGBYTES(sp)
    STORE x18, 18 * REGBYTES(sp)
    STORE x19, 19 * REGBYTES(sp)
    STORE x20, 20 * REGBYTES(sp)
    STORE x21, 21 * REGBYTES(sp)
    STORE x22, 22 * REGBYTES(sp)
    STORE x23, 23 * REGBYTES(sp)
    STORE x24, 24 * REGBYTES(sp)
    STORE x25, 25 * REGBYTES(sp)
    STORE x26, 26 * REGBYTES(sp)
    STORE x27, 27 * REGBYTES(sp)
    STORE x28, 28 * REGBYTES(sp)
    STORE x29, 29 * REGBYTES(sp)
    STORE x30, 30 * REGBYTES(sp)
    STORE x31, 31 * REGBYTES(sp)
#endif
    /* restore to thread context
     * sp(0) -> epc;
     * sp(1) -> ra;
     * sp(i) -> x(i+2)
     */
    LOAD sp, (a1)
#ifdef RT_USING_SMP
    mv a0, a2
    call rt_cpus_lock_status_restore
#endif /*RT_USING_SMP*/
    j rt_hw_context_switch_exit

#ifdef RT_USING_SMP
/*
 * void rt_hw_context_switch_interrupt(void *context, rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
 *
 * a0 --> context
 * a1 --> from
 * a2 --> to
 * a3 --> to_thread
 */
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
    STORE a0, 0(a1)
    LOAD sp, 0(a2)
    move a0, a3
    call rt_cpus_lock_status_restore
    j rt_hw_context_switch_exit
#endif

/* Common exit path: restore the full register frame from sp and mret. */
.global rt_hw_context_switch_exit
rt_hw_context_switch_exit:
#ifdef RT_USING_SMP
#ifdef RT_USING_SIGNALS
    mv a0, sp
    csrr t0, mhartid
    /* switch interrupt stack of current cpu */
    la sp, __stack_start__
    addi t1, t0, 1
    li t2, __STACKSIZE__
    mul t1, t1, t2
    add sp, sp, t1 /* sp = (cpuid + 1) * __STACKSIZE__ + __stack_start__ */
    call rt_signal_check
    mv sp, a0
#endif
#endif
    /* restore the saved resume pc (stack slot 0) into mepc */
    LOAD a0, 0 * REGBYTES(sp)
    csrw mepc, a0
    LOAD x1, 1 * REGBYTES(sp)
    /* base mstatus for mret: MPP=11 (machine mode); with FPU also FS=11
     * (0x7800 = MPP | FS dirty, 0x1800 = MPP only) */
#ifdef ARCH_RISCV_FPU
    li t0, 0x7800
#else
    li t0, 0x1800
#endif
    csrw mstatus, t0
    /* OR in the saved MPIE image (stack slot 2) */
    LOAD a0, 2 * REGBYTES(sp)
    csrs mstatus, a0
    LOAD x4, 4 * REGBYTES(sp)
    LOAD x5, 5 * REGBYTES(sp)
    LOAD x6, 6 * REGBYTES(sp)
    LOAD x7, 7 * REGBYTES(sp)
    LOAD x8, 8 * REGBYTES(sp)
    LOAD x9, 9 * REGBYTES(sp)
    LOAD x10, 10 * REGBYTES(sp)
    LOAD x11, 11 * REGBYTES(sp)
    LOAD x12, 12 * REGBYTES(sp)
    LOAD x13, 13 * REGBYTES(sp)
    LOAD x14, 14 * REGBYTES(sp)
    LOAD x15, 15 * REGBYTES(sp)
#ifndef __riscv_32e
    LOAD x16, 16 * REGBYTES(sp)
    LOAD x17, 17 * REGBYTES(sp)
    LOAD x18, 18 * REGBYTES(sp)
    LOAD x19, 19 * REGBYTES(sp)
    LOAD x20, 20 * REGBYTES(sp)
    LOAD x21, 21 * REGBYTES(sp)
    LOAD x22, 22 * REGBYTES(sp)
    LOAD x23, 23 * REGBYTES(sp)
    LOAD x24, 24 * REGBYTES(sp)
    LOAD x25, 25 * REGBYTES(sp)
    LOAD x26, 26 * REGBYTES(sp)
    LOAD x27, 27 * REGBYTES(sp)
    LOAD x28, 28 * REGBYTES(sp)
    LOAD x29, 29 * REGBYTES(sp)
    LOAD x30, 30 * REGBYTES(sp)
    LOAD x31, 31 * REGBYTES(sp)
    addi sp, sp, 32 * REGBYTES
#else
    addi sp, sp, 16 * REGBYTES
#endif
#ifdef ARCH_RISCV_FPU
    /* restore f0-f31 (saved above the integer frame) */
    FLOAD f0, 0 * FREGBYTES(sp)
    FLOAD f1, 1 * FREGBYTES(sp)
    FLOAD f2, 2 * FREGBYTES(sp)
    FLOAD f3, 3 * FREGBYTES(sp)
    FLOAD f4, 4 * FREGBYTES(sp)
    FLOAD f5, 5 * FREGBYTES(sp)
    FLOAD f6, 6 * FREGBYTES(sp)
    FLOAD f7, 7 * FREGBYTES(sp)
    FLOAD f8, 8 * FREGBYTES(sp)
    FLOAD f9, 9 * FREGBYTES(sp)
    FLOAD f10, 10 * FREGBYTES(sp)
    FLOAD f11, 11 * FREGBYTES(sp)
    FLOAD f12, 12 * FREGBYTES(sp)
    FLOAD f13, 13 * FREGBYTES(sp)
    FLOAD f14, 14 * FREGBYTES(sp)
    FLOAD f15, 15 * FREGBYTES(sp)
    FLOAD f16, 16 * FREGBYTES(sp)
    FLOAD f17, 17 * FREGBYTES(sp)
    FLOAD f18, 18 * FREGBYTES(sp)
    FLOAD f19, 19 * FREGBYTES(sp)
    FLOAD f20, 20 * FREGBYTES(sp)
    FLOAD f21, 21 * FREGBYTES(sp)
    FLOAD f22, 22 * FREGBYTES(sp)
    FLOAD f23, 23 * FREGBYTES(sp)
    FLOAD f24, 24 * FREGBYTES(sp)
    FLOAD f25, 25 * FREGBYTES(sp)
    FLOAD f26, 26 * FREGBYTES(sp)
    FLOAD f27, 27 * FREGBYTES(sp)
    FLOAD f28, 28 * FREGBYTES(sp)
    FLOAD f29, 29 * FREGBYTES(sp)
    FLOAD f30, 30 * FREGBYTES(sp)
    FLOAD f31, 31 * FREGBYTES(sp)
    addi sp, sp, 32 * FREGBYTES
#endif
    mret

View File

@ -0,0 +1,99 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/28 Bernard The unify RISC-V porting code.
* 2020/11/20 BalanceTWK Add FPU support
* 2023/01/04 WangShun Adapt to CH32
*/
#include <rthw.h>
#include <rtthread.h>
#include "cpuport.h"
#include "rt_hw_stack_frame.h"
#ifndef RT_USING_SMP
/* UP-build handshake between the scheduler and the software-interrupt
 * handler (SW_handler): SP slot addresses of the threads to switch between,
 * and a flag marking that a switch request is pending. */
volatile rt_ubase_t rt_interrupt_from_thread = 0;
volatile rt_ubase_t rt_interrupt_to_thread = 0;
volatile rt_uint32_t rt_thread_switch_interrupt_flag = 0;
#endif
/**
 * This function will initialize thread stack
 *
 * @param tentry the entry of thread
 * @param parameter the parameter of entry
 * @param stack_addr the beginning stack address
 *        (highest usable address of the stack area; the frame is built
 *        downward from here after REGBYTES alignment)
 * @param texit the function will be called when thread exit
 *
 * @return stack address (the thread's initial sp)
 */
rt_uint8_t *rt_hw_stack_init(void *tentry,
                             void *parameter,
                             rt_uint8_t *stack_addr,
                             void *texit)
{
    struct rt_hw_stack_frame *frame;
    rt_uint8_t *stk;
    int i;

    /* align down to the register width, then reserve one full frame */
    stk = stack_addr + sizeof(rt_ubase_t);
    stk = (rt_uint8_t *)RT_ALIGN_DOWN((rt_ubase_t)stk, REGBYTES);
    stk -= sizeof(struct rt_hw_stack_frame);

    frame = (struct rt_hw_stack_frame *)stk;

    /* poison every slot so uninitialized-register use is recognizable */
    for (i = 0; i < sizeof(struct rt_hw_stack_frame) / sizeof(rt_ubase_t); i++)
    {
        ((rt_ubase_t *)frame)[i] = 0xdeadbeef;
    }

    frame->ra = (rt_ubase_t)texit;      /* thread returns into texit */
    frame->a0 = (rt_ubase_t)parameter;  /* first argument of tentry */
    frame->epc = (rt_ubase_t)tentry;    /* resume pc: thread entry */

    /* force to machine mode(MPP=11) and set MPIE to 1 */
#ifdef ARCH_RISCV_FPU
    frame->mstatus = 0x7880;            /* also FS=11 when FPU is enabled */
#else
    frame->mstatus = 0x1880;
#endif

    return stk;
}
/* Weak no-op stub: BSPs using vectored interrupt management override this
 * to raise the software interrupt that drives a context switch. */
rt_weak void rt_trigger_software_interrupt(void)
{
    /* nothing to do in the default (non-vectored) configuration */
}
/* Weak stub: every port must override this with the work done after the
 * context has been saved (see the porting guide).  The default hangs so a
 * missing override is caught immediately. */
rt_weak void rt_hw_do_after_save_above(void)
{
    for (;;)
    {
        /* deliberately trap here: this function must be reimplemented */
    }
}
/*
 * #ifdef RT_USING_SMP
 * void rt_hw_context_switch_interrupt(void *context, rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
 * #else
 * void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to);
 * #endif
 */
#ifndef RT_USING_SMP
/* Request a context switch from interrupt context (UP build).
 *
 * Records the from/to SP-slot addresses and raises the software interrupt;
 * the register-level switch happens later in SW_handler.
 * NOTE: 'from' is latched only while no switch is pending (flag == 0) so
 * that repeated requests before the switch executes keep the original
 * source thread, while 'to' always tracks the latest target.
 * from_thread/to_thread are unused here; the signature is shared with the
 * SMP variant.
 */
rt_weak void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to, rt_thread_t from_thread, rt_thread_t to_thread)
{
    /* intentionally guards only this assignment (see NOTE above) */
    if (rt_thread_switch_interrupt_flag == 0)
        rt_interrupt_from_thread = from;
    rt_interrupt_to_thread = to;
    rt_thread_switch_interrupt_flag = 1;
    rt_trigger_software_interrupt();
    return ;
}
#endif /* end of RT_USING_SMP */

View File

@ -0,0 +1,69 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-10-03 Bernard The first version
* 2020/11/20 BalanceTWK Add FPU support
* 2023/01/04 WangShun Adapt to CH32
* 2023/08/11 HPMicro Define ARCH_RISCV_FPU if FPU is enabled
*/
#ifndef CPUPORT_H__
#define CPUPORT_H__

#include <rtconfig.h>

#ifndef __ASSEMBLY__
#ifdef RT_USING_SMP
/* Ticket spinlock storage: 'next' is the ticket handed to the next waiter,
 * 'owner' is the ticket currently allowed to hold the lock; 'slock' lets
 * both halves be updated in one atomic access. */
typedef union {
    unsigned long slock;
    struct __arch_tickets {
        unsigned short owner;
        unsigned short next;
    } tickets;
} rt_hw_spinlock_t;
#endif
#endif

/* Derive the FPU configuration from the compiler's float ABI:
 * __riscv_flen is 32 for single-precision (F), 64 for double (D). */
#if __riscv_flen == 32
#define ARCH_RISCV_FPU
#define ARCH_RISCV_FPU_S
#endif
#if __riscv_flen == 64
#define ARCH_RISCV_FPU
#define ARCH_RISCV_FPU_D
#endif

/* bytes of register width, plus the matching load/store mnemonics used by
 * the assembly port files */
#ifdef ARCH_CPU_64BIT
#define STORE sd
#define LOAD ld
#define REGBYTES 8
#else
#define STORE sw
#define LOAD lw
#define REGBYTES 4
#endif

/* FP register width, load/store mnemonics and matching C storage type */
#ifdef ARCH_RISCV_FPU
#ifdef ARCH_RISCV_FPU_D
#define FSTORE fsd
#define FLOAD fld
#define FREGBYTES 8
#define rv_floatreg_t rt_int64_t
#endif
#ifdef ARCH_RISCV_FPU_S
#define FSTORE fsw
#define FLOAD flw
#define FREGBYTES 4
#define rv_floatreg_t rt_int32_t
#endif
#endif

#endif

View File

@ -0,0 +1,355 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023/01/17 WangShun The first version
* 2023/03/19 Flyingcys Add riscv_32e support
* 2023/08/09 HPMicro Fix the issue t0 was modified unexpectedly before being saved
*/
#define __ASSEMBLY__
#include "cpuport.h"

.section .text.entry, "ax"
/* GD32VF103 requires a 64-byte aligned vector entry */
#if defined(SOC_SERIES_GD32VF103V)
.align 6
#else
.align 2
#endif

/*
 * SW_handler: common interrupt entry (vectored: the software interrupt
 * slot; non-vectored: the unified trap entry, written to mtvec).
 * Saves the interrupted thread's context, runs the port's interrupt work
 * via rt_hw_do_after_save_above, then performs a context switch on exit
 * when rt_thread_switch_interrupt_flag was raised.
 */
.global SW_handler
SW_handler:
    /* mask machine interrupts (clear mstatus.MIE) while handling */
    csrci mstatus, 0x8
#ifdef ARCH_RISCV_FPU
    /* save the full FP register file f0-f31 */
    addi sp, sp, -32 * FREGBYTES
    FSTORE f0, 0 * FREGBYTES(sp)
    FSTORE f1, 1 * FREGBYTES(sp)
    FSTORE f2, 2 * FREGBYTES(sp)
    FSTORE f3, 3 * FREGBYTES(sp)
    FSTORE f4, 4 * FREGBYTES(sp)
    FSTORE f5, 5 * FREGBYTES(sp)
    FSTORE f6, 6 * FREGBYTES(sp)
    FSTORE f7, 7 * FREGBYTES(sp)
    FSTORE f8, 8 * FREGBYTES(sp)
    FSTORE f9, 9 * FREGBYTES(sp)
    FSTORE f10, 10 * FREGBYTES(sp)
    FSTORE f11, 11 * FREGBYTES(sp)
    FSTORE f12, 12 * FREGBYTES(sp)
    FSTORE f13, 13 * FREGBYTES(sp)
    FSTORE f14, 14 * FREGBYTES(sp)
    FSTORE f15, 15 * FREGBYTES(sp)
    FSTORE f16, 16 * FREGBYTES(sp)
    FSTORE f17, 17 * FREGBYTES(sp)
    FSTORE f18, 18 * FREGBYTES(sp)
    FSTORE f19, 19 * FREGBYTES(sp)
    FSTORE f20, 20 * FREGBYTES(sp)
    FSTORE f21, 21 * FREGBYTES(sp)
    FSTORE f22, 22 * FREGBYTES(sp)
    FSTORE f23, 23 * FREGBYTES(sp)
    FSTORE f24, 24 * FREGBYTES(sp)
    FSTORE f25, 25 * FREGBYTES(sp)
    FSTORE f26, 26 * FREGBYTES(sp)
    FSTORE f27, 27 * FREGBYTES(sp)
    FSTORE f28, 28 * FREGBYTES(sp)
    FSTORE f29, 29 * FREGBYTES(sp)
    FSTORE f30, 30 * FREGBYTES(sp)
    FSTORE f31, 31 * FREGBYTES(sp)
#endif
    /* save all from thread context (RV32E frames hold x0-x15 only) */
#ifndef __riscv_32e
    addi sp, sp, -32 * REGBYTES
#else
    addi sp, sp, -16 * REGBYTES
#endif
    /* save x5/t0 first, before it is clobbered below (HPMicro 2023/08/09 fix) */
    STORE x5, 5 * REGBYTES(sp)
    STORE x1, 1 * REGBYTES(sp)
    /* Mandatory set the MPIE of mstatus */
    li t0, 0x80
    STORE t0, 2 * REGBYTES(sp)
    STORE x4, 4 * REGBYTES(sp)
    STORE x6, 6 * REGBYTES(sp)
    STORE x7, 7 * REGBYTES(sp)
    STORE x8, 8 * REGBYTES(sp)
    STORE x9, 9 * REGBYTES(sp)
    STORE x10, 10 * REGBYTES(sp)
    STORE x11, 11 * REGBYTES(sp)
    STORE x12, 12 * REGBYTES(sp)
    STORE x13, 13 * REGBYTES(sp)
    STORE x14, 14 * REGBYTES(sp)
    STORE x15, 15 * REGBYTES(sp)
#ifndef __riscv_32e
    STORE x16, 16 * REGBYTES(sp)
    STORE x17, 17 * REGBYTES(sp)
    STORE x18, 18 * REGBYTES(sp)
    STORE x19, 19 * REGBYTES(sp)
    STORE x20, 20 * REGBYTES(sp)
    STORE x21, 21 * REGBYTES(sp)
    STORE x22, 22 * REGBYTES(sp)
    STORE x23, 23 * REGBYTES(sp)
    STORE x24, 24 * REGBYTES(sp)
    STORE x25, 25 * REGBYTES(sp)
    STORE x26, 26 * REGBYTES(sp)
    STORE x27, 27 * REGBYTES(sp)
    STORE x28, 28 * REGBYTES(sp)
    STORE x29, 29 * REGBYTES(sp)
    STORE x30, 30 * REGBYTES(sp)
    STORE x31, 31 * REGBYTES(sp)
#endif
    /* switch to interrupt stack (mscratch holds it, set at startup) */
    csrrw sp,mscratch,sp
    /* interrupt handle */
    call rt_interrupt_enter
    /* Do the work after saving the above (port-specific hook) */
    jal rt_hw_do_after_save_above
    call rt_interrupt_leave
    /* switch back to the interrupted thread's stack */
    csrrw sp,mscratch,sp
    /* Determine whether to trigger scheduling at the interrupt function */
    la t0, rt_thread_switch_interrupt_flag
    lw t2, 0(t0)
    beqz t2, 1f
    /* clear the flag of rt_thread_switch_interrupt_flag */
    sw zero, 0(t0)
    /* a switch is pending: save mepc as the resume pc, publish sp to the
     * from-thread, then adopt the to-thread's stack and resume pc */
    csrr a0, mepc
    STORE a0, 0 * REGBYTES(sp)
    la t0, rt_interrupt_from_thread
    LOAD t1, 0(t0)
    STORE sp, 0(t1)
    la t0, rt_interrupt_to_thread
    LOAD t1, 0(t0)
    LOAD sp, 0(t1)
    LOAD a0, 0 * REGBYTES(sp)
    csrw mepc, a0
1:
    LOAD x1, 1 * REGBYTES(sp)
    /* Set the mode after MRET: MPP=11 (machine mode) */
    li t0, 0x1800
    csrs mstatus, t0
    /* OR in the saved MPIE image (stack slot 2) */
    LOAD t0, 2 * REGBYTES(sp)
    csrs mstatus, t0
    LOAD x4, 4 * REGBYTES(sp)
    LOAD x5, 5 * REGBYTES(sp)
    LOAD x6, 6 * REGBYTES(sp)
    LOAD x7, 7 * REGBYTES(sp)
    LOAD x8, 8 * REGBYTES(sp)
    LOAD x9, 9 * REGBYTES(sp)
    LOAD x10, 10 * REGBYTES(sp)
    LOAD x11, 11 * REGBYTES(sp)
    LOAD x12, 12 * REGBYTES(sp)
    LOAD x13, 13 * REGBYTES(sp)
    LOAD x14, 14 * REGBYTES(sp)
    LOAD x15, 15 * REGBYTES(sp)
#ifndef __riscv_32e
    LOAD x16, 16 * REGBYTES(sp)
    LOAD x17, 17 * REGBYTES(sp)
    LOAD x18, 18 * REGBYTES(sp)
    LOAD x19, 19 * REGBYTES(sp)
    LOAD x20, 20 * REGBYTES(sp)
    LOAD x21, 21 * REGBYTES(sp)
    LOAD x22, 22 * REGBYTES(sp)
    LOAD x23, 23 * REGBYTES(sp)
    LOAD x24, 24 * REGBYTES(sp)
    LOAD x25, 25 * REGBYTES(sp)
    LOAD x26, 26 * REGBYTES(sp)
    LOAD x27, 27 * REGBYTES(sp)
    LOAD x28, 28 * REGBYTES(sp)
    LOAD x29, 29 * REGBYTES(sp)
    LOAD x30, 30 * REGBYTES(sp)
    LOAD x31, 31 * REGBYTES(sp)
    addi sp, sp, 32 * REGBYTES
#else
    addi sp, sp, 16 * REGBYTES
#endif
#ifdef ARCH_RISCV_FPU
    /* restore f0-f31 (saved above the integer frame) */
    FLOAD f0, 0 * FREGBYTES(sp)
    FLOAD f1, 1 * FREGBYTES(sp)
    FLOAD f2, 2 * FREGBYTES(sp)
    FLOAD f3, 3 * FREGBYTES(sp)
    FLOAD f4, 4 * FREGBYTES(sp)
    FLOAD f5, 5 * FREGBYTES(sp)
    FLOAD f6, 6 * FREGBYTES(sp)
    FLOAD f7, 7 * FREGBYTES(sp)
    FLOAD f8, 8 * FREGBYTES(sp)
    FLOAD f9, 9 * FREGBYTES(sp)
    FLOAD f10, 10 * FREGBYTES(sp)
    FLOAD f11, 11 * FREGBYTES(sp)
    FLOAD f12, 12 * FREGBYTES(sp)
    FLOAD f13, 13 * FREGBYTES(sp)
    FLOAD f14, 14 * FREGBYTES(sp)
    FLOAD f15, 15 * FREGBYTES(sp)
    FLOAD f16, 16 * FREGBYTES(sp)
    FLOAD f17, 17 * FREGBYTES(sp)
    FLOAD f18, 18 * FREGBYTES(sp)
    FLOAD f19, 19 * FREGBYTES(sp)
    FLOAD f20, 20 * FREGBYTES(sp)
    FLOAD f21, 21 * FREGBYTES(sp)
    FLOAD f22, 22 * FREGBYTES(sp)
    FLOAD f23, 23 * FREGBYTES(sp)
    FLOAD f24, 24 * FREGBYTES(sp)
    FLOAD f25, 25 * FREGBYTES(sp)
    FLOAD f26, 26 * FREGBYTES(sp)
    FLOAD f27, 27 * FREGBYTES(sp)
    FLOAD f28, 28 * FREGBYTES(sp)
    FLOAD f29, 29 * FREGBYTES(sp)
    FLOAD f30, 30 * FREGBYTES(sp)
    FLOAD f31, 31 * FREGBYTES(sp)
    addi sp, sp, 32 * FREGBYTES
#endif
    mret

/*
 * trap_entry: weak unified trap handler for non-vectored ports.  Saves the
 * thread context, dispatches to handle_trap(mcause, mepc, frame) on the
 * interrupt stack, and tail-calls rt_hw_context_switch_exit to resume
 * (switching threads when a switch was requested inside the handler).
 */
.section .text.trap_entry
.align 2
.weak trap_entry
.global trap_entry
trap_entry:
#ifdef ARCH_RISCV_FPU
    /* save f0-f31 */
    addi sp, sp, -32 * FREGBYTES
    FSTORE f0, 0 * FREGBYTES(sp)
    FSTORE f1, 1 * FREGBYTES(sp)
    FSTORE f2, 2 * FREGBYTES(sp)
    FSTORE f3, 3 * FREGBYTES(sp)
    FSTORE f4, 4 * FREGBYTES(sp)
    FSTORE f5, 5 * FREGBYTES(sp)
    FSTORE f6, 6 * FREGBYTES(sp)
    FSTORE f7, 7 * FREGBYTES(sp)
    FSTORE f8, 8 * FREGBYTES(sp)
    FSTORE f9, 9 * FREGBYTES(sp)
    FSTORE f10, 10 * FREGBYTES(sp)
    FSTORE f11, 11 * FREGBYTES(sp)
    FSTORE f12, 12 * FREGBYTES(sp)
    FSTORE f13, 13 * FREGBYTES(sp)
    FSTORE f14, 14 * FREGBYTES(sp)
    FSTORE f15, 15 * FREGBYTES(sp)
    FSTORE f16, 16 * FREGBYTES(sp)
    FSTORE f17, 17 * FREGBYTES(sp)
    FSTORE f18, 18 * FREGBYTES(sp)
    FSTORE f19, 19 * FREGBYTES(sp)
    FSTORE f20, 20 * FREGBYTES(sp)
    FSTORE f21, 21 * FREGBYTES(sp)
    FSTORE f22, 22 * FREGBYTES(sp)
    FSTORE f23, 23 * FREGBYTES(sp)
    FSTORE f24, 24 * FREGBYTES(sp)
    FSTORE f25, 25 * FREGBYTES(sp)
    FSTORE f26, 26 * FREGBYTES(sp)
    FSTORE f27, 27 * FREGBYTES(sp)
    FSTORE f28, 28 * FREGBYTES(sp)
    FSTORE f29, 29 * FREGBYTES(sp)
    FSTORE f30, 30 * FREGBYTES(sp)
    FSTORE f31, 31 * FREGBYTES(sp)
#endif
    /* save thread context to thread stack */
#ifndef __riscv_32e
    addi sp, sp, -32 * REGBYTES
#else
    addi sp, sp, -16 * REGBYTES
#endif
    STORE x1, 1 * REGBYTES(sp)
    /* reuse x1 (already saved) as scratch for mstatus and mepc */
    csrr x1, mstatus
    STORE x1, 2 * REGBYTES(sp)
    csrr x1, mepc
    STORE x1, 0 * REGBYTES(sp)
    STORE x4, 4 * REGBYTES(sp)
    STORE x5, 5 * REGBYTES(sp)
    STORE x6, 6 * REGBYTES(sp)
    STORE x7, 7 * REGBYTES(sp)
    STORE x8, 8 * REGBYTES(sp)
    STORE x9, 9 * REGBYTES(sp)
    STORE x10, 10 * REGBYTES(sp)
    STORE x11, 11 * REGBYTES(sp)
    STORE x12, 12 * REGBYTES(sp)
    STORE x13, 13 * REGBYTES(sp)
    STORE x14, 14 * REGBYTES(sp)
    STORE x15, 15 * REGBYTES(sp)
#ifndef __riscv_32e
    STORE x16, 16 * REGBYTES(sp)
    STORE x17, 17 * REGBYTES(sp)
    STORE x18, 18 * REGBYTES(sp)
    STORE x19, 19 * REGBYTES(sp)
    STORE x20, 20 * REGBYTES(sp)
    STORE x21, 21 * REGBYTES(sp)
    STORE x22, 22 * REGBYTES(sp)
    STORE x23, 23 * REGBYTES(sp)
    STORE x24, 24 * REGBYTES(sp)
    STORE x25, 25 * REGBYTES(sp)
    STORE x26, 26 * REGBYTES(sp)
    STORE x27, 27 * REGBYTES(sp)
    STORE x28, 28 * REGBYTES(sp)
    STORE x29, 29 * REGBYTES(sp)
    STORE x30, 30 * REGBYTES(sp)
    STORE x31, 31 * REGBYTES(sp)
#endif
    /* switch to interrupt stack; s0 keeps the thread frame pointer */
    move s0, sp
#ifdef RT_USING_SMP
    /* get cpu id */
    csrr t0, mhartid
    /* switch interrupt stack of current cpu */
    la sp, __stack_start__
    addi t1, t0, 1
    li t2, __STACKSIZE__
    mul t1, t1, t2
    add sp, sp, t1 /* sp = (cpuid + 1) * __STACKSIZE__ + __stack_start__ */
#endif
    /* handle interrupt: handle_trap(mcause, mepc, frame) */
    call rt_interrupt_enter
    csrr a0, mcause
    csrr a1, mepc
    mv a2, s0
    call handle_trap
    call rt_interrupt_leave
#ifdef RT_USING_SMP
    /* s0 --> sp */
    mv sp, s0
    mv a0, s0
    call rt_scheduler_do_irq_switch
    tail rt_hw_context_switch_exit
#else
    /* switch to from_thread stack */
    move sp, s0
    /* need to switch new thread */
    la s0, rt_thread_switch_interrupt_flag
    lw s2, 0(s0)
    beqz s2, spurious_interrupt
    sw zero, 0(s0)
    /* publish sp to the from-thread, then adopt the to-thread's stack */
    la s0, rt_interrupt_from_thread
    LOAD s1, 0(s0)
    STORE sp, 0(s1)
    la s0, rt_interrupt_to_thread
    LOAD s1, 0(s0)
    LOAD sp, 0(s1)
#endif
spurious_interrupt:
    /* restore whichever frame sp now points at and mret */
    tail rt_hw_context_switch_exit

View File

@ -0,0 +1,187 @@
### RV32移植指南
#### 1.概述
为了简化32位RISC-V架构内核移植RT-Thread的流程RT-Thread提供一份通用代码位于common文件夹
| 文件名 | 文件内容 |
| :-----------------: | :----------------------------: |
| context_gcc.S | 开关全局中断,线程上下文切换等 |
| cpuport.c | 线程栈初始化,软件中断触发等 |
| cpuport.h | 数据加载写入指令 |
| interrupt_gcc.S | 线程间上下文切换 |
| riscv-ops.h | 控制状态寄存器读写 |
| rt_hw_stack_frame.h | 线程栈格式 |
| trap_common.c | 中断注册,中断查询分发 |
#### 2.移植接口
1软件中断触发函数通常向量管理中断方式需实现该函数非向量中断管理方式一般不需要
```c
void rt_trigger_software_interrupt(void)
```
2保存上文后执行函数该函数向量中断与非向量中断均需实现
```c
void rt_hw_do_after_save_above(void)
```
该函数需要实现的工作如下:
步骤1将函数返回地址(ra)保存栈中
步骤2加载中断处理函数的入口参数
步骤3调用中断处理函数新移植的BSP推荐使用RT-Thread trap_common.c文件中提供的统一中断处理函数:rt_rv32_system_irq_handler
步骤4从栈中加载返回地址(ra)返回至SW_handler函数
#### 3.准备工作
- 准备一个基础的裸机工程,需具备以下条件:
- 明确中断管理方式(向量中断/非向量中断)
- 实现系统节拍定时器中断
- 实现一个串口中断
#### 4.移植步骤
- 步骤一:配置中断管理入口,相关中断入口函数位于**common/interrupt_gcc.S**,入口函数为**SW_handler**
- 根据使用的中断管理方式,执行下述操作
- 向量中断管理方式
> 通常会使用一个软件中断该中断的优先级被配置为最低作为在中断中触发上下文切换的入口函数。SW_handler在此仅作为触发软件中断时的入口参数其他类型中断触发时跳转至各自的中断入口函数。
>
移植方法:修改原有的中断向量表中软件中断所在位置,将原有软件中断函数名修改为**SW_handler**
示例ch32系列
```assembly
_vector_base:
.option norvc;
.word _start
...
.word SW_handler /* 将这里原来放置的软件中断函数名修改为SW_handler */
```
- 非向量中断
> 当有中断触发时会进入一个统一的中断入口函数进行中断查询分发。SW_handler在此处不仅作为在中断中触发上下文切换的入口函数同时承担中断查询分发与执行。
>
移植方法:将**SW_handler**的地址加载到保存统一中断入口地址的寄存器通常为mtevc具体名称需要根据具体的内核指定
示例(hpm6750系列):
```assembly
la t0, SW_handler
csrw mtvec, t0
```
- 步骤二:修改链接脚本,在中断栈顶名称后添加示例代码
- 将下述代码放置于链接脚本中中断栈顶名称之后
```assembly
PROVIDE( __rt_rvstack = . );
```
- 示例core-v-mcu链接脚本
```assembly
.stack : ALIGN(16)
{
stack_start = .;
__stack_bottom = .;
. += __stack_size;
__stack_top = .;
PROVIDE( __rt_rvstack = . );//移植时添加
stack = .;
} > L2
```
> __stack_top为core-v-mcu工程的中断栈顶名 不同工程此处的名称可能不一致 按上述方法将给出的代码放到具体工程链接脚本中断栈顶名称之后即可。
- 步骤三:实现在中断上下文切换的函数接口
<font color=red>RISC-V架构的内核通常采用非向量中断的管理方式为了进一步降低难度针对非向量模式的中断管理方式common文件夹中的trap_common.c为用户提供了一套统一的中断查询分发、中断入口函数注册以及中断初始化函数在rthw.h中声明对于移植一个新的RV32内核若采用非向量中断管理的方式推荐使用方式一若采用向量中断管理方式或针对中断的处理有专门的优化时推荐使用方式二,期望采用原有裸机工程的统一的中断查询与处理函数也可使用方式二。以下是两种实现方式的示例:</font>
方式一:面向非向量中断管理方式(例:core-v-mcu)
在RT-Thread的BSP框架中的board文件夹创建一个统一名称的汇编文件trap_gcc.S,将该文件添加到编译环境即可,该函数的实现如下(用户直接使用,无需修改):
```assembly
#include "cpuport.h"
.globl rt_hw_do_after_save_above
.type rt_hw_do_after_save_above,@function
rt_hw_do_after_save_above:
addi sp, sp, -4
STORE ra, 0 * REGBYTES(sp)
csrr a0, mcause
csrr a1, mepc
mv a2, sp
call rt_rv32_system_irq_handler
LOAD ra, 0 * REGBYTES(sp)
addi sp, sp, 4
ret
```
随后用户仅需调用rt_hw_interrupt_init进行初始化再将中断入口函数通过rt_hw_interrupt_install函数注册即可注册的中断入口函数为裸机原有的中断入口函数示例代码如下(相关设备的中断入口函数注册之前不可使用该设备):
```c
rt_hw_interrupt_init();//中断入口函数初始化
rt_hw_interrupt_install(0x7, timer_irq_handler, RT_NULL, "timerirq");//注册系统定时器中断入口函数
rt_hw_interrupt_install(0xb, fc_soc_event_handler1, RT_NULL, "eventirq");//注册外部中断入口函数
```
方式二:面向向量中断管理方式(例:CH32)与针对中断管理有专门优化的内核(例:GD32)
- 向量中断(可参考ch32)
在RT-Thread的BSP框架中的board文件夹创建需要的文件实现下述的两个函数
- 在void rt_trigger_software_interrupt(void) 中实现触发软件中断的操作
- 在void rt_hw_do_after_save_above(void) 中实现触发软件中断之后的工作,通常是清除软件中断置位标志位或类似操作
- 非向量中断(期望采用原有裸机工程的统一的中断查询与处理函数)
在RT-Thread的BSP框架中的board文件夹创建一个统一名称的汇编文件trap_gcc.S,将该文件添加到编译环境即可,此步骤与方式一提供的方法相似,仅在调用中断处理函数以及传递的参数不同,需要根据具体的移植工程实现,方式二下该函数的实现如下:
示例代码:
```assembly
#include "cpuport.h"
.globl rt_hw_do_after_save_above
.type rt_hw_do_after_save_above,@function
rt_hw_do_after_save_above:
addi sp, sp, -4 // 移动栈指针
STORE ra, 0 * REGBYTES(sp) // 将返回地址寄存器值保存至栈中
csrr a0, mscratch// 加载函数入口参数
call trap_entry// 调用中断处理函数
LOAD ra, 0 * REGBYTES(sp) // 从栈中恢复返回地址寄存器值
addi sp, sp, 4// 移动栈指针
ret // 返回SW_handler
```
trap_entry为用户实现的中断源查询分发的函数在移植时仅需要将该函数名修改为用户的中断查询分发函数即可。
#### 5.验证
- 创建一个静态线程在线程中调用RT-Thread提供的与系统时基相关函数接口例如rt_thread_mdelay调试观察系统是否可以正常运行
- 移植RT-Thread的shell进一步验证系统是否移植成功

View File

@ -0,0 +1,41 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-10-03 Bernard The first version
*/
#ifndef RISCV_OPS_H__
#define RISCV_OPS_H__

/* GCC statement-expression wrappers around the RISC-V CSR instructions.
 * 'reg' is a CSR name pasted into the instruction text, so it must be a
 * bare identifier (e.g. mstatus), not a variable.  Constant values below
 * 32 are passed through the "i" constraint so the assembler can use the
 * 5-bit immediate CSR forms. */
#if defined(__GNUC__) && !defined(__ASSEMBLER__)
/* Read and return the current value of CSR 'reg'. */
#define read_csr(reg) ({ unsigned long __tmp; \
asm volatile ("csrr %0, " #reg : "=r"(__tmp)); \
__tmp; })
/* Write 'val' to CSR 'reg' (no return value). */
#define write_csr(reg, val) ({ \
if (__builtin_constant_p(val) && (unsigned long)(val) < 32) \
asm volatile ("csrw " #reg ", %0" :: "i"(val)); \
else \
asm volatile ("csrw " #reg ", %0" :: "r"(val)); })
/* Atomically set the bits of 'bit' in CSR 'reg'; returns the old value. */
#define set_csr(reg, bit) ({ unsigned long __tmp; \
if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
asm volatile ("csrrs %0, " #reg ", %1" : "=r"(__tmp) : "i"(bit)); \
else \
asm volatile ("csrrs %0, " #reg ", %1" : "=r"(__tmp) : "r"(bit)); \
__tmp; })
/* Atomically clear the bits of 'bit' in CSR 'reg'; returns the old value. */
#define clear_csr(reg, bit) ({ unsigned long __tmp; \
if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
asm volatile ("csrrc %0, " #reg ", %1" : "=r"(__tmp) : "i"(bit)); \
else \
asm volatile ("csrrc %0, " #reg ", %1" : "=r"(__tmp) : "r"(bit)); \
__tmp; })
#endif /* end of __GNUC__ */
#endif

View File

@ -0,0 +1,113 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-10-03 Bernard The first version
*/
#ifndef RISCV_PLIC_H__
#define RISCV_PLIC_H__
#ifndef PLIC_BASE_ADDR
#define PLIC_BASE_ADDR 0x0
#endif
/* Priority Register - 32 bits per source */
#define PLIC_PRIORITY_OFFSET (0x00000000UL)
#define PLIC_PRIORITY_SHIFT_PER_SOURCE 2
/* Pending Register - 1 bit per soirce */
#define PLIC_PENDING_OFFSET (0x00001000UL)
#define PLIC_PENDING_SHIFT_PER_SOURCE 0
/* Enable Register - 0x80 per target */
#define PLIC_ENABLE_OFFSET (0x00002000UL)
#define PLIC_ENABLE_SHIFT_PER_TARGET 7
/* Priority Threshold Register - 0x1000 per target */
#define PLIC_THRESHOLD_OFFSET (0x00200000UL)
#define PLIC_THRESHOLD_SHIFT_PER_TARGET 12
/* Claim Register - 0x1000 per target */
#define PLIC_CLAIM_OFFSET (0x00200004UL)
#define PLIC_CLAIM_SHIFT_PER_TARGET 12
#if defined(__GNUC__) && !defined(__ASSEMBLER__)
__attribute__((always_inline)) static inline void __plic_set_feature(unsigned int feature)
{
volatile unsigned int *feature_ptr = (volatile unsigned int *)PLIC_BASE_ADDR;
*feature_ptr = feature;
}
__attribute__((always_inline)) static inline void __plic_set_threshold(unsigned int threshold)
{
unsigned int hart_id = read_csr(mhartid);
volatile unsigned int *threshold_ptr = (volatile unsigned int *)(PLIC_BASE_ADDR +
PLIC_THRESHOLD_OFFSET +
(hart_id << PLIC_THRESHOLD_SHIFT_PER_TARGET));
*threshold_ptr = threshold;
}
__attribute__((always_inline)) static inline void __plic_set_priority(unsigned int source, unsigned int priority)
{
volatile unsigned int *priority_ptr = (volatile unsigned int *)(PLIC_BASE_ADDR +
PLIC_PRIORITY_OFFSET +
(source << PLIC_PRIORITY_SHIFT_PER_SOURCE));
*priority_ptr = priority;
}
__attribute__((always_inline)) static inline void __plic_set_pending(unsigned int source)
{
volatile unsigned int *current_ptr = (volatile unsigned int *)(PLIC_BASE_ADDR +
PLIC_PENDING_OFFSET +
((source >> 5) << 2));
*current_ptr = (1 << (source & 0x1F));
}
/* Enable interrupt 'source' for this hart: read-modify-write the enable
 * word inside this hart's enable bank (0x80 bytes per target,
 * 1 << PLIC_ENABLE_SHIFT_PER_TARGET). */
__attribute__((always_inline)) static inline void __plic_irq_enable(unsigned int source)
{
    unsigned int hart_id = read_csr(mhartid);
    volatile unsigned int *current_ptr = (volatile unsigned int *)(PLIC_BASE_ADDR +
                                                                   PLIC_ENABLE_OFFSET +
                                                                   (hart_id << PLIC_ENABLE_SHIFT_PER_TARGET) +
                                                                   ((source >> 5) << 2));
    unsigned int current = *current_ptr;
    current = current | (1 << (source & 0x1F)); /* set only this source's bit */
    *current_ptr = current;
}
/* Disable interrupt 'source' for this hart: clear its bit in the enable
 * word of this hart's enable bank, preserving all other sources. */
__attribute__((always_inline)) static inline void __plic_irq_disable(unsigned int source)
{
    unsigned int hart_id = read_csr(mhartid);
    volatile unsigned int *current_ptr = (volatile unsigned int *)(PLIC_BASE_ADDR +
                                                                   PLIC_ENABLE_OFFSET +
                                                                   (hart_id << PLIC_ENABLE_SHIFT_PER_TARGET) +
                                                                   ((source >> 5) << 2));
    unsigned int current = *current_ptr;
    current = current & ~((1 << (source & 0x1F))); /* clear only this source's bit */
    *current_ptr = current;
}
/* Claim the highest-priority pending interrupt for this hart by reading
 * the claim/complete register; returns the interrupt id (0 = none). */
__attribute__((always_inline)) static inline unsigned int __plic_irq_claim(void)
{
    unsigned int hart_id = read_csr(mhartid);
    volatile unsigned int *claim_addr = (volatile unsigned int *)(PLIC_BASE_ADDR +
                                                                  PLIC_CLAIM_OFFSET +
                                                                  (hart_id << PLIC_CLAIM_SHIFT_PER_TARGET));
    return *claim_addr;
}
/* Signal completion of a previously claimed interrupt by writing its id
 * back to the same claim/complete register, re-enabling delivery of that
 * source to this hart. */
__attribute__((always_inline)) static inline void __plic_irq_complete(unsigned int source)
{
    unsigned int hart_id = read_csr(mhartid);
    volatile unsigned int *claim_addr = (volatile unsigned int *)(PLIC_BASE_ADDR +
                                                                  PLIC_CLAIM_OFFSET +
                                                                  (hart_id << PLIC_CLAIM_SHIFT_PER_TARGET));
    *claim_addr = source;
}
#endif /* end of __GNUC__ */
#endif

View File

@ -0,0 +1,88 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-10-03 Bernard The first version
*/
#ifndef RISCV_STACKFRAME_H
#define RISCV_STACKFRAME_H
#include "cpuport.h"
typedef struct rt_hw_stack_frame
{
rt_ubase_t epc; /* epc - epc - program counter */
rt_ubase_t ra; /* x1 - ra - return address for jumps */
rt_ubase_t mstatus; /* - machine status register */
rt_ubase_t gp; /* x3 - gp - global pointer */
rt_ubase_t tp; /* x4 - tp - thread pointer */
rt_ubase_t t0; /* x5 - t0 - temporary register 0 */
rt_ubase_t t1; /* x6 - t1 - temporary register 1 */
rt_ubase_t t2; /* x7 - t2 - temporary register 2 */
rt_ubase_t s0_fp; /* x8 - s0/fp - saved register 0 or frame pointer */
rt_ubase_t s1; /* x9 - s1 - saved register 1 */
rt_ubase_t a0; /* x10 - a0 - return value or function argument 0 */
rt_ubase_t a1; /* x11 - a1 - return value or function argument 1 */
rt_ubase_t a2; /* x12 - a2 - function argument 2 */
rt_ubase_t a3; /* x13 - a3 - function argument 3 */
rt_ubase_t a4; /* x14 - a4 - function argument 4 */
rt_ubase_t a5; /* x15 - a5 - function argument 5 */
#ifndef __riscv_32e
rt_ubase_t a6; /* x16 - a6 - function argument 6 */
rt_ubase_t a7; /* x17 - a7 - function argument 7 */
rt_ubase_t s2; /* x18 - s2 - saved register 2 */
rt_ubase_t s3; /* x19 - s3 - saved register 3 */
rt_ubase_t s4; /* x20 - s4 - saved register 4 */
rt_ubase_t s5; /* x21 - s5 - saved register 5 */
rt_ubase_t s6; /* x22 - s6 - saved register 6 */
rt_ubase_t s7; /* x23 - s7 - saved register 7 */
rt_ubase_t s8; /* x24 - s8 - saved register 8 */
rt_ubase_t s9; /* x25 - s9 - saved register 9 */
rt_ubase_t s10; /* x26 - s10 - saved register 10 */
rt_ubase_t s11; /* x27 - s11 - saved register 11 */
rt_ubase_t t3; /* x28 - t3 - temporary register 3 */
rt_ubase_t t4; /* x29 - t4 - temporary register 4 */
rt_ubase_t t5; /* x30 - t5 - temporary register 5 */
rt_ubase_t t6; /* x31 - t6 - temporary register 6 */
#endif
#ifdef ARCH_RISCV_FPU
rv_floatreg_t f0; /* f0 */
rv_floatreg_t f1; /* f1 */
rv_floatreg_t f2; /* f2 */
rv_floatreg_t f3; /* f3 */
rv_floatreg_t f4; /* f4 */
rv_floatreg_t f5; /* f5 */
rv_floatreg_t f6; /* f6 */
rv_floatreg_t f7; /* f7 */
rv_floatreg_t f8; /* f8 */
rv_floatreg_t f9; /* f9 */
rv_floatreg_t f10; /* f10 */
rv_floatreg_t f11; /* f11 */
rv_floatreg_t f12; /* f12 */
rv_floatreg_t f13; /* f13 */
rv_floatreg_t f14; /* f14 */
rv_floatreg_t f15; /* f15 */
rv_floatreg_t f16; /* f16 */
rv_floatreg_t f17; /* f17 */
rv_floatreg_t f18; /* f18 */
rv_floatreg_t f19; /* f19 */
rv_floatreg_t f20; /* f20 */
rv_floatreg_t f21; /* f21 */
rv_floatreg_t f22; /* f22 */
rv_floatreg_t f23; /* f23 */
rv_floatreg_t f24; /* f24 */
rv_floatreg_t f25; /* f25 */
rv_floatreg_t f26; /* f26 */
rv_floatreg_t f27; /* f27 */
rv_floatreg_t f28; /* f28 */
rv_floatreg_t f29; /* f29 */
rv_floatreg_t f30; /* f30 */
rv_floatreg_t f31; /* f31 */
#endif
}rt_hw_stack_frame_t;
#endif /* RISCV_STACKFRAME_H */

View File

@ -0,0 +1,110 @@
#include <rthw.h>
#include <rtthread.h>
#include "riscv-ops.h"
#include "rt_hw_stack_frame.h"
#define ISR_NUMBER 32
static volatile rt_hw_stack_frame_t *s_stack_frame;
static struct rt_irq_desc rv32irq_table[ISR_NUMBER];
void rt_show_stack_frame(void);
/**
* Temporary interrupt entry function
*
* @param mcause Machine Cause Register
* @return RT_NULL
*/
/**
 * Default handler installed for every vector: just report the unexpected
 * interrupt. Declared rt_weak so a BSP can override it.
 *
 * @param mcause Machine Cause register value (interrupt id)
 * @return always RT_NULL
 */
rt_weak rt_isr_handler_t rt_hw_interrupt_handle(rt_uint32_t mcause)
{
    rt_kprintf("UN-handled interrupt %d occurred!!!\n", mcause);
    return RT_NULL;
}
/**
* Interrupt entry function initialization
*/
rt_weak void rt_hw_interrupt_init(void)
{
int idx = 0;
for (idx = 0; idx < ISR_NUMBER; idx++)
{
rv32irq_table[idx].handler = (rt_isr_handler_t)rt_hw_interrupt_handle;
rv32irq_table[idx].param = RT_NULL;
}
}
/**
* Break Entry Function Binding
*
* @param vector interrupt number
* @param handler Break-in function requiring binding
* @param param NULL
* @param name NULL
* @return old handler
*/
/**
 * Bind a handler to an interrupt vector.
 *
 * @param vector  interrupt number (index into rv32irq_table)
 * @param handler handler to install; when RT_NULL the table is left untouched
 * @param param   opaque argument later passed to the handler
 * @param name    unused, kept for API compatibility
 * @return the previously installed handler, or RT_NULL when vector is
 *         out of range
 */
rt_weak rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
                                                 void *param, const char *name)
{
    rt_isr_handler_t old_handler = RT_NULL;

    /* 'vector' is signed: also reject negative values, which would index
     * outside rv32irq_table (the original only checked the upper bound) */
    if (vector >= 0 && vector < ISR_NUMBER)
    {
        old_handler = rv32irq_table[vector].handler;
        if (handler != RT_NULL)
        {
            rv32irq_table[vector].handler = (rt_isr_handler_t)handler;
            rv32irq_table[vector].param = param;
        }
    }
    return old_handler;
}
/**
* Query and Distribution Entry for Exception and Interrupt Sources
*
* @param mcause Machine Cause Register
*/
/**
 * Dispatch entry for exceptions and interrupts (rv32, M-mode).
 *
 * @param mcause Machine Cause register: bit 31 set => interrupt,
 *               clear => synchronous exception; low 5 bits = id
 */
rt_weak void rt_rv32_system_irq_handler(rt_uint32_t mcause)
{
    /* mscratch (CSR 0x340) is presumably loaded by the assembly trap entry
     * with a pointer to the saved register frame -- TODO confirm */
    rt_uint32_t mscratch = read_csr(0x340);
    rt_uint32_t irq_id = (mcause & 0x1F);
    rt_uint32_t exception = !(mcause & 0x80000000);
    if (exception)
    {
        /* synchronous exception: dump the saved registers; note execution
         * then returns to the trap path without halting */
        s_stack_frame = (volatile rt_hw_stack_frame_t *)(uintptr_t)mscratch;
        rt_show_stack_frame();
    }
    else
    {
        /* interrupt: invoke the handler installed for this id */
        rv32irq_table[irq_id].handler(irq_id, rv32irq_table[irq_id].param);
    }
}
/**
* Register Print on Exception
*/
/**
 * Print the registers captured in s_stack_frame (set by the exception
 * path of rt_rv32_system_irq_handler) plus the live mstatus CSR.
 * Caller-saved registers only; the frame layout is rt_hw_stack_frame_t.
 */
rt_weak void rt_show_stack_frame(void)
{
    rt_kprintf("Stack frame:\r\n----------------------------------------\r\n");
    rt_kprintf("ra      : 0x%08x\r\n", s_stack_frame->ra);
    rt_kprintf("mstatus : 0x%08x\r\n", read_csr(0x300));//mstatus
    rt_kprintf("t0      : 0x%08x\r\n", s_stack_frame->t0);
    rt_kprintf("t1      : 0x%08x\r\n", s_stack_frame->t1);
    rt_kprintf("t2      : 0x%08x\r\n", s_stack_frame->t2);
    rt_kprintf("a0      : 0x%08x\r\n", s_stack_frame->a0);
    rt_kprintf("a1      : 0x%08x\r\n", s_stack_frame->a1);
    rt_kprintf("a2      : 0x%08x\r\n", s_stack_frame->a2);
    rt_kprintf("a3      : 0x%08x\r\n", s_stack_frame->a3);
    rt_kprintf("a4      : 0x%08x\r\n", s_stack_frame->a4);
    rt_kprintf("a5      : 0x%08x\r\n", s_stack_frame->a5);
#ifndef __riscv_32e
    /* registers x16-x31 do not exist on RV32E */
    rt_kprintf("a6      : 0x%08x\r\n", s_stack_frame->a6);
    rt_kprintf("a7      : 0x%08x\r\n", s_stack_frame->a7);
    rt_kprintf("t3      : 0x%08x\r\n", s_stack_frame->t3);
    rt_kprintf("t4      : 0x%08x\r\n", s_stack_frame->t4);
    rt_kprintf("t5      : 0x%08x\r\n", s_stack_frame->t5);
    rt_kprintf("t6      : 0x%08x\r\n", s_stack_frame->t6);
#endif
}

View File

@ -0,0 +1,52 @@
### RT-Thread RV64支持
#### 1.概述
该目录提供RT-Thread标准版及SMART版本对rv64体系结构支持其中包括
| 文件名 | 文件内容 | 参考标准 |
| :---------------: | :------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
| atomic_rv64.c | 原子操作实现接口 | "A" Extension for Atomic Instructions, Version 2.1 |
| context_gcc.S | 线程上下文切换 | The RISC-V Instruction Set Manual Volume II: privileged supervisor-level ISA version 1.12 |
| cpuport_gcc.S | 线程统一入口 | |
| cpuport.c | 线程栈初始化 | |
| cpuport.h | 通用寄存器、浮点、向量寄存器个数定义,内存屏障接口 | The RISC-V Instruction Set Manual Volume II: privileged supervisor-level ISA version 1.12 |
| interrupt_gcc.S | 异常/中断处理、全局中断使能/关闭 | The RISC-V Instruction Set Manual Volume II: privileged supervisor-level ISA version 1.12 |
| io.h | 以字节、字、双字读、写IO地址接口 | |
| encoding.h | CSR寄存器定义 | The RISC-V Instruction Set Manual Volume II: privileged supervisor-level ISA version 1.12 |
| vector_encoding.h | vector相关指令定义 | RISC-V "V" Standard Extension for Vector Operations, Version 1.0 |
| ext_context.h | 浮点/向量上下文保存与恢复 | RISC-V "V" Standard Extension for Vector Operations, Version 1.0 "F" Extension for Single-Precision Floating-Point Version 2.2 |
| mmu.c | rv64 sv39 mmu管理接口 | The RISC-V Instruction Set Manual Volume II: privileged supervisor-level ISA version 1.12 |
| mmu.h | rv64 sv39 mmu页表相关定义 | The RISC-V Instruction Set Manual Volume II: privileged supervisor-level ISA version 1.12 |
| asid.c | rv64 mmu asid支持 | The RISC-V Instruction Set Manual Volume II: privileged supervisor-level ISA version 1.12 |
| riscv_mmu.c | 使能/关闭S态访问用户态页表 | The RISC-V Instruction Set Manual Volume II: privileged supervisor-level ISA version 1.12 |
| sbi.c | 通过ecall调用SBI相关信息接口 | RISC-V Supervisor Binary Interface Specification Version 1.0 |
| sbi.h | SBI spec相关接口定义 | RISC-V Supervisor Binary Interface Specification Version 1.0 |
| stack.h | 线程栈数据定义 | The RISC-V Instruction Set Manual Volume II: privileged supervisor-level ISA version 1.12 |
| stackframe.h | 线程上下文保存/恢复 | The RISC-V Instruction Set Manual Volume II: privileged supervisor-level ISA version 1.12 |
| syscall_c.c | 系统调用处理 | |
| tick.c | S态时钟初始化及中断处理 | |
| tlb.h | tlb刷新/无效接口 | The RISC-V Instruction Set Manual Volume II: privileged supervisor-level ISA version 1.12 |
| trap.c | 异常/中断处理,包括中断分发及用户态异常处理 | The RISC-V Instruction Set Manual Volume II: privileged supervisor-level ISA version 1.12 |
#### 2.运行模式配置
| 选项 | 默认值 | 说明 |
| --------------- | --- | ---------------------------------------------------------------------------------------------------- |
| RISCV_VIRT64_S_MODE | 打开 | 系统启动后是否运行在S态关闭时系统将运行在M态目前系统存在bug尚不可直接运行在M态故此开关必须打开 |
| RT_USING_SMART | 关闭 | 是否开启RTThread SMART版本开启后系统运行在S+U态且会开启MMU页表(satp);关闭时系统仅运行在S态MMU关闭(satp为bare translation) |
| ARCH_USING_ASID | 关闭 | MMU是否支持asid |
#### 3.移植指南
1增加新的CPU支持
创建`libcpu/risc-v/<VENDOR_NAME>/<CPU_NAME>`新目录,同时在`libcpu/risc-v/SConscript`中增加该CPU。
2PLIC中断控制器支持
`libcpu/risc-v/virt64/plic.[c|h]`提供了符合《RISC-V Platform-Level Interrupt Controller Specification version 1.0.0 》标准的PLIC中断控制器驱动代码可作为移植参考。
3串口uart支持
目前串口驱动在各bsp目录下可参考`bsp/qemu-virt64-riscv/driver/drv_uart.[c|h]`

View File

@ -0,0 +1,12 @@
# RT-Thread building script for the common64 component
from building import *

cwd = GetCurrentDir()
CPPPATH = [cwd]

# collect C, C++ and GCC-flavoured assembly sources in this directory
src = Glob('*.c') + Glob('*.cpp') + Glob('*_gcc.S')

# ASID support is optional; drop its source unless the option is enabled
if not GetDepend('ARCH_USING_ASID'):
    SrcRemove(src, ['asid.c'])

group = DefineGroup('libcpu', src, depend=[''], CPPPATH=CPPPATH)

Return('group')

View File

@ -0,0 +1,86 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-07-12 RT-Thread first version.
*/
#define DBG_TAG "hw.asid"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <rtthread.h>
#include <board.h>
#include <cache.h>
#include <mm_aspace.h>
#include <mm_page.h>
#include <mmu.h>
#include <riscv_mmu.h>
#include <tlb.h>
static rt_uint8_t ASID_BITS = 0;
static rt_uint32_t next_asid;
static rt_uint64_t global_asid_generation;
#define ASID_MASK ((1 << ASID_BITS) - 1)
#define ASID_FIRST_GENERATION (1 << ASID_BITS)
#define MAX_ASID ASID_FIRST_GENERATION
/**
 * Probe how many ASID bits the hart implements.
 *
 * satp.ASID is WARL: write all-ones into the ASID field, read it back and
 * count the consecutive low bits that stuck (at most 16 for Sv39/48/57).
 * Also seeds the generation counter and the first ASID to hand out.
 */
void rt_hw_asid_init(void)
{
    rt_uint64_t satp_reg = read_csr(satp);
    satp_reg |= (((rt_uint64_t)0xffff) << PPN_BITS); /* ASID field sits just above the PPN */
    write_csr(satp, satp_reg);
    __asm__ volatile("sfence.vma x0, x0");
    unsigned short valid_asid_bit = ((read_csr(satp) >> PPN_BITS) & 0xffff);

    // The maximal value of ASIDLEN, is 9 for Sv32 or 16 for Sv39, Sv48, and Sv57
    for (unsigned i = 0; i < 16; i++)
    {
        if (!(valid_asid_bit & 0x1))
        {
            break;
        }

        valid_asid_bit >>= 1;
        ASID_BITS++;
    }

    global_asid_generation = ASID_FIRST_GENERATION;
    next_asid = 1;
}
/**
 * Return a valid hardware ASID for 'aspace', allocating a new one when the
 * aspace's stored ASID belongs to an older generation.
 *
 * Generation scheme: the bits above ASID_BITS act as an epoch. When all
 * 2^ASID_BITS ids of the current epoch are used up, the epoch is advanced,
 * the local TLB is flushed once, and allocation restarts from 1.
 *
 * @return the ASID to program into satp (low ASID_BITS bits only)
 */
static rt_uint64_t _asid_acquire(rt_aspace_t aspace)
{
    if ((aspace->asid ^ global_asid_generation) >> ASID_BITS) // not same generation
    {
        if (next_asid != MAX_ASID)
        {
            aspace->asid = global_asid_generation | next_asid;
            next_asid++;
        }
        else
        {
            // scroll to next generation
            global_asid_generation += ASID_FIRST_GENERATION;
            next_asid = 1;
            /* stale translations of recycled ids must not survive */
            rt_hw_tlb_invalidate_all_local();

            aspace->asid = global_asid_generation | next_asid;
            next_asid++;
        }
    }

    return aspace->asid & ASID_MASK;
}
/**
 * Install 'pgtbl' (physical address) into satp together with the aspace's
 * current-generation ASID, then fence only TLB entries tagged with that
 * ASID (rs1 = x0 selects all addresses, rs2 = asid).
 */
void rt_hw_asid_switch_pgtbl(struct rt_aspace *aspace, rt_ubase_t pgtbl)
{
    rt_uint64_t asid = _asid_acquire(aspace);
    /* satp = MODE | ASID | PPN */
    write_csr(satp, (((size_t)SATP_MODE) << SATP_MODE_OFFSET) |
                    (asid << PPN_BITS) |
                    ((rt_ubase_t)pgtbl >> PAGE_OFFSET_BIT));
    asm volatile("sfence.vma x0,%0"::"r"(asid):"memory");
}

View File

@ -0,0 +1,26 @@
/*
* Copyright (c) 2006-2023 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-03-12 WangXiaoyao the first version
*/
#ifndef __ASM_GENERIC_H__
#define __ASM_GENERIC_H__
/* use to mark a start point where every task start from */
#define START_POINT(funcname) \
.global funcname; \
.type funcname, %function; \
funcname: \
.cfi_sections .debug_frame, .eh_frame; \
.cfi_startproc; \
.cfi_undefined ra
#define START_POINT_END(name) \
.cfi_endproc; \
.size name, .-name;
#endif /* __ASM_GENERIC_H__ */

View File

@ -0,0 +1,127 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-10-18 Shell Add backtrace support
*/
#define DBG_TAG "hw.backtrace"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <rtthread.h>
#include <mm_aspace.h>
#include "riscv_mmu.h"
#include "stack.h"
#define WORD sizeof(rt_base_t)
#define ARCH_CONTEXT_FETCH(pctx, id) (*(((unsigned long *)pctx) + (id)))
/**
 * Unwind one kernel-space frame: the RISC-V frame layout used here places
 * the caller's fp at fp[-2] and the return address at fp[-1].
 *
 * @return RT_EOK on success, -RT_ERROR when the chain terminates
 *         (a frame pointing at itself)
 */
rt_inline rt_err_t _bt_kaddr(rt_ubase_t *fp, struct rt_hw_backtrace_frame *frame)
{
    frame->fp = *(fp - 2);
    frame->pc = *(fp - 1);

    /* a self-referencing frame pointer marks the end of the walk */
    return ((rt_ubase_t)fp == frame->fp) ? -RT_ERROR : RT_EOK;
}
#ifdef RT_USING_SMART
#include <lwp_arch.h>
#include <lwp_user_mm.h>
/**
 * Unwind one user-space frame: fetch fp/pc through lwp_data_get() so an
 * unmapped or protected address fails gracefully instead of faulting.
 *
 * @return RT_EOK on success, -RT_EFAULT when user memory is inaccessible,
 *         -RT_ERROR when the chain terminates (frame points at itself)
 */
rt_inline rt_err_t _bt_uaddr(rt_lwp_t lwp, rt_ubase_t *fp, struct rt_hw_backtrace_frame *frame)
{
    rt_err_t rc;
    /* frame layout: fp[-2] = caller fp, fp[-1] = return address */
    if (lwp_data_get(lwp, &frame->fp, fp - 2, WORD) != WORD)
    {
        rc = -RT_EFAULT;
    }
    else if (lwp_data_get(lwp, &frame->pc, fp - 1, WORD) != WORD)
    {
        rc = -RT_EFAULT;
    }
    else if ((rt_ubase_t)fp == frame->fp)
    {
        rc = -RT_ERROR;
    }
    else
    {
        frame->pc -= 0; /* no-op: placeholder for a return-address adjustment */
        rc = RT_EOK;
    }
    return rc;
}
#endif /* RT_USING_SMART */
/**
 * Unwind one step: replace *frame with its caller's frame.
 *
 * Chooses the kernel- or user-space fetch path depending on where fp is
 * mapped. Requires fp to be non-null and 8-byte aligned.
 *
 * @param thread the thread being walked (its lwp selects the user path)
 * @param frame  in/out frame; frame->fp is the frame pointer to follow
 * @return RT_EOK, or a negative error when fp is invalid/unmapped or the
 *         chain terminates
 */
rt_err_t rt_hw_backtrace_frame_unwind(rt_thread_t thread, struct rt_hw_backtrace_frame *frame)
{
    rt_err_t rc = -RT_ERROR;
    rt_uintptr_t *fp = (rt_uintptr_t *)frame->fp;

    if (fp && !((long)fp & 0x7)) /* must be aligned to a double word */
    {
#ifdef RT_USING_SMART
        if (thread->lwp)
        {
            void *lwp = thread->lwp;
            void *this_lwp = lwp_self();
            /* kernel-mapped in the current aspace: read directly */
            if (this_lwp == lwp && rt_hw_mmu_v2p(((rt_lwp_t)lwp)->aspace, fp) != ARCH_MAP_FAILED)
            {
                rc = _bt_kaddr(fp, frame);
            }
            /* otherwise go through the guarded user-memory accessor */
            else if (lwp_user_accessible_ext(lwp, (void *)fp, WORD))
            {
                rc = _bt_uaddr(lwp, fp, frame);
            }
            else
            {
                rc = -RT_EFAULT;
            }
        }
        else
#endif
        if ((rt_kmem_v2p(fp) != ARCH_MAP_FAILED))
        {
            rc = _bt_kaddr(fp, frame);
        }
        else
        {
            rc = -RT_EINVAL;
        }
    }
    else
    {
        rc = -RT_EFAULT;
    }
    return rc;
}
/**
 * Seed a backtrace from a suspended thread's switch frame.
 *
 * thread->sp points at the rt_hw_switch_frame saved by RESERVE_CONTEXT;
 * RA becomes the resume pc and S0 the starting frame pointer.
 *
 * @param thread thread to inspect (must not be running on another core)
 * @param frame  output frame
 * @return RT_EOK, or -RT_EINVAL on a null argument
 */
rt_err_t rt_hw_backtrace_frame_get(rt_thread_t thread, struct rt_hw_backtrace_frame *frame)
{
    rt_err_t rc;

    if (!thread || !frame)
    {
        rc = -RT_EINVAL;
    }
    else
    {
        rt_hw_switch_frame_t sframe = thread->sp;
        frame->pc = sframe->regs[RT_HW_SWITCH_CONTEXT_RA];
        frame->fp = sframe->regs[RT_HW_SWITCH_CONTEXT_S0]; /* stray ';;' removed */
        rc = RT_EOK;
    }
    return rc;
}

View File

@ -0,0 +1,115 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/28 Bernard The unify RISC-V porting implementation
* 2018/12/27 Jesven Add SMP support
* 2021/02/02 lizhirui Add userspace support
* 2022/10/22 Shell Support User mode RVV;
* Trimming process switch context
*/
#include "cpuport.h"
#include "stackframe.h"
#define _REG_IDX(name) RT_HW_SWITCH_CONTEXT_##name
#define REG_IDX(name) _REG_IDX(name)
.macro SAVE_REG reg, index
STORE \reg, \index*REGBYTES(sp)
.endm
.macro LOAD_REG reg, index
LOAD \reg, \index*REGBYTES(sp)
.endm
.macro RESERVE_CONTEXT
addi sp, sp, -(RT_HW_SWITCH_CONTEXT_SIZE * REGBYTES)
SAVE_REG tp, REG_IDX(TP)
SAVE_REG ra, REG_IDX(RA)
SAVE_REG s0, REG_IDX(S0)
SAVE_REG s1, REG_IDX(S1)
SAVE_REG s2, REG_IDX(S2)
SAVE_REG s3, REG_IDX(S3)
SAVE_REG s4, REG_IDX(S4)
SAVE_REG s5, REG_IDX(S5)
SAVE_REG s6, REG_IDX(S6)
SAVE_REG s7, REG_IDX(S7)
SAVE_REG s8, REG_IDX(S8)
SAVE_REG s9, REG_IDX(S9)
SAVE_REG s10, REG_IDX(S10)
SAVE_REG s11, REG_IDX(S11)
csrr s11, sstatus
li s10, (SSTATUS_SPP)
or s11, s11, s10
SAVE_REG s11, REG_IDX(SSTATUS)
.endm
.macro RESTORE_CONTEXT
LOAD_REG s11, REG_IDX(SSTATUS)
csrw sstatus, s11
LOAD_REG s11, REG_IDX(S11)
LOAD_REG s10, REG_IDX(S10)
LOAD_REG s9, REG_IDX(S9)
LOAD_REG s8, REG_IDX(S8)
LOAD_REG s7, REG_IDX(S7)
LOAD_REG s6, REG_IDX(S6)
LOAD_REG s5, REG_IDX(S5)
LOAD_REG s4, REG_IDX(S4)
LOAD_REG s3, REG_IDX(S3)
LOAD_REG s2, REG_IDX(S2)
LOAD_REG s1, REG_IDX(S1)
LOAD_REG s0, REG_IDX(S0)
LOAD_REG ra, REG_IDX(RA)
LOAD_REG tp, REG_IDX(TP)
addi sp, sp, RT_HW_SWITCH_CONTEXT_SIZE * REGBYTES
csrw sepc, ra
.endm
/*
* void rt_hw_context_switch_to(rt_ubase_t to);
*
* a0 --> to SP pointer
*/
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    /* a0 = address of the 'to' thread's saved-SP slot; load its stack */
    LOAD sp, (a0)

    /* a0 := current thread; s1 keeps a copy across the next call */
    call rt_thread_self
    mv s1, a0

#ifdef RT_USING_SMART
    /* switch to the thread's address space (takes the thread in a0) */
    call lwp_aspace_switch
#endif
    RESTORE_CONTEXT
    sret
/*
* void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to);
*
* a0 --> from SP pointer
* a1 --> to SP pointer
*
* It should only be used on local interrupt disable
*/
.globl rt_hw_context_switch
rt_hw_context_switch:
    /* save the 'from' thread: push its callee-saved frame and record sp */
    RESERVE_CONTEXT
    STORE sp, (a0)

    // restore to thread SP
    LOAD sp, (a1)

    // restore Address Space
    call rt_thread_self
    mv s1, a0

#ifdef RT_USING_SMART
    call lwp_aspace_switch
#endif

    RESTORE_CONTEXT
    sret

View File

@ -0,0 +1,139 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/28 Bernard The unify RISC-V porting code.
* 2021-02-11 lizhirui add gp support
* 2021-11-19 JasonHu add fpu support
*/
#include <rthw.h>
#include <rtthread.h>
#include "cpuport.h"
#include "stack.h"
#include <sbi.h>
#include <encoding.h>
#ifdef ARCH_RISCV_FPU
#define K_SSTATUS_DEFAULT_BASE (SSTATUS_SPP | SSTATUS_SPIE | SSTATUS_SUM | SSTATUS_FS)
#else
#define K_SSTATUS_DEFAULT_BASE (SSTATUS_SPP | SSTATUS_SPIE | SSTATUS_SUM)
#endif
#ifdef ARCH_RISCV_VECTOR
#define K_SSTATUS_DEFAULT (K_SSTATUS_DEFAULT_BASE | SSTATUS_VS)
#else
#define K_SSTATUS_DEFAULT K_SSTATUS_DEFAULT_BASE
#endif
#ifdef RT_USING_SMART
#include <lwp_arch.h>
#endif
/**
* @brief from thread used interrupt context switch
*
*/
volatile rt_ubase_t rt_interrupt_from_thread = 0;
/**
* @brief to thread used interrupt context switch
*
*/
volatile rt_ubase_t rt_interrupt_to_thread = 0;
/**
* @brief flag to indicate context switch in interrupt or not
*
*/
volatile rt_ubase_t rt_thread_switch_interrupt_flag = 0;
/**
 * Carve a zeroed rt_hw_switch_frame below *sp and preset only RA and
 * SSTATUS, so RESTORE_CONTEXT "returns" into the thread entry.
 *
 * @param sp      top of the new thread stack (already 16-byte aligned)
 * @param ra      address RESTORE_CONTEXT will sret to (thread entry shim)
 * @param sstatus initial sstatus CSR image for the thread
 * @return the frame base, i.e. the thread's initial saved SP
 */
void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus)
{
    rt_hw_switch_frame_t frame = (rt_hw_switch_frame_t)
        ((rt_ubase_t)sp - sizeof(struct rt_hw_switch_frame));

    rt_memset(frame, 0, sizeof(struct rt_hw_switch_frame));

    frame->regs[RT_HW_SWITCH_CONTEXT_RA] = ra;
    frame->regs[RT_HW_SWITCH_CONTEXT_SSTATUS] = sstatus;

    return (void *)frame;
}
/**
 * Return the index of the executing CPU.
 *
 * Single-core port: always 0.
 */
int rt_hw_cpu_id(void)
{
    return 0;
}
/**
* This function will initialize thread stack, we assuming
* when scheduler restore this new thread, context will restore
* an entry to user first application
*
* s0-s11, ra, sstatus, a0
* @param tentry the entry of thread
* @param parameter the parameter of entry
* @param stack_addr the beginning stack address
* @param texit the function will be called when thread exit
*
* @return stack address
*/
rt_uint8_t *rt_hw_stack_init(void *tentry,
                             void *parameter,
                             rt_uint8_t *stack_addr,
                             void *texit)
{
    rt_ubase_t *sp = (rt_ubase_t *)stack_addr;

    // we use a strict alignment requirement for Q extension
    sp = (rt_ubase_t *)RT_ALIGN_DOWN((rt_ubase_t)sp, 16);

    /* layout consumed by _rt_thread_entry (top to bottom):
     * tentry, parameter, texit, one pad word to keep 16-byte alignment */
    (*--sp) = (rt_ubase_t)tentry;
    (*--sp) = (rt_ubase_t)parameter;
    (*--sp) = (rt_ubase_t)texit;
    --sp; /* alignment */

    /* compatible to RESTORE_CONTEXT */
    extern void _rt_thread_entry(void);
    return (rt_uint8_t *)_rt_hw_stack_init(sp, (rt_ubase_t)_rt_thread_entry, K_SSTATUS_DEFAULT);
}
/*
* #ifdef RT_USING_SMP
* void rt_hw_context_switch_interrupt(void *context, rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
* #else
* void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to);
* #endif
*/
#ifndef RT_USING_SMP
/**
 * Request a context switch from interrupt context (UP build).
 *
 * Only records the request in the three globals above; the switch itself
 * is presumably performed by the trap-exit path that reads them -- see
 * the interrupt assembly (confirm against interrupt_gcc.S).
 *
 * @param from        address of the 'from' thread's SP save slot
 * @param to          address of the 'to' thread's SP save slot
 * @param from_thread unused in the UP build
 * @param to_thread   unused in the UP build
 */
void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to, rt_thread_t from_thread, rt_thread_t to_thread)
{
    /* keep the original 'from' across nested requests: only the first
     * pending request records it, later ones just retarget 'to' */
    if (rt_thread_switch_interrupt_flag == 0)
        rt_interrupt_from_thread = from;

    rt_interrupt_to_thread = to;
    rt_thread_switch_interrupt_flag = 1;

    return;
}
#endif /* end of RT_USING_SMP */
/**
 * Shut the CPU down via the SBI shutdown call; never returns.
 *
 * Interrupts are disabled first so nothing can preempt the final ecall.
 */
void rt_hw_cpu_shutdown(void)
{
    rt_kprintf("shutdown...\n");

    /* the saved interrupt level is deliberately discarded: execution never
     * resumes (the original stored it in an unused rt_uint32_t, which also
     * truncated the rt_base_t return value on RV64) */
    (void)rt_hw_interrupt_disable();
    sbi_shutdown();

    while (1)
        ;
}
/* Record the current process id for debugging; not implemented on this
 * port -- presumably kept so common code can link (confirm callers). */
void rt_hw_set_process_id(int pid)
{
    // TODO
}

View File

@ -0,0 +1,53 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-10-03 Bernard The first version
*/
#ifndef CPUPORT_H__
#define CPUPORT_H__
#include <rtconfig.h>
#include <opcode.h>
#ifndef __ASSEMBLY__
#ifdef RT_USING_SMP
typedef union {
unsigned long slock;
struct __arch_tickets {
unsigned short owner;
unsigned short next;
} tickets;
} rt_hw_spinlock_t;
#endif
#include <rtcompiler.h>
/* Full data barrier: order all prior memory accesses before later ones
 * (RISC-V 'fence' with default iorw,iorw semantics). */
rt_inline void rt_hw_dsb(void)
{
    __asm__ volatile("fence":::"memory");
}
/* Data memory barrier: same 'fence' instruction as rt_hw_dsb(); RISC-V
 * draws no DSB/DMB distinction, the two names exist for API symmetry. */
rt_inline void rt_hw_dmb(void)
{
    __asm__ volatile("fence":::"memory");
}
/* Instruction barrier: fence.i (via the OPC_FENCE_I opcode macro)
 * synchronizes the instruction stream with prior stores. */
rt_inline void rt_hw_isb(void)
{
    __asm__ volatile(OPC_FENCE_I:::"memory");
}
int rt_hw_cpu_id(void);
#endif
#endif
#ifdef RISCV_U_MODE
/* NOTE(review): this block sits after the include guard's #endif above, so
 * it is re-evaluated on every inclusion; harmless (identical redefinition)
 * but consider moving it inside the guard. */
#define RISCV_USER_ENTRY 0xFFFFFFE000000000ULL
#endif

View File

@ -0,0 +1,25 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-19 RT-Thread the first version
*/
#include "cpuport.h"
#include "stackframe.h"
#include "asm-generic.h"
/* First code every new thread runs. The stack laid out by
 * rt_hw_stack_init holds (from sp upward): pad word, texit, parameter,
 * tentry -- each REGBYTES wide. */
START_POINT(_rt_thread_entry)
    LOAD ra, REGBYTES(sp) /* ra := texit (thread exit hook) */
    addi sp, sp, 2 * REGBYTES
    LOAD a0, (sp) /* a0 := parameter for the entry */
    LOAD t0, REGBYTES(sp) /* t0 := tentry (thread entry point) */
    addi sp, sp, 2 * REGBYTES
    mv s1, ra /* keep texit in a callee-saved reg across the call */
    jalr t0 /* tentry(parameter) */
    jalr s1 /* entry returned: call texit() */
    j . /* never here */
START_POINT_END(_rt_thread_entry)

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,73 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-10 RT-Thread the first version
*/
#ifndef __EXT_CONTEXT_H__
#define __EXT_CONTEXT_H__
#include <rtconfig.h>
#ifdef ARCH_RISCV_FPU
/* 32 fpu register */
#define CTX_FPU_REG_NR 32
#else
#define CTX_FPU_REG_NR 0
#endif /* ARCH_RISCV_FPU */
#ifdef __ASSEMBLY__
/**
* ==================================
* RISC-V D ISA (Floating)
* ==================================
*/
#ifdef ARCH_RISCV_FPU
#define FPU_CTX_F0_OFF (REGBYTES * 0) /* offsetof(fpu_context_t, fpustatus.f[0]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F1_OFF (REGBYTES * 1) /* offsetof(fpu_context_t, fpustatus.f[1]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F2_OFF (REGBYTES * 2) /* offsetof(fpu_context_t, fpustatus.f[2]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F3_OFF (REGBYTES * 3) /* offsetof(fpu_context_t, fpustatus.f[3]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F4_OFF (REGBYTES * 4) /* offsetof(fpu_context_t, fpustatus.f[4]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F5_OFF (REGBYTES * 5) /* offsetof(fpu_context_t, fpustatus.f[5]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F6_OFF (REGBYTES * 6) /* offsetof(fpu_context_t, fpustatus.f[6]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F7_OFF (REGBYTES * 7) /* offsetof(fpu_context_t, fpustatus.f[7]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F8_OFF (REGBYTES * 8) /* offsetof(fpu_context_t, fpustatus.f[8]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F9_OFF (REGBYTES * 9) /* offsetof(fpu_context_t, fpustatus.f[9]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F10_OFF (REGBYTES * 10) /* offsetof(fpu_context_t, fpustatus.f[10]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F11_OFF (REGBYTES * 11) /* offsetof(fpu_context_t, fpustatus.f[11]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F12_OFF (REGBYTES * 12) /* offsetof(fpu_context_t, fpustatus.f[12]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F13_OFF (REGBYTES * 13) /* offsetof(fpu_context_t, fpustatus.f[13]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F14_OFF (REGBYTES * 14) /* offsetof(fpu_context_t, fpustatus.f[14]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F15_OFF (REGBYTES * 15) /* offsetof(fpu_context_t, fpustatus.f[15]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F16_OFF (REGBYTES * 16) /* offsetof(fpu_context_t, fpustatus.f[16]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F17_OFF (REGBYTES * 17) /* offsetof(fpu_context_t, fpustatus.f[17]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F18_OFF (REGBYTES * 18) /* offsetof(fpu_context_t, fpustatus.f[18]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F19_OFF (REGBYTES * 19) /* offsetof(fpu_context_t, fpustatus.f[19]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F20_OFF (REGBYTES * 20) /* offsetof(fpu_context_t, fpustatus.f[20]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F21_OFF (REGBYTES * 21) /* offsetof(fpu_context_t, fpustatus.f[21]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F22_OFF (REGBYTES * 22) /* offsetof(fpu_context_t, fpustatus.f[22]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F23_OFF (REGBYTES * 23) /* offsetof(fpu_context_t, fpustatus.f[23]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F24_OFF (REGBYTES * 24) /* offsetof(fpu_context_t, fpustatus.f[24]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F25_OFF (REGBYTES * 25) /* offsetof(fpu_context_t, fpustatus.f[25]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F26_OFF (REGBYTES * 26) /* offsetof(fpu_context_t, fpustatus.f[26]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F27_OFF (REGBYTES * 27) /* offsetof(fpu_context_t, fpustatus.f[27]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F28_OFF (REGBYTES * 28) /* offsetof(fpu_context_t, fpustatus.f[28]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F29_OFF (REGBYTES * 29) /* offsetof(fpu_context_t, fpustatus.f[29]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F30_OFF (REGBYTES * 30) /* offsetof(fpu_context_t, fpustatus.f[30]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F31_OFF (REGBYTES * 31) /* offsetof(fpu_context_t, fpustatus.f[31]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#endif /* ARCH_RISCV_FPU */
#endif /* __ASSEMBLY__ */
#ifdef ARCH_RISCV_VECTOR
#include "rvv_context.h"
#else /* !ARCH_RISCV_VECTOR */
#define CTX_VECTOR_REG_NR 0
#endif /* ARCH_RISCV_VECTOR */
#endif /* __EXT_CONTEXT_H__ */

View File

@ -0,0 +1,99 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/02 Bernard The first version
* 2018/12/27 Jesven Add SMP schedule
* 2021/02/02 lizhirui Add userspace support
* 2021/12/24 JasonHu Add user setting save/restore
* 2022/10/22 Shell Support kernel mode RVV;
* Rewrite trap handling routine
*/
#include "cpuport.h"
#include "encoding.h"
#include "stackframe.h"
.align 2
.global trap_entry
.global debug_check_sp
trap_entry:
    /* distinguish a trap taken from user mode from one taken in kernel:
     * in user mode sscratch holds the kernel stack pointer, in kernel
     * mode it is zero (cleared below), so after the swap sp != 0 means
     * "came from user" */
    csrrw sp, sscratch, sp
    bnez sp, _save_context

    // BE REALLY careful with sscratch,
    // if it's wrong, we could loop here forever
    // or access random memory and see things totally
    // messy after a long time and don't even know why
_from_kernel:
    /* trap from kernel: the swap parked the kernel sp in sscratch,
     * read it back */
    csrr sp, sscratch
    j _save_context

_save_context:
    SAVE_ALL
    // clear sscratch to say 'now in kernel mode'
    csrw sscratch, zero
    RESTORE_SYS_GP

    // now we are ready to enter interrupt / exception handler
_distinguish_syscall:
    csrr t0, scause
#ifdef RT_USING_SMART
    // TODO swap 8 with config macro name
    /* scause == 8: environment call from U-mode, i.e. a syscall */
    li t1, 8
    bne t0, t1, _handle_interrupt_and_exception
    call syscall_entry
    // syscall never return here
#endif

_handle_interrupt_and_exception:
    mv a0, t0
    /* a1 := stval (faulting address / trap value), cleared atomically */
    csrrc a1, stval, zero
    csrr a2, sepc
    // sp as exception frame pointer
    mv a3, sp
    call handle_trap

_interrupt_exit:
    /* the handler may have requested a context switch; consume the flag */
    la s0, rt_thread_switch_interrupt_flag
    lw s2, 0(s0)
    beqz s2, _resume_execution
    sw zero, 0(s0)

_context_switch:
    la t0, rt_interrupt_from_thread
    LOAD a0, 0(t0)
    la t0, rt_interrupt_to_thread
    LOAD a1, 0(t0)
    /* clear SPIE so interrupts stay masked across the switch-out path */
    csrr t0, sstatus
    andi t0, t0, ~SSTATUS_SPIE
    csrw sstatus, t0
    jal rt_hw_context_switch

_resume_execution:
#ifdef RT_USING_SMART
    /* returning to user mode (saved SPP clear)? take the user-return path */
    LOAD t0, FRAME_OFF_SSTATUS(sp)
    andi t0, t0, SSTATUS_SPP
    bnez t0, _resume_kernel
    call arch_ret_to_user
#endif

_resume_kernel:
    RESTORE_ALL
    csrw sscratch, zero
    sret
/* Re-enable interrupts: set the sstatus bits passed in a0 (the value
 * previously returned by rt_hw_interrupt_disable). */
.global rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    csrs sstatus, a0 /* restore to old csr */
    jr ra
/* Disable interrupts: atomically clear sstatus.SIE (bit 1) and return the
 * previous sstatus value in a0 for a later rt_hw_interrupt_enable. */
.global rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    csrrci a0, sstatus, 2 /* clear SIE */
    jr ra

View File

@ -0,0 +1,52 @@
/*
* Copyright (c) 2019-2020, Xim
*
* SPDX-License-Identifier: Apache-2.0
*
*/
#ifndef ARCH_IO_H
#define ARCH_IO_H
#include <rtthread.h>
#define RISCV_FENCE(p, s) \
__asm__ __volatile__ ("fence " #p "," #s : : : "memory")
/* These barriers need to enforce ordering on both devices or memory. */
#define mb() RISCV_FENCE(iorw,iorw)
#define rmb() RISCV_FENCE(ir,ir)
#define wmb() RISCV_FENCE(ow,ow)
#define __arch_getl(a) (*(unsigned int *)(a))
#define __arch_putl(v, a) (*(unsigned int *)(a) = (v))
#define dmb() mb()
#define __iormb() rmb()
#define __iowmb() wmb()
/* Store a 32-bit value to an MMIO register, with a write barrier before
 * the store so prior memory accesses are ordered ahead of it. */
static inline void writel(uint32_t val, volatile void *addr)
{
    __iowmb();
    __arch_putl(val, addr);
}
/* Load a 32-bit value from an MMIO register, with a read barrier after
 * the load so later accesses cannot be hoisted above it. */
static inline uint32_t readl(const volatile void *addr)
{
    uint32_t val;

    val = __arch_getl(addr);
    __iormb();
    return val;
}
/* writel() at a byte offset from a device base address. */
static inline void write_reg(
    uint32_t val, volatile void *addr, unsigned offset)
{
    writel(val, (void *)((rt_size_t)addr + offset));
}
/* readl() at a byte offset from a device base address. */
static inline uint32_t read_reg(
    const volatile void *addr, unsigned offset)
{
    return readl((void *)((rt_size_t)addr + offset));
}
#endif // ARCH_IO_H

View File

@ -0,0 +1,724 @@
/*
* Copyright (c) 2006-2025 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-01-30 lizhirui first version
* 2022-12-13 WangXiaoyao Port to new mm
* 2023-10-12 Shell Add permission control API
*/
#include <rtthread.h>
#include <stddef.h>
#include <stdint.h>
#define DBG_TAG "hw.mmu"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <board.h>
#include <cache.h>
#include <mm_aspace.h>
#include <mm_page.h>
#include <mmu.h>
#include <riscv_mmu.h>
#include <tlb.h>
#ifdef RT_USING_SMART
#include <board.h>
#include <ioremap.h>
#include <lwp_user_mm.h>
#endif
#ifndef RT_USING_SMART
#define USER_VADDR_START 0
#endif
static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr, size_t size);
static void *current_mmu_table = RT_NULL;
volatile __attribute__((aligned(4 * 1024)))
rt_ubase_t MMUTable[__SIZE(VPN2_BIT)];
/**
* @brief Switch the current address space to the specified one.
*
* This function is responsible for switching the address space by updating the page table
* and related hardware state. The behavior depends on whether the architecture supports
* Address Space Identifiers (ASIDs), devided by macro definition of ARCH_USING_ASID.
*
* @param aspace Pointer to the address space structure containing the new page table.
*
* @note If ASID is supported (`ARCH_USING_ASID` is defined), the function will call
* `rt_hw_asid_switch_pgtbl` to switch the page table and update the ASID.
* Otherwise, it will directly write the `satp` CSR to switch the page table
* and invalidate the TLB.
*/
#ifdef ARCH_USING_ASID
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
    /* satp takes a physical address; translate the table's kernel VA first */
    uintptr_t page_table = (uintptr_t)rt_kmem_v2p(aspace->page_table);
    current_mmu_table = aspace->page_table;

    /* ASID-aware path: updates satp and the ASID tag in one step */
    rt_hw_asid_switch_pgtbl(aspace, page_table);
}

#else /* !ARCH_USING_ASID */
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
    uintptr_t page_table = (uintptr_t)rt_kmem_v2p(aspace->page_table);
    current_mmu_table = aspace->page_table;

    /* install the new root table: translation mode bits | root PPN */
    write_csr(satp, (((size_t)SATP_MODE) << SATP_MODE_OFFSET) |
                        ((rt_ubase_t)page_table >> PAGE_OFFSET_BIT));
    /* without ASID tagging every cached translation may be stale */
    rt_hw_tlb_invalidate_all_local();
}

/* nothing to set up when ASIDs are not in use */
void rt_hw_asid_init(void)
{
}
#endif /* ARCH_USING_ASID */
/* Return the page table most recently installed via rt_hw_aspace_switch(). */
void *rt_hw_mmu_tbl_get()
{
    void *active = current_mmu_table;
    return active;
}
/* Map a single virtual address page to a physical address page in the page table.
 * Intermediate (L2/L3) tables are allocated on demand; returns 0 on success,
 * -1 when a table page cannot be allocated. */
static int _map_one_page(struct rt_aspace *aspace, void *va, void *pa,
                         size_t attr)
{
    rt_ubase_t l1_off, l2_off, l3_off;
    rt_ubase_t *mmu_l1, *mmu_l2, *mmu_l3;

    /* indices of `va` at each of the three table levels */
    l1_off = GET_L1((size_t)va);
    l2_off = GET_L2((size_t)va);
    l3_off = GET_L3((size_t)va);

    mmu_l1 = ((rt_ubase_t *)aspace->page_table) + l1_off;
    if (PTE_USED(*mmu_l1))
    {
        /* existing L2 table: turn its physical address back into a kernel VA */
        mmu_l2 = (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
    }
    else
    {
        /* allocate a fresh L2 table and hook it into the root entry */
        mmu_l2 = (rt_ubase_t *)rt_pages_alloc(0);
        if (mmu_l2)
        {
            rt_memset(mmu_l2, 0, PAGE_SIZE);
            /* clean the zeroed table before publishing it to the walker */
            rt_hw_cpu_dcache_clean(mmu_l2, PAGE_SIZE);
            *mmu_l1 = COMBINEPTE((rt_ubase_t)VPN_TO_PPN(mmu_l2, PV_OFFSET),
                                 PAGE_DEFAULT_ATTR_NEXT);
            rt_hw_cpu_dcache_clean(mmu_l1, sizeof(*mmu_l1));
        }
        else
        {
            return -1; /* out of page memory */
        }
    }

    if (PTE_USED(*(mmu_l2 + l2_off)))
    {
        /* an L2 entry on the way down must be a pointer, not a leaf */
        RT_ASSERT(!PAGE_IS_LEAF(*(mmu_l2 + l2_off)));
        mmu_l3 =
            (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), PV_OFFSET);
    }
    else
    {
        /* allocate a fresh L3 (leaf) table */
        mmu_l3 = (rt_ubase_t *)rt_pages_alloc(0);
        if (mmu_l3)
        {
            rt_memset(mmu_l3, 0, PAGE_SIZE);
            rt_hw_cpu_dcache_clean(mmu_l3, PAGE_SIZE);
            *(mmu_l2 + l2_off) =
                COMBINEPTE((rt_ubase_t)VPN_TO_PPN(mmu_l3, PV_OFFSET),
                           PAGE_DEFAULT_ATTR_NEXT);
            rt_hw_cpu_dcache_clean(mmu_l2, sizeof(*mmu_l2));
            /* declares a reference to parent page table */
            rt_page_ref_inc((void *)mmu_l2, 0);
        }
        else
        {
            return -1;
        }
    }

    /* the slot must be empty: this function never overrides mappings */
    RT_ASSERT(!PTE_USED(*(mmu_l3 + l3_off)));
    /* declares a reference to parent page table */
    rt_page_ref_inc((void *)mmu_l3, 0);
    *(mmu_l3 + l3_off) = COMBINEPTE((rt_ubase_t)pa, attr);
    rt_hw_cpu_dcache_clean(mmu_l3 + l3_off, sizeof(*(mmu_l3 + l3_off)));
    return 0;
}
/**
* @brief Maps a virtual address space to a physical address space.
*
* This function maps a specified range of virtual addresses to a range of physical addresses
* and sets the attributes of the page table entries (PTEs). If an error occurs during the
* mapping process, the function will automatically roll back any partially completed mappings.
*
* @param aspace Pointer to the address space structure containing the page table information.
* @param v_addr The starting virtual address to be mapped.
* @param p_addr The starting physical address to be mapped.
* @param size The size of the memory to be mapped (in bytes).
* @param attr The attributes of the page table entries (e.g., read/write permissions, cache policies).
*
* @return On success, returns the starting virtual address `v_addr`;
* On failure, returns `NULL`.
*
* @note This function will not override existing page table entries.
* @warning The caller must ensure that `v_addr` and `p_addr` are page-aligned,
* and `size` is a multiple of the page size.
*
*/
void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr,
                    size_t size, size_t attr)
{
    int ret = -1;
    /* start of the range; doubles as the rollback cursor on failure */
    void *unmap_va = v_addr;
    size_t npages = size >> ARCH_PAGE_SHIFT;

    /* TODO trying with HUGEPAGE here */
    while (npages--)
    {
        MM_PGTBL_LOCK(aspace);
        ret = _map_one_page(aspace, v_addr, p_addr, attr);
        MM_PGTBL_UNLOCK(aspace);
        if (ret != 0)
        {
            /* error, undo map: unmap everything mapped so far, one page at a time */
            while (unmap_va != v_addr)
            {
                MM_PGTBL_LOCK(aspace);
                _unmap_area(aspace, unmap_va, ARCH_PAGE_SIZE);
                MM_PGTBL_UNLOCK(aspace);
                unmap_va += ARCH_PAGE_SIZE;
            }
            break;
        }
        v_addr += ARCH_PAGE_SIZE;
        p_addr += ARCH_PAGE_SIZE;
    }

    if (ret == 0)
    {
        /* success: unmap_va was never advanced, it is still the range start */
        return unmap_va;
    }

    return NULL;
}
/* unmap page table entry
 *
 * Clears the given PTE, then walks back toward the root: removing a child
 * entry drops one reference on the table page that holds it; when only one
 * reference remains (the table's own), the table page is freed and the walk
 * repeats on the parent entry from lvl_entry[]. */
static void _unmap_pte(rt_ubase_t *pentry, rt_ubase_t *lvl_entry[], int level)
{
    int loop_flag = 1;
    while (loop_flag)
    {
        loop_flag = 0;
        *pentry = 0;
        rt_hw_cpu_dcache_clean(pentry, sizeof(*pentry));

        /* we don't handle level 0, which is maintained by caller */
        if (level > 0)
        {
            /* table pages are page-aligned, so masking the entry's address
             * yields the table page that contains it */
            void *page = (void *)((rt_ubase_t)pentry & ~ARCH_PAGE_MASK);

            /* decrease reference from child page to parent */
            rt_pages_free(page, 0);

            int free = rt_page_ref_get(page, 0);
            if (free == 1)
            {
                /* last reference: free the table page and clear its parent PTE */
                rt_pages_free(page, 0);
                pentry = lvl_entry[--level];
                loop_flag = 1;
            }
        }
    }
}
/* Unmaps a virtual address range from the page table.
 * Returns the number of bytes covered by the entry that was cleared
 * (the mapping size of the level where the walk stopped), or the full
 * root-entry span when the address was not mapped at all. */
static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr, size_t size)
{
    /* page-align the address before walking */
    rt_ubase_t loop_va = __UMASKVALUE((rt_ubase_t)v_addr, PAGE_OFFSET_MASK);
    size_t unmapped = 0;
    int i = 0;
    rt_ubase_t lvl_off[3];
    rt_ubase_t *lvl_entry[3];

    /* per-level indices of the aligned address */
    lvl_off[0] = (rt_ubase_t)GET_L1(loop_va);
    lvl_off[1] = (rt_ubase_t)GET_L2(loop_va);
    lvl_off[2] = (rt_ubase_t)GET_L3(loop_va);
    /* start with one root entry's span; shrink by one index width per level */
    unmapped = 1 << (ARCH_PAGE_SHIFT + ARCH_INDEX_WIDTH * 2ul);

    rt_ubase_t *pentry;
    lvl_entry[i] = ((rt_ubase_t *)aspace->page_table + lvl_off[i]);
    pentry = lvl_entry[i];

    /* find leaf page table entry */
    while (PTE_USED(*pentry) && !PAGE_IS_LEAF(*pentry))
    {
        i += 1;
        lvl_entry[i] = ((rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*pentry), PV_OFFSET) +
                        lvl_off[i]);
        pentry = lvl_entry[i];
        unmapped >>= ARCH_INDEX_WIDTH;
    }

    /* clear PTE & setup its */
    if (PTE_USED(*pentry))
    {
        _unmap_pte(pentry, lvl_entry, i);
    }

    return unmapped;
}
/**
* @brief Unmaps a range of virtual memory addresses from the specified address space.
*
* This function is responsible for unmapping a contiguous region of virtual memory
* from the given address space. It handles multiple pages and ensures thread safety
* by locking the page table during the unmapping operation.
*
* @param aspace Pointer to the address space structure from which the memory will be unmapped.
* @param v_addr Starting virtual address to unmap. Must be page-aligned.
* @param size Size of the memory region to unmap. Must be page-aligned.
*
* @note The caller must ensure that both `v_addr` and `size` are page-aligned.
*
* @details The function operates in a loop, unmapping memory in chunks. It uses the
* `_unmap_area` function to perform the actual unmapping, which is called within a
* locked section to ensure thread safety. The loop continues until the entire region
* is unmapped.
*
* @see _unmap_area
* @note unmap is different from map that it can handle multiple pages
*/
void rt_hw_mmu_unmap(struct rt_aspace *aspace, void *v_addr, size_t size)
{
    /* caller guarantee that v_addr & size are page aligned */
    if (!aspace->page_table)
    {
        return;
    }

    size_t step = 0;
    while (size > 0)
    {
        MM_PGTBL_LOCK(aspace);
        step = _unmap_area(aspace, v_addr, size);
        MM_PGTBL_UNLOCK(aspace);

        /* step == 0 means the region does not exist in the page table;
         * step > size would walk past the requested range — stop either way */
        if (step == 0 || step > size)
        {
            break;
        }
        v_addr += step;
        size -= step;
    }
}
#ifdef RT_USING_SMART
/* Place the ioremap window at `vaddr` and the MPR region just below it. */
static inline void _init_region(void *vaddr, size_t size)
{
    rt_ioremap_start = vaddr;
    rt_ioremap_size = size;
    /* the memory-protection region sits immediately below the ioremap space */
    rt_mpr_start = rt_ioremap_start - rt_mpr_size;
    LOG_D("rt_ioremap_start: %p, rt_mpr_start: %p", rt_ioremap_start,
          rt_mpr_start);
}
#else
/* Without Smart there is no ioremap window; only place the MPR region. */
static inline void _init_region(void *vaddr, size_t size)
{
    rt_mpr_start = vaddr - rt_mpr_size;
}
#endif
#if defined(RT_USING_SMART) && defined(ARCH_REMAP_KERNEL)
#define KERN_SPACE_START ((void *)KERNEL_VADDR_START)
#define KERN_SPACE_SIZE (0xfffffffffffff000UL - KERNEL_VADDR_START + 0x1000)
#else
#define KERN_SPACE_START ((void *)0x1000)
#define KERN_SPACE_SIZE ((size_t)USER_VADDR_START - 0x1000)
#endif
/**
* @brief Initialize the MMU (Memory Management Unit) mapping.
*
* This function initializes the MMU mapping, incluing these steps as follows:
* 1. Check the validity of the input parameters,
* 2. Calculate the start and end virtual addresses based on the input virtual address and size.
* 3. Convert the virtual addresses to PPN2 indices.
* 4. Check the initialization of the page table. If any entry in the page table within
* the specified range is non-zero, it returns -1.
* 5. It initializes the kernel address space using rt_aspace_init() and initializes the specified region
* using _init_region.
*
* @param aspace Pointer to the address space. Must not be NULL.
* @param v_address The starting virtual address.
* @param size The size of the virtual address space.
* @param vtable Pointer to the page table. Must not be NULL.
* @param pv_off The page table offset.
*
* @return Returns 0 if the initialization is successful. Returns -1 if any input parameter is invalid
* or the page table initialization check fails.
*/
int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_ubase_t size,
                       rt_ubase_t *vtable, rt_ubase_t pv_off)
{
    size_t idx, idx_first, idx_last;

    if (aspace == RT_NULL || vtable == RT_NULL)
    {
        return -1;
    }

    /* inclusive virtual range [v_address, v_address + size - 1] */
    rt_ubase_t va_start = (rt_ubase_t)v_address;
    rt_ubase_t va_last = va_start + size - 1;
    if (va_last < va_start)
    {
        return -1; /* range wraps around the top of the address space */
    }

    /* convert both ends to root-level (PPN2) table indices */
    idx_first = GET_L1(va_start);
    idx_last = GET_L1(va_last);
    if (idx_first == 0)
    {
        return -1;
    }

    /* every covered root entry must still be empty */
    for (idx = idx_first; idx <= idx_last; idx++)
    {
        if (vtable[idx] != 0)
        {
            return -1;
        }
    }

    rt_aspace_init(&rt_kernel_space, KERN_SPACE_START, KERN_SPACE_SIZE, vtable);
    _init_region(v_address, size);
    return 0;
}
/* deepest table level: number of index fields that fit in a virtual address */
const static int max_level =
    (ARCH_VADDR_WIDTH - ARCH_PAGE_SHIFT) / ARCH_INDEX_WIDTH;

/* bytes mapped by one entry at `level` (level 1 = root, max_level = leaf page) */
static inline uintptr_t _get_level_size(int level)
{
    return 1ul << (ARCH_PAGE_SHIFT + (max_level - level) * ARCH_INDEX_WIDTH);
}
/* Walk the table for `vaddr`; on success return the PTE pointer and store the
 * level it was found at (1 = root superpage entry, 3 = 4 KiB leaf) in *level.
 * Returns RT_NULL when unmapped — note *level is left UNTOUCHED in that case. */
static rt_ubase_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
{
    rt_ubase_t l1_off, l2_off, l3_off;
    rt_ubase_t *mmu_l1, *mmu_l2, *mmu_l3;
    rt_ubase_t pa; /* unused */

    l1_off = GET_L1((rt_uintptr_t)vaddr);
    l2_off = GET_L2((rt_uintptr_t)vaddr);
    l3_off = GET_L3((rt_uintptr_t)vaddr);

    if (!aspace)
    {
        LOG_W("%s: no aspace", __func__);
        return RT_NULL;
    }

    mmu_l1 = ((rt_ubase_t *)aspace->page_table) + l1_off;
    if (PTE_USED(*mmu_l1))
    {
        /* any of the X/W/R bits set marks a leaf entry at this level */
        if (*mmu_l1 & PTE_XWR_MASK)
        {
            *level = 1;
            return mmu_l1;
        }

        mmu_l2 = (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
        if (PTE_USED(*(mmu_l2 + l2_off)))
        {
            if (*(mmu_l2 + l2_off) & PTE_XWR_MASK)
            {
                *level = 2;
                return mmu_l2 + l2_off;
            }

            mmu_l3 = (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)),
                                              PV_OFFSET);
            if (PTE_USED(*(mmu_l3 + l3_off)))
            {
                *level = 3;
                return mmu_l3 + l3_off;
            }
        }
    }

    return RT_NULL;
}
/**
* @brief Translate a virtual address to a physical address.
*
* This function translates a given virtual address (`vaddr`) to its corresponding
* physical address (`paddr`) using the page table in the specified address space (`aspace`).
*
* @param aspace Pointer to the address space structure containing the page table.
* @param vaddr The virtual address to be translated.
*
* @return The translated physical address. If the translation fails, `ARCH_MAP_FAILED` is returned.
*
* @note The function queries the page table entry (PTE) for the virtual address using `_query`.
* If a valid PTE is found, the physical address is extracted and combined with the offset
* from the virtual address. If no valid PTE is found, a debug log is recorded, and
* `ARCH_MAP_FAILED` is returned.
*/
void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr)
{
    int level;
    rt_ubase_t *pte = _query(aspace, vaddr, &level);

    if (!pte)
    {
        LOG_D("%s: failed at %p", __func__, vaddr);
        return (void *)(uintptr_t)ARCH_MAP_FAILED;
    }

    /* frame base from the PTE, plus the offset within this level's span */
    uintptr_t paddr = GET_PADDR(*pte);
    paddr |= ((intptr_t)vaddr & (_get_level_size(level) - 1));
    return (void *)paddr;
}
/* cache-control stub: per-PTE cacheability is not modeled on this target */
static int _noncache(rt_base_t *pte)
{
    (void)pte;
    return 0;
}

/* cache-control stub: cached access is the default, nothing to change */
static int _cache(rt_base_t *pte)
{
    (void)pte;
    return 0;
}

/* dispatch table indexed by enum rt_mmu_cntl commands */
static int (*control_handler[MMU_CNTL_DUMMY_END])(rt_base_t *pte) = {
    [MMU_CNTL_CACHE] = _cache,
    [MMU_CNTL_NONCACHE] = _noncache,
};
/**
* @brief Control the page table entries (PTEs) for a specified virtual address range.
*
* This function applies a control command (e.g., cache control) to the page table entries
* (PTEs) corresponding to the specified virtual address range (`vaddr` to `vaddr + size`).
*
* @param aspace Pointer to the address space structure containing the page table.
* @param vaddr The starting virtual address of the range.
* @param size The size of the virtual address range.
* @param cmd The control command to apply (e.g., `MMU_CNTL_CACHE`, `MMU_CNTL_NONCACHE`.etc.).
*
* @return `RT_EOK` on success, or an error code (`-RT_EINVAL` or `-RT_ENOSYS`) on failure.
*
* @note The function uses the `control_handler` array to map the command to a handler function.
* It iterates over the virtual address range, queries the PTEs, and applies the handler
* to each valid PTE. If the command is invalid, `-RT_ENOSYS` is returned.
*/
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
enum rt_mmu_cntl cmd)
{
int level;
int err = -RT_EINVAL;
void *vend = vaddr + size;
int (*handler)(rt_base_t *pte);
if (cmd >= 0 && cmd < MMU_CNTL_DUMMY_END)
{
handler = control_handler[cmd];
while (vaddr < vend)
{
rt_base_t *pte = _query(aspace, vaddr, &level);
void *range_end = vaddr + _get_level_size(level);
RT_ASSERT(range_end <= vend);
if (pte)
{
err = handler(pte);
RT_ASSERT(err == RT_EOK);
}
vaddr = range_end;
}
}
else
{
err = -RT_ENOSYS;
}
return err;
}
/**
* @brief setup Page Table for kernel space. It's a fixed map
* and all mappings cannot be changed after initialization.
*
* Memory region in struct mem_desc must be page aligned,
* otherwise is a failure and no report will be
* returned.
*
* @param aspace Pointer to the address space structure.
* @param mdesc Pointer to the array of memory descriptors.
* @param desc_nr Number of memory descriptors in the array.
*/
void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
{
    void *err;

    /* BUG FIX: the counter was `size_t` compared against the signed int
     * desc_nr; a negative count would convert to a huge unsigned bound and
     * walk far past the descriptor array. Use a matching signed counter. */
    for (int i = 0; i < desc_nr; i++)
    {
        size_t attr;
        /* translate the descriptor's memory type into MMU attribute bits */
        switch (mdesc->attr)
        {
            case NORMAL_MEM:
                attr = MMU_MAP_K_RWCB;
                break;
            case NORMAL_NOCACHE_MEM:
                /* no distinct uncached normal attribute here; map cached */
                attr = MMU_MAP_K_RWCB;
                break;
            case DEVICE_MEM:
                attr = MMU_MAP_K_DEVICE;
                break;
            default:
                attr = MMU_MAP_K_DEVICE;
        }

        /* fixed-address mapping request covering the descriptor's VA range */
        struct rt_mm_va_hint hint = {
            .flags = MMF_MAP_FIXED,
            .limit_start = aspace->start,
            .limit_range_size = aspace->size,
            .map_size = mdesc->vaddr_end - mdesc->vaddr_start + 1,
            .prefer = (void *)mdesc->vaddr_start};

        /* derive the physical start from PV_OFFSET when not given explicitly */
        if (mdesc->paddr_start == (rt_uintptr_t)ARCH_MAP_FAILED)
            mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;

        rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
                                 mdesc->paddr_start >> MM_PAGE_SHIFT, &err);
        mdesc++;
    }

    rt_hw_asid_init();
    /* activate the freshly populated kernel page table */
    rt_hw_aspace_switch(&rt_kernel_space);
    rt_page_cleanup();
}
#define SATP_BASE ((rt_ubase_t)SATP_MODE << SATP_MODE_OFFSET)

/* Early boot: record the physical/virtual offset and, when the kernel runs
 * below its link address, build a temporary root-level page table and enable
 * translation so execution can relocate to the linked (high) address. */
void rt_hw_mem_setup_early(void)
{
    rt_ubase_t pv_off;
    rt_ubase_t ps = 0x0;
    rt_ubase_t vs = 0x0;
    /* temporary root table: first page boundary after .bss */
    rt_ubase_t *early_pgtbl = (rt_ubase_t *)(((size_t)&__bss_end + 4095) & ~0xfff);

    /* calculate pv_offset: current PC-relative address of _start minus the
     * address it was linked at */
    void *symb_pc;
    void *symb_linker;
    __asm__ volatile("la %0, _start\n" : "=r"(symb_pc));
    __asm__ volatile("la %0, _start_link_addr\n" : "=r"(symb_linker));
    symb_linker = *(void **)symb_linker;
    pv_off = symb_pc - symb_linker;
    rt_kmem_pvoff_set(pv_off);

    if (pv_off)
    {
        /* the offset must be a multiple of the root-entry mapping size */
        if (pv_off & ((1ul << (ARCH_INDEX_WIDTH * 2 + ARCH_PAGE_SHIFT)) - 1))
        {
            LOG_E("%s: not aligned virtual address. pv_offset %p", __func__,
                  pv_off);
            RT_ASSERT(0);
        }

        /**
         * identical mapping,
         * PC are still at lower region before relocating to high memory
         */
        for (size_t i = 0; i < __SIZE(PPN0_BIT); i++)
        {
            early_pgtbl[i] = COMBINEPTE(ps, MMU_MAP_EARLY);
            ps += L1_PAGE_SIZE;
        }

        /* relocate text region */
        __asm__ volatile("la %0, _start\n" : "=r"(ps));
        ps &= ~(L1_PAGE_SIZE - 1);
        vs = ps - pv_off;

        /* relocate region: map a 0x80000000-byte window at the link address */
        rt_ubase_t vs_idx = GET_L1(vs);
        rt_ubase_t ve_idx = GET_L1(vs + 0x80000000);
        for (size_t i = vs_idx; i < ve_idx; i++)
        {
            early_pgtbl[i] = COMBINEPTE(ps, MMU_MAP_EARLY);
            ps += L1_PAGE_SIZE;
        }

        /* apply new mapping: fence, install satp, fence again */
        asm volatile("sfence.vma x0, x0");
        write_csr(satp, SATP_BASE | ((size_t)early_pgtbl >> PAGE_OFFSET_BIT));
        asm volatile("sfence.vma x0, x0");
    }

    /* return to lower text section */
}
/**
* @brief Creates and initializes a new MMU page table.
*
* This function allocates a new MMU page table, copies the kernel space
* page table into it, and flushes the data cache to ensure consistency.
*
* @return
* - A pointer to the newly allocated MMU page table on success.
* - RT_NULL if the allocation fails.
*/
void *rt_hw_mmu_pgtbl_create(void)
{
    rt_ubase_t *root = (rt_ubase_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
    if (root == RT_NULL)
    {
        return RT_NULL;
    }

    /* seed the new root with the kernel mappings, then flush it to RAM so
     * the hardware walker observes a consistent table */
    rt_memcpy(root, rt_kernel_space.page_table, ARCH_PAGE_SIZE);
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, root, ARCH_PAGE_SIZE);
    return root;
}
/**
* @brief Deletes an MMU page table.
*
* This function frees the memory allocated for the given MMU page table.
*
* @param pgtbl Pointer to the MMU page table to be deleted.
*/
void rt_hw_mmu_pgtbl_delete(void *pgtbl)
{
    /* release the single root page allocated by rt_hw_mmu_pgtbl_create() */
    rt_pages_free(pgtbl, 0);
}

View File

@ -0,0 +1,77 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-01-30 lizhirui first version
* 2023-10-12 Shell Add permission control API
*/
#ifndef __MMU_H__
#define __MMU_H__
#include "riscv.h"
#include "riscv_mmu.h"
#include <mm_aspace.h>
#include <stddef.h>
/* RAM, Flash, or ROM */
#define NORMAL_MEM 0
/* normal nocache memory mapping type */
#define NORMAL_NOCACHE_MEM 1
/* MMIO region */
#define DEVICE_MEM 2
typedef size_t rt_pte_t;
struct mem_desc
{
rt_size_t vaddr_start;
rt_size_t vaddr_end;
rt_ubase_t paddr_start;
rt_size_t attr;
struct rt_varea varea;
};
/* page-frame number and in-page offset of an address */
#define GET_PF_ID(addr) ((addr) >> PAGE_OFFSET_BIT)
#define GET_PF_OFFSET(addr) __MASKVALUE(addr, PAGE_OFFSET_MASK)
/* per-level page-table indices extracted from a virtual address */
#define GET_L1(addr) __PARTBIT(addr, VPN2_SHIFT, VPN2_BIT)
#define GET_L2(addr) __PARTBIT(addr, VPN1_SHIFT, VPN1_BIT)
#define GET_L3(addr) __PARTBIT(addr, VPN0_SHIFT, VPN0_BIT)
/* physical page number stored in a PTE, and the physical address it names */
#define GET_PPN(pte) \
    (__PARTBIT(pte, PTE_PPN_SHIFT, PHYSICAL_ADDRESS_WIDTH_BITS - PAGE_OFFSET_BIT))
#define GET_PADDR(pte) (GET_PPN(pte) << PAGE_OFFSET_BIT)
/* translate between kernel virtual and physical addresses via pv_off */
#define VPN_TO_PPN(vaddr, pv_off) (((rt_uintptr_t)(vaddr)) + (pv_off))
#define PPN_TO_VPN(paddr, pv_off) (((rt_uintptr_t)(paddr)) - (pv_off))
/* rebuild a virtual address from its three level indices */
#define COMBINEVADDR(l1_off, l2_off, l3_off) \
    (((l1_off) << VPN2_SHIFT) | ((l2_off) << VPN1_SHIFT) | \
     ((l3_off) << VPN0_SHIFT))
/* build a PTE from a physical address and attribute bits */
#define COMBINEPTE(paddr, attr) \
    ((((paddr) >> PAGE_OFFSET_BIT) << PTE_PPN_SHIFT) | (attr))
/* mapping error codes */
#define MMU_MAP_ERROR_VANOTALIGN -1
#define MMU_MAP_ERROR_PANOTALIGN -2
#define MMU_MAP_ERROR_NOPAGE -3
#define MMU_MAP_ERROR_CONFLICT -4
void *rt_hw_mmu_tbl_get(void);
int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_ubase_t size,
rt_ubase_t *vtable, rt_ubase_t pv_off);
void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr);
void rt_hw_mmu_kernel_map_init(rt_aspace_t aspace, rt_ubase_t vaddr_start,
rt_ubase_t size);
void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
size_t attr);
void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size);
void rt_hw_aspace_switch(rt_aspace_t aspace);
void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *vaddr);
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
enum rt_mmu_cntl cmd);
void *rt_hw_mmu_pgtbl_create(void);
void rt_hw_mmu_pgtbl_delete(void *pgtbl);
#endif

View File

@ -0,0 +1,32 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-01-30 lizhirui first version
* 2024-08-28 RT-Thread Fit into rv64ilp32 ABI
*/
#ifndef __RISCV_H__
#define __RISCV_H__
#include <encoding.h>
/* using unsigned long long for the case of rv64ilp32 */
/* 2^bit, and masks with the low `bit` bits set / cleared */
#define __SIZE(bit) (1ULL << (bit))
#define __MASK(bit) (__SIZE(bit) - 1ULL)
#define __UMASK(bit) (~(__MASK(bit)))
/* keep only / drop the bits selected by maskvalue */
#define __MASKVALUE(value,maskvalue) ((value) & (maskvalue))
#define __UMASKVALUE(value,maskvalue) ((value) & (~(maskvalue)))
/* value fits in bit_count bits / is aligned to 2^start_bit */
#define __CHECKUPBOUND(value,bit_count) (!(((rt_ubase_t)value) & (~__MASK(bit_count))))
#define __CHECKALIGN(value,start_bit) (!(((rt_ubase_t)value) & (__MASK(start_bit))))
/* extract `length` bits starting at `start_bit` */
#define __PARTBIT(value,start_bit,length) (((value) >> (start_bit)) & __MASK(length))
/* round up / down to a multiple of 2^bit */
#define __ALIGNUP(value,bit) (((value) + __MASK(bit)) & __UMASK(bit))
#define __ALIGNDOWN(value,bit) ((value) & __UMASK(bit))
#endif

View File

@ -0,0 +1,115 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2019 Western Digital Corporation or its affiliates.
*
* Authors:
* Anup Patel <anup.patel@wdc.com>
*/
#ifndef __RISCV_IO_H__
#define __RISCV_IO_H__
/* Id of the hart this image booted on, recorded by startup code.
 * NOTE(review): returns the *boot* hart id, not necessarily the id of the
 * hart currently executing — confirm callers expect that. */
static inline uint32_t __raw_hartid(void)
{
    extern int boot_hartid;
    return boot_hartid;
}
/* Raw stores with no ordering guarantees; the readb/writeb-family macros
 * below add the fences. NOTE(review): no "memory" clobber on the asm — the
 * surrounding fence macros are relied upon for ordering. */
static inline void __raw_writeb(rt_uint8_t val, volatile void *addr)
{
    asm volatile("sb %0, 0(%1)" : : "r"(val), "r"(addr));
}

static inline void __raw_writew(rt_uint16_t val, volatile void *addr)
{
    asm volatile("sh %0, 0(%1)" : : "r"(val), "r"(addr));
}

static inline void __raw_writel(rt_uint32_t val, volatile void *addr)
{
    asm volatile("sw %0, 0(%1)" : : "r"(val), "r"(addr));
}

#if __riscv_xlen != 32
/* 64-bit store exists only on RV64 */
static inline void __raw_writeq(rt_uint64_t val, volatile void *addr)
{
    asm volatile("sd %0, 0(%1)" : : "r"(val), "r"(addr));
}
#endif
/* Raw loads with no ordering guarantees; see the accessor macros below. */
static inline rt_uint8_t __raw_readb(const volatile void *addr)
{
    rt_uint8_t val;

    asm volatile("lb %0, 0(%1)" : "=r"(val) : "r"(addr));
    return val;
}

static inline rt_uint16_t __raw_readw(const volatile void *addr)
{
    rt_uint16_t val;

    asm volatile("lh %0, 0(%1)" : "=r"(val) : "r"(addr));
    return val;
}

static inline rt_uint32_t __raw_readl(const volatile void *addr)
{
    rt_uint32_t val;

    asm volatile("lw %0, 0(%1)" : "=r"(val) : "r"(addr));
    return val;
}

#if __riscv_xlen != 32
/* 64-bit load exists only on RV64 */
static inline rt_uint64_t __raw_readq(const volatile void *addr)
{
    rt_uint64_t val;

    asm volatile("ld %0, 0(%1)" : "=r"(val) : "r"(addr));
    return val;
}
#endif
/* FIXME: These are now the same as asm-generic */
/* clang-format off */
#define __io_rbr() do {} while (0)
#define __io_rar() do {} while (0)
#define __io_rbw() do {} while (0)
#define __io_raw() do {} while (0)
#define readb_relaxed(c) ({ rt_uint8_t __v; __io_rbr(); __v = __raw_readb(c); __io_rar(); __v; })
#define readw_relaxed(c) ({ rt_uint16_t __v; __io_rbr(); __v = __raw_readw(c); __io_rar(); __v; })
#define readl_relaxed(c) ({ rt_uint32_t __v; __io_rbr(); __v = __raw_readl(c); __io_rar(); __v; })
#define writeb_relaxed(v,c) ({ __io_rbw(); __raw_writeb((v),(c)); __io_raw(); })
#define writew_relaxed(v,c) ({ __io_rbw(); __raw_writew((v),(c)); __io_raw(); })
#define writel_relaxed(v,c) ({ __io_rbw(); __raw_writel((v),(c)); __io_raw(); })
#if __riscv_xlen != 32
#define readq_relaxed(c) ({ rt_uint64_t __v; __io_rbr(); __v = __raw_readq(c); __io_rar(); __v; })
#define writeq_relaxed(v,c) ({ __io_rbw(); __raw_writeq((v),(c)); __io_raw(); })
#endif
#define __io_br() do {} while (0)
#define __io_ar() __asm__ __volatile__ ("fence i,r" : : : "memory");
#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory");
#define __io_aw() do {} while (0)
#define readb(c) ({ rt_uint8_t __v; __io_br(); __v = __raw_readb(c); __io_ar(); __v; })
#define readw(c) ({ rt_uint16_t __v; __io_br(); __v = __raw_readw(c); __io_ar(); __v; })
#define readl(c) ({ rt_uint32_t __v; __io_br(); __v = __raw_readl(c); __io_ar(); __v; })
#define writeb(v,c) ({ __io_bw(); __raw_writeb((v),(c)); __io_aw(); })
#define writew(v,c) ({ __io_bw(); __raw_writew((v),(c)); __io_aw(); })
#define writel(v,c) ({ __io_bw(); __raw_writel((v),(c)); __io_aw(); })
#if __riscv_xlen != 32
#define readq(c) ({ rt_uint64_t __v; __io_br(); __v = __raw_readq(c); __io_ar(); __v; })
#define writeq(v,c) ({ __io_bw(); __raw_writeq((v),(c)); __io_aw(); })
#endif
#endif

View File

@ -0,0 +1,29 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-01-30 lizhirui first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <stdint.h>
#include <riscv.h>
#include <string.h>
#include <stdlib.h>
#include "riscv_mmu.h"
/* Allow supervisor-mode code to access user pages (set sstatus.SUM). */
void mmu_enable_user_page_access(void)
{
    set_csr(sstatus, SSTATUS_SUM);
}

/* Revoke supervisor-mode access to user pages (clear sstatus.SUM). */
void mmu_disable_user_page_access(void)
{
    clear_csr(sstatus, SSTATUS_SUM);
}

View File

@ -0,0 +1,264 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Bernard port from FreeBSD
*/
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2019 Mitchell Horne <mhorne@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "sbi.h"
#include <rtthread.h>
#include <stdbool.h>
/* SBI Implementation-Specific Definitions */
#define OPENSBI_VERSION_MAJOR_OFFSET 16
#define OPENSBI_VERSION_MINOR_MASK 0xFFFF

/* cached firmware identity, populated once by sbi_init() */
unsigned long sbi_spec_version;
unsigned long sbi_impl_id;
unsigned long sbi_impl_version;

/* which legacy-replacement extensions this firmware implements */
static bool has_time_extension = false;
static bool has_ipi_extension = false;
static bool has_rfnc_extension = false;
/* Base-extension probes for the firmware's identity and version. */
static struct sbi_ret sbi_get_spec_version(void)
{
    return SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_SPEC_VERSION);
}

static struct sbi_ret sbi_get_impl_id(void)
{
    return SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_IMPL_ID);
}

static struct sbi_ret sbi_get_impl_version(void)
{
    return SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_IMPL_VERSION);
}
/**
 * @brief Print the SBI implementation name/version and the spec version.
 *
 * Relies on sbi_spec_version / sbi_impl_id / sbi_impl_version having been
 * filled in by sbi_init(); a zero spec version marks a legacy (v0.1)
 * firmware that cannot identify itself.
 */
void sbi_print_version(void)
{
    uint32_t major;
    uint32_t minor;

    /* For legacy SBI implementations. */
    if (sbi_spec_version == 0)
    {
        rt_kprintf("SBI: Unknown (Legacy) Implementation\n");
        rt_kprintf("SBI Specification Version: 0.1\n");
        return;
    }

    switch (sbi_impl_id)
    {
    case (SBI_IMPL_ID_BBL):
        /* fixed typo in user-facing message: "Berkely" -> "Berkeley" */
        rt_kprintf("SBI: Berkeley Boot Loader %lu\n", sbi_impl_version);
        break;
    case (SBI_IMPL_ID_XVISOR):
        rt_kprintf("SBI: eXtensible Versatile hypervISOR %lu\n",
                   sbi_impl_version);
        break;
    case (SBI_IMPL_ID_KVM):
        rt_kprintf("SBI: Kernel-based Virtual Machine %lu\n", sbi_impl_version);
        break;
    case (SBI_IMPL_ID_RUSTSBI):
        rt_kprintf("SBI: RustSBI %lu\n", sbi_impl_version);
        break;
    case (SBI_IMPL_ID_DIOSIX):
        rt_kprintf("SBI: Diosix %lu\n", sbi_impl_version);
        break;
    case (SBI_IMPL_ID_OPENSBI):
        /* OpenSBI packs its own version as major:16 | minor:16 */
        major = sbi_impl_version >> OPENSBI_VERSION_MAJOR_OFFSET;
        minor = sbi_impl_version & OPENSBI_VERSION_MINOR_MASK;
        rt_kprintf("SBI: OpenSBI v%u.%u\n", major, minor);
        break;
    default:
        rt_kprintf("SBI: Unrecognized Implementation: %lu\n", sbi_impl_id);
        break;
    }

    major = (sbi_spec_version & SBI_SPEC_VERS_MAJOR_MASK) >>
            SBI_SPEC_VERS_MAJOR_OFFSET;
    minor = (sbi_spec_version & SBI_SPEC_VERS_MINOR_MASK);
    rt_kprintf("SBI Specification Version: %u.%u\n", major, minor);
}
void sbi_set_timer(uint64_t val)
{
struct sbi_ret ret;
/* Use the TIME legacy replacement extension, if available. */
if (has_time_extension)
{
ret = SBI_CALL1(SBI_EXT_ID_TIME, SBI_TIME_SET_TIMER, val);
RT_ASSERT(ret.error == SBI_SUCCESS);
}
else
{
(void)SBI_CALL1(SBI_SET_TIMER, 0, val);
}
}
void sbi_send_ipi(const unsigned long *hart_mask)
{
struct sbi_ret ret;
/* Use the IPI legacy replacement extension, if available. */
if (has_ipi_extension)
{
ret = SBI_CALL2(SBI_EXT_ID_IPI, SBI_IPI_SEND_IPI, *hart_mask, 0);
RT_ASSERT(ret.error == SBI_SUCCESS);
}
else
{
(void)SBI_CALL1(SBI_SEND_IPI, 0, (uint64_t)hart_mask);
}
}
/* Execute FENCE.I on the harts selected by *hart_mask. */
void sbi_remote_fence_i(const unsigned long *hart_mask)
{
    struct sbi_ret ret;

    /* Use the RFENCE legacy replacement extension, if available. */
    if (has_rfnc_extension)
    {
        /* extension call passes the mask by value, with base hart 0 */
        ret =
            SBI_CALL2(SBI_EXT_ID_RFNC, SBI_RFNC_REMOTE_FENCE_I, *hart_mask, 0);
        RT_ASSERT(ret.error == SBI_SUCCESS);
    }
    else
    {
        /* legacy call passes a pointer to the mask instead */
        (void)SBI_CALL1(SBI_REMOTE_FENCE_I, 0, (uint64_t)hart_mask);
    }
}
/* Execute SFENCE.VMA for [start, start+size) on the selected harts.
 * Returns the SBI error code (SBI_SUCCESS when the legacy path is used,
 * since the legacy call reports no status). */
int sbi_remote_sfence_vma(const unsigned long *hart_mask,
                          const unsigned long hart_mask_base,
                          unsigned long start, unsigned long size)
{
    struct sbi_ret ret = {.error = SBI_SUCCESS};

    /* Use the RFENCE legacy replacement extension, if available. */
    if (has_rfnc_extension)
    {
        ret = SBI_CALL4(SBI_EXT_ID_RFNC, SBI_RFNC_REMOTE_SFENCE_VMA, *hart_mask,
                        hart_mask_base, start, size);
    }
    else
    {
        /* legacy call: mask passed by pointer, no base, no status returned */
        (void)SBI_CALL3(SBI_REMOTE_SFENCE_VMA, 0, (uint64_t)hart_mask, start,
                        size);
    }
    return ret.error;
}
/* Execute SFENCE.VMA restricted to `asid` for [start, start+size) on the
 * selected harts. */
void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
                                unsigned long start, unsigned long size,
                                unsigned long asid)
{
    struct sbi_ret ret;

    /* Use the RFENCE legacy replacement extension, if available. */
    if (has_rfnc_extension)
    {
        /* mask by value, base hart 0, then the VA range and ASID */
        ret = SBI_CALL5(SBI_EXT_ID_RFNC, SBI_RFNC_REMOTE_SFENCE_VMA_ASID,
                        *hart_mask, 0, start, size, asid);
        RT_ASSERT(ret.error == SBI_SUCCESS);
    }
    else
    {
        /* legacy call: mask passed by pointer, no status checked */
        (void)SBI_CALL4(SBI_REMOTE_SFENCE_VMA_ASID, 0, (uint64_t)hart_mask,
                        start, size, asid);
    }
}
int sbi_hsm_hart_start(unsigned long hart, unsigned long start_addr,
unsigned long priv)
{
struct sbi_ret ret;
ret = SBI_CALL3(SBI_EXT_ID_HSM, SBI_HSM_HART_START, hart, start_addr, priv);
return (ret.error != 0 ? (int)ret.error : 0);
}
/* Request the firmware to stop the calling hart; the return status is
 * intentionally ignored. */
void sbi_hsm_hart_stop(void)
{
    (void)SBI_CALL0(SBI_EXT_ID_HSM, SBI_HSM_HART_STOP);
}
int sbi_hsm_hart_status(unsigned long hart)
{
struct sbi_ret ret;
ret = SBI_CALL1(SBI_EXT_ID_HSM, SBI_HSM_HART_STATUS, hart);
return (ret.error != 0 ? (int)ret.error : (int)ret.value);
}
/* Probe the SBI firmware: record spec/implementation versions and which
 * legacy-replacement extensions (TIME/IPI/RFENCE) are available. Must run
 * before the other sbi_* wrappers so their feature flags are valid. */
void sbi_init(void)
{
    struct sbi_ret sret;

    /*
     * Get the spec version. For legacy SBI implementations this will
     * return an error, otherwise it is guaranteed to succeed.
     */
    sret = sbi_get_spec_version();
    if (sret.error != 0)
    {
        /* We are running a legacy SBI implementation. */
        sbi_spec_version = 0;
        return;
    }

    /* Set the SBI implementation info. */
    sbi_spec_version = sret.value;
    sbi_impl_id = sbi_get_impl_id().value;
    sbi_impl_version = sbi_get_impl_version().value;

    /* Probe for legacy replacement extensions. */
    if (sbi_probe_extension(SBI_EXT_ID_TIME) != 0)
        has_time_extension = true;
    if (sbi_probe_extension(SBI_EXT_ID_IPI) != 0)
        has_ipi_extension = true;
    if (sbi_probe_extension(SBI_EXT_ID_RFNC) != 0)
        has_rfnc_extension = true;
}
/* Emit a NUL-terminated string through the SBI console, one char at a time. */
void rt_hw_console_output(const char *str)
{
    for (const char *p = str; *p != '\0'; p++)
    {
        sbi_console_putchar(*p);
    }
}

View File

@ -0,0 +1,244 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Bernard port from FreeBSD
*/
/*-
* Copyright (c) 2016-2017 Ruslan Bukin <br@bsdpad.com>
* All rights reserved.
* Copyright (c) 2019 Mitchell Horne <mhorne@FreeBSD.org>
*
* Portions of this software were developed by SRI International and the
* University of Cambridge Computer Laboratory under DARPA/AFRL contract
* FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Portions of this software were developed by the University of Cambridge
* Computer Laboratory as part of the CTSRD Project, with support from the
* UK Higher Education Innovation Fund (HEIF).
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_SBI_H_
#define _MACHINE_SBI_H_
#include <stdint.h>
#include <rtdef.h>
/* SBI Specification Version */
#define SBI_SPEC_VERS_MAJOR_OFFSET 24
#define SBI_SPEC_VERS_MAJOR_MASK (0x7F << SBI_SPEC_VERS_MAJOR_OFFSET)
#define SBI_SPEC_VERS_MINOR_OFFSET 0
#define SBI_SPEC_VERS_MINOR_MASK (0xFFFFFF << SBI_SPEC_VERS_MINOR_OFFSET)
/* SBI Implementation IDs */
#define SBI_IMPL_ID_BBL 0
#define SBI_IMPL_ID_OPENSBI 1
#define SBI_IMPL_ID_XVISOR 2
#define SBI_IMPL_ID_KVM 3
#define SBI_IMPL_ID_RUSTSBI 4
#define SBI_IMPL_ID_DIOSIX 5
/* SBI Error Codes */
#define SBI_SUCCESS 0
#define SBI_ERR_FAILURE -1
#define SBI_ERR_NOT_SUPPORTED -2
#define SBI_ERR_INVALID_PARAM -3
#define SBI_ERR_DENIED -4
#define SBI_ERR_INVALID_ADDRESS -5
#define SBI_ERR_ALREADY_AVAILABLE -6
/* SBI Base Extension */
#define SBI_EXT_ID_BASE 0x10
#define SBI_BASE_GET_SPEC_VERSION 0
#define SBI_BASE_GET_IMPL_ID 1
#define SBI_BASE_GET_IMPL_VERSION 2
#define SBI_BASE_PROBE_EXTENSION 3
#define SBI_BASE_GET_MVENDORID 4
#define SBI_BASE_GET_MARCHID 5
#define SBI_BASE_GET_MIMPID 6
/* Timer (TIME) Extension */
#define SBI_EXT_ID_TIME 0x54494D45
#define SBI_TIME_SET_TIMER 0
/* IPI (IPI) Extension */
#define SBI_EXT_ID_IPI 0x735049
#define SBI_IPI_SEND_IPI 0
/* RFENCE (RFNC) Extension */
#define SBI_EXT_ID_RFNC 0x52464E43
#define SBI_RFNC_REMOTE_FENCE_I 0
#define SBI_RFNC_REMOTE_SFENCE_VMA 1
#define SBI_RFNC_REMOTE_SFENCE_VMA_ASID 2
#define SBI_RFNC_REMOTE_HFENCE_GVMA_VMID 3
#define SBI_RFNC_REMOTE_HFENCE_GVMA 4
#define SBI_RFNC_REMOTE_HFENCE_VVMA_ASID 5
#define SBI_RFNC_REMOTE_HFENCE_VVMA 6
/* Hart State Management (HSM) Extension */
#define SBI_EXT_ID_HSM 0x48534D
#define SBI_HSM_HART_START 0
#define SBI_HSM_HART_STOP 1
#define SBI_HSM_HART_STATUS 2
#define SBI_HSM_STATUS_STARTED 0
#define SBI_HSM_STATUS_STOPPED 1
#define SBI_HSM_STATUS_START_PENDING 2
#define SBI_HSM_STATUS_STOP_PENDING 3
/* Legacy Extensions */
#define SBI_SET_TIMER 0
#define SBI_CONSOLE_PUTCHAR 1
#define SBI_CONSOLE_GETCHAR 2
#define SBI_CLEAR_IPI 3
#define SBI_SEND_IPI 4
#define SBI_REMOTE_FENCE_I 5
#define SBI_REMOTE_SFENCE_VMA 6
#define SBI_REMOTE_SFENCE_VMA_ASID 7
#define SBI_SHUTDOWN 8
#define SBI_CALL0(e, f) SBI_CALL5(e, f, 0, 0, 0, 0, 0)
#define SBI_CALL1(e, f, p1) SBI_CALL5(e, f, p1, 0, 0, 0, 0)
#define SBI_CALL2(e, f, p1, p2) SBI_CALL5(e, f, p1, p2, 0, 0, 0)
#define SBI_CALL3(e, f, p1, p2, p3) SBI_CALL5(e, f, p1, p2, p3, 0, 0)
#define SBI_CALL4(e, f, p1, p2, p3, p4) SBI_CALL5(e, f, p1, p2, p3, p4, 0)
#define SBI_CALL5(e, f, p1, p2, p3, p4, p5) sbi_call(e, f, p1, p2, p3, p4, p5)
/*
* Documentation available at
* https://github.com/riscv/riscv-sbi-doc/blob/master/riscv-sbi.adoc
*/
/*
 * Result of an SBI environment call: per the SBI calling convention the
 * SEE returns an error code in a0 and a value in a1.
 */
struct sbi_ret
{
    long error;     /* a0: SBI_SUCCESS or one of the SBI_ERR_* codes */
    long value;     /* a1: call-specific result (valid when error == 0) */
};
/*
 * Perform an SBI environment call (ecall from S-mode to the SEE).
 *
 * arg7 -> a7 (extension ID), arg6 -> a6 (function ID),
 * arg0..arg4 -> a0..a4 (call arguments).
 * Returns the {error (a0), value (a1)} pair defined by the SBI spec.
 * For legacy (v0.1) extensions a6 is ignored and the single return
 * value comes back in a0, i.e. in .error.
 */
rt_inline struct sbi_ret
sbi_call(uint64_t arg7, uint64_t arg6, uint64_t arg0, uint64_t arg1,
    uint64_t arg2, uint64_t arg3, uint64_t arg4)
{
    struct sbi_ret ret;
    /* pin each argument to the exact ABI register the SEE expects */
    register uintptr_t a0 __asm("a0") = (uintptr_t)(arg0);
    register uintptr_t a1 __asm("a1") = (uintptr_t)(arg1);
    register uintptr_t a2 __asm("a2") = (uintptr_t)(arg2);
    register uintptr_t a3 __asm("a3") = (uintptr_t)(arg3);
    register uintptr_t a4 __asm("a4") = (uintptr_t)(arg4);
    register uintptr_t a6 __asm("a6") = (uintptr_t)(arg6);
    register uintptr_t a7 __asm("a7") = (uintptr_t)(arg7);
    /* a0/a1 are in-out ("+r"): they carry arguments in and results out */
    __asm __volatile(\
        "ecall" \
        : "+r"(a0), "+r"(a1) \
        : "r"(a2), "r"(a3), "r"(a4), "r"(a6), "r"(a7) \
        : "memory");
    ret.error = a0;
    ret.value = a1;
    return (ret);
}
/* Base extension functions and variables. */
extern unsigned long sbi_spec_version;
extern unsigned long sbi_impl_id;
extern unsigned long sbi_impl_version;
static __inline long
sbi_probe_extension(long id)
{
return (SBI_CALL1(SBI_EXT_ID_BASE, SBI_BASE_PROBE_EXTENSION, id).value);
}
/* TIME extension functions. */
void sbi_set_timer(uint64_t val);
/* IPI extension functions. */
void sbi_send_ipi(const unsigned long *hart_mask);
/* RFENCE extension functions. */
void sbi_remote_fence_i(const unsigned long *hart_mask);
int sbi_remote_sfence_vma(const unsigned long *hart_mask,
const unsigned long hart_mask_base,
unsigned long start, unsigned long size);
void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask, unsigned long start,
unsigned long size, unsigned long asid);
/* Hart State Management extension functions. */
/*
* Start execution on the specified hart at physical address start_addr. The
* register a0 will contain the hart's ID, and a1 will contain the value of
* priv.
*/
int sbi_hsm_hart_start(unsigned long hart, unsigned long start_addr, unsigned long priv);
/*
* Stop execution on the current hart. Interrupts should be disabled, or this
* function may return.
*/
void sbi_hsm_hart_stop(void);
/*
* Get the execution status of the specified hart. The status will be one of:
* - SBI_HSM_STATUS_STARTED
* - SBI_HSM_STATUS_STOPPED
* - SBI_HSM_STATUS_START_PENDING
* - SBI_HSM_STATUS_STOP_PENDING
*/
int sbi_hsm_hart_status(unsigned long hart);
/* Legacy extension functions. */
static __inline void
sbi_console_putchar(int ch)
{
(void)SBI_CALL1(SBI_CONSOLE_PUTCHAR, 0, ch);
}
static __inline int
sbi_console_getchar(void)
{
/*
* XXX: The "error" is returned here because legacy SBI functions
* continue to return their value in a0.
*/
return (SBI_CALL0(SBI_CONSOLE_GETCHAR, 0).error);
}
static __inline void
sbi_shutdown(void)
{
(void)SBI_CALL0(SBI_SHUTDOWN, 0);
}
void sbi_print_version(void);
void sbi_init(void);
#endif /* !_MACHINE_SBI_H_ */

View File

@ -0,0 +1,70 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-01-30 lizhirui first version
* 2021-11-18 JasonHu add fpu member
* 2022-10-22 Shell Support kernel mode RVV
*/
#ifndef __STACK_H__
#define __STACK_H__
#include "stackframe.h"
#include <rtthread.h>
/*
 * Callee-saved context kept on a thread's stack across a scheduler
 * switch; slots are indexed by the RT_HW_SWITCH_CONTEXT_* offsets
 * defined in stackframe.h.
 */
typedef struct rt_hw_switch_frame
{
    uint64_t regs[RT_HW_SWITCH_CONTEXT_SIZE];
} *rt_hw_switch_frame_t;
/*
 * Full trap frame as pushed by the SAVE_ALL assembly macro; the field
 * order must match the fixed offsets used in stackframe.h (epc at slot 0,
 * sstatus at slot 2, sscratch at slot 32, optional FPU/vector state after).
 */
struct rt_hw_stack_frame
{
    rt_ubase_t epc;        /* sepc - program counter at the trap */
    rt_ubase_t ra;         /* x1 - ra - return address for jumps */
    rt_ubase_t sstatus;    /*      - supervisor status register */
    rt_ubase_t gp;         /* x3 - gp - global pointer */
    rt_ubase_t tp;         /* x4 - tp - thread pointer */
    rt_ubase_t t0;         /* x5 - t0 - temporary register 0 */
    rt_ubase_t t1;         /* x6 - t1 - temporary register 1 */
    rt_ubase_t t2;         /* x7 - t2 - temporary register 2 */
    rt_ubase_t s0_fp;      /* x8 - s0/fp - saved register 0 or frame pointer */
    rt_ubase_t s1;         /* x9 - s1 - saved register 1 */
    rt_ubase_t a0;         /* x10 - a0 - return value or function argument 0 */
    rt_ubase_t a1;         /* x11 - a1 - return value or function argument 1 */
    rt_ubase_t a2;         /* x12 - a2 - function argument 2 */
    rt_ubase_t a3;         /* x13 - a3 - function argument 3 */
    rt_ubase_t a4;         /* x14 - a4 - function argument 4 */
    rt_ubase_t a5;         /* x15 - a5 - function argument 5 */
    rt_ubase_t a6;         /* x16 - a6 - function argument 6 */
    rt_ubase_t a7;         /* x17 - a7 - function argument 7 */
    rt_ubase_t s2;         /* x18 - s2 - saved register 2 */
    rt_ubase_t s3;         /* x19 - s3 - saved register 3 */
    rt_ubase_t s4;         /* x20 - s4 - saved register 4 */
    rt_ubase_t s5;         /* x21 - s5 - saved register 5 */
    rt_ubase_t s6;         /* x22 - s6 - saved register 6 */
    rt_ubase_t s7;         /* x23 - s7 - saved register 7 */
    rt_ubase_t s8;         /* x24 - s8 - saved register 8 */
    rt_ubase_t s9;         /* x25 - s9 - saved register 9 */
    rt_ubase_t s10;        /* x26 - s10 - saved register 10 */
    rt_ubase_t s11;        /* x27 - s11 - saved register 11 */
    rt_ubase_t t3;         /* x28 - t3 - temporary register 3 */
    rt_ubase_t t4;         /* x29 - t4 - temporary register 4 */
    rt_ubase_t t5;         /* x30 - t5 - temporary register 5 */
    rt_ubase_t t6;         /* x31 - t6 - temporary register 6 */
    rt_ubase_t user_sp_exc_stack;    /* sscratch - user mode sp/exception stack */
    rt_ubase_t __padding;            /* align the frame to 16 bytes */
#ifdef ARCH_RISCV_FPU
    rt_ubase_t f[CTX_FPU_REG_NR];    /* f0~f31 */
#endif /* ARCH_RISCV_FPU */
#ifdef ARCH_RISCV_VECTOR
    rt_ubase_t v[CTX_VECTOR_REG_NR]; /* vector unit state */
#endif /* ARCH_RISCV_VECTOR */
};
#endif

View File

@ -0,0 +1,313 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-02-02 lizhirui first version
* 2021-02-11 lizhirui fixed gp save/store bug
* 2021-11-18 JasonHu add fpu registers save/restore
* 2022-10-22 Shell Support kernel mode RVV
*/
#ifndef __STACKFRAME_H__
#define __STACKFRAME_H__
#include <rtconfig.h>
#include "encoding.h"
/* bytes of register width */
#ifdef ARCH_CPU_64BIT
#define STORE sd
#define LOAD ld
#define FSTORE fsd
#define FLOAD fld
#define REGBYTES 8
#else
// error here, not portable
#error "Not supported XLEN"
#endif
#include "ext_context.h"
/* 33 general register + 1 padding */
#define CTX_GENERAL_REG_NR 34
/* all context registers */
#define CTX_REG_NR (CTX_GENERAL_REG_NR + CTX_FPU_REG_NR + CTX_VECTOR_REG_NR)
#define BYTES(idx) ((idx) * REGBYTES)
#define FRAME_OFF_SSTATUS BYTES(2)
#define FRAME_OFF_SP BYTES(32)
#define FRAME_OFF_GP BYTES(3)
/* switch frame */
#define RT_HW_SWITCH_CONTEXT_SSTATUS 0
#define RT_HW_SWITCH_CONTEXT_S11 1
#define RT_HW_SWITCH_CONTEXT_S10 2
#define RT_HW_SWITCH_CONTEXT_S9 3
#define RT_HW_SWITCH_CONTEXT_S8 4
#define RT_HW_SWITCH_CONTEXT_S7 5
#define RT_HW_SWITCH_CONTEXT_S6 6
#define RT_HW_SWITCH_CONTEXT_S5 7
#define RT_HW_SWITCH_CONTEXT_S4 8
#define RT_HW_SWITCH_CONTEXT_S3 9
#define RT_HW_SWITCH_CONTEXT_S2 10
#define RT_HW_SWITCH_CONTEXT_S1 11
#define RT_HW_SWITCH_CONTEXT_S0 12
#define RT_HW_SWITCH_CONTEXT_RA 13
#define RT_HW_SWITCH_CONTEXT_TP 14
#define RT_HW_SWITCH_CONTEXT_ALIGNMENT 15 // Padding for alignment
#define RT_HW_SWITCH_CONTEXT_SIZE 16 // Total size of the structure
#ifdef __ASSEMBLY__
/*
 * Push a full trap frame (layout: struct rt_hw_stack_frame) onto the
 * current stack: optional FPU/vector space first, then the 34-slot
 * general-register frame. x1 is used as scratch after it has been saved.
 */
.macro SAVE_ALL
#ifdef ARCH_RISCV_FPU
    /* reserve space for the float registers */
    addi sp, sp, -CTX_FPU_REG_NR * REGBYTES
#endif /* ARCH_RISCV_FPU */
#ifdef ARCH_RISCV_VECTOR
    /* reserve space for the vector registers */
    addi sp, sp, -CTX_VECTOR_REG_NR * REGBYTES
#endif /* ARCH_RISCV_VECTOR */
    /* save general registers */
    addi sp, sp, -CTX_GENERAL_REG_NR * REGBYTES
    STORE x1, 1 * REGBYTES(sp)
    /* x1 is saved now, reuse it to capture sstatus/sepc */
    csrr x1, sstatus
    STORE x1, FRAME_OFF_SSTATUS(sp)
    csrr x1, sepc
    STORE x1, 0 * REGBYTES(sp)
    STORE x3, 3 * REGBYTES(sp)
    STORE x4, 4 * REGBYTES(sp) /* save tp */
    STORE x5, 5 * REGBYTES(sp)
    STORE x6, 6 * REGBYTES(sp)
    STORE x7, 7 * REGBYTES(sp)
    STORE x8, 8 * REGBYTES(sp)
    STORE x9, 9 * REGBYTES(sp)
    STORE x10, 10 * REGBYTES(sp)
    STORE x11, 11 * REGBYTES(sp)
    STORE x12, 12 * REGBYTES(sp)
    STORE x13, 13 * REGBYTES(sp)
    STORE x14, 14 * REGBYTES(sp)
    STORE x15, 15 * REGBYTES(sp)
    STORE x16, 16 * REGBYTES(sp)
    STORE x17, 17 * REGBYTES(sp)
    STORE x18, 18 * REGBYTES(sp)
    STORE x19, 19 * REGBYTES(sp)
    STORE x20, 20 * REGBYTES(sp)
    STORE x21, 21 * REGBYTES(sp)
    STORE x22, 22 * REGBYTES(sp)
    STORE x23, 23 * REGBYTES(sp)
    STORE x24, 24 * REGBYTES(sp)
    STORE x25, 25 * REGBYTES(sp)
    STORE x26, 26 * REGBYTES(sp)
    STORE x27, 27 * REGBYTES(sp)
    STORE x28, 28 * REGBYTES(sp)
    STORE x29, 29 * REGBYTES(sp)
    STORE x30, 30 * REGBYTES(sp)
    STORE x31, 31 * REGBYTES(sp)
    /* slot 32 holds sscratch (user sp / exception stack pointer) */
    csrr t0, sscratch
    STORE t0, 32 * REGBYTES(sp)
#ifdef ARCH_RISCV_FPU
    /* backup sp and adjust sp to save float registers */
    mv t1, sp
    addi t1, t1, CTX_GENERAL_REG_NR * REGBYTES
    /* temporarily enable the FPU so the f-register stores are legal */
    li t0, SSTATUS_FS
    csrs sstatus, t0
    FSTORE f0, FPU_CTX_F0_OFF(t1)
    FSTORE f1, FPU_CTX_F1_OFF(t1)
    FSTORE f2, FPU_CTX_F2_OFF(t1)
    FSTORE f3, FPU_CTX_F3_OFF(t1)
    FSTORE f4, FPU_CTX_F4_OFF(t1)
    FSTORE f5, FPU_CTX_F5_OFF(t1)
    FSTORE f6, FPU_CTX_F6_OFF(t1)
    FSTORE f7, FPU_CTX_F7_OFF(t1)
    FSTORE f8, FPU_CTX_F8_OFF(t1)
    FSTORE f9, FPU_CTX_F9_OFF(t1)
    FSTORE f10, FPU_CTX_F10_OFF(t1)
    FSTORE f11, FPU_CTX_F11_OFF(t1)
    FSTORE f12, FPU_CTX_F12_OFF(t1)
    FSTORE f13, FPU_CTX_F13_OFF(t1)
    FSTORE f14, FPU_CTX_F14_OFF(t1)
    FSTORE f15, FPU_CTX_F15_OFF(t1)
    FSTORE f16, FPU_CTX_F16_OFF(t1)
    FSTORE f17, FPU_CTX_F17_OFF(t1)
    FSTORE f18, FPU_CTX_F18_OFF(t1)
    FSTORE f19, FPU_CTX_F19_OFF(t1)
    FSTORE f20, FPU_CTX_F20_OFF(t1)
    FSTORE f21, FPU_CTX_F21_OFF(t1)
    FSTORE f22, FPU_CTX_F22_OFF(t1)
    FSTORE f23, FPU_CTX_F23_OFF(t1)
    FSTORE f24, FPU_CTX_F24_OFF(t1)
    FSTORE f25, FPU_CTX_F25_OFF(t1)
    FSTORE f26, FPU_CTX_F26_OFF(t1)
    FSTORE f27, FPU_CTX_F27_OFF(t1)
    FSTORE f28, FPU_CTX_F28_OFF(t1)
    FSTORE f29, FPU_CTX_F29_OFF(t1)
    FSTORE f30, FPU_CTX_F30_OFF(t1)
    FSTORE f31, FPU_CTX_F31_OFF(t1)
    /* clr FS domain */
    csrc sstatus, t0
    /* mark FPU state clean (also clears sstatus.SD) */
    li t0, SSTATUS_FS_CLEAN
    csrs sstatus, t0
#endif /* ARCH_RISCV_FPU */
#ifdef ARCH_RISCV_VECTOR
    /* only save the vector unit if the trapped context was using it */
    csrr t0, sstatus
    andi t0, t0, SSTATUS_VS
    beqz t0, 0f
    /* push vector frame */
    addi t1, sp, (CTX_GENERAL_REG_NR + CTX_FPU_REG_NR) * REGBYTES
    SAVE_VECTOR t1
0:
#endif /* ARCH_RISCV_VECTOR */
.endm
/**
 * @brief Restore all general registers from the trap frame built by
 * SAVE_ALL and pop it, leaving sp set to the saved user/previous stack.
 * Must mirror SAVE_ALL's layout exactly.
 */
.macro RESTORE_ALL
#ifdef ARCH_RISCV_VECTOR
    // skip when the vector unit was off in the saved sstatus (slot 2)
    ld t0, 2 * REGBYTES(sp)
    // cannot use vector on initial
    andi t0, t0, SSTATUS_VS_CLEAN
    beqz t0, 0f
    /* locate and restore the vector frame */
    addi t1, sp, (CTX_GENERAL_REG_NR + CTX_FPU_REG_NR) * REGBYTES
    RESTORE_VECTOR t1
0:
#endif /* ARCH_RISCV_VECTOR */
#ifdef ARCH_RISCV_FPU
    /* restore float register */
    addi t2, sp, CTX_GENERAL_REG_NR * REGBYTES
    /* temporarily enable the FPU so the f-register loads are legal */
    li t0, SSTATUS_FS
    csrs sstatus, t0
    FLOAD f0, FPU_CTX_F0_OFF(t2)
    FLOAD f1, FPU_CTX_F1_OFF(t2)
    FLOAD f2, FPU_CTX_F2_OFF(t2)
    FLOAD f3, FPU_CTX_F3_OFF(t2)
    FLOAD f4, FPU_CTX_F4_OFF(t2)
    FLOAD f5, FPU_CTX_F5_OFF(t2)
    FLOAD f6, FPU_CTX_F6_OFF(t2)
    FLOAD f7, FPU_CTX_F7_OFF(t2)
    FLOAD f8, FPU_CTX_F8_OFF(t2)
    FLOAD f9, FPU_CTX_F9_OFF(t2)
    FLOAD f10, FPU_CTX_F10_OFF(t2)
    FLOAD f11, FPU_CTX_F11_OFF(t2)
    FLOAD f12, FPU_CTX_F12_OFF(t2)
    FLOAD f13, FPU_CTX_F13_OFF(t2)
    FLOAD f14, FPU_CTX_F14_OFF(t2)
    FLOAD f15, FPU_CTX_F15_OFF(t2)
    FLOAD f16, FPU_CTX_F16_OFF(t2)
    FLOAD f17, FPU_CTX_F17_OFF(t2)
    FLOAD f18, FPU_CTX_F18_OFF(t2)
    FLOAD f19, FPU_CTX_F19_OFF(t2)
    FLOAD f20, FPU_CTX_F20_OFF(t2)
    FLOAD f21, FPU_CTX_F21_OFF(t2)
    FLOAD f22, FPU_CTX_F22_OFF(t2)
    FLOAD f23, FPU_CTX_F23_OFF(t2)
    FLOAD f24, FPU_CTX_F24_OFF(t2)
    FLOAD f25, FPU_CTX_F25_OFF(t2)
    FLOAD f26, FPU_CTX_F26_OFF(t2)
    FLOAD f27, FPU_CTX_F27_OFF(t2)
    FLOAD f28, FPU_CTX_F28_OFF(t2)
    FLOAD f29, FPU_CTX_F29_OFF(t2)
    FLOAD f30, FPU_CTX_F30_OFF(t2)
    FLOAD f31, FPU_CTX_F31_OFF(t2)
    /* clr FS domain */
    csrc sstatus, t0
    /* mark FPU state clean (also clears sstatus.SD) */
    li t0, SSTATUS_FS_CLEAN
    csrs sstatus, t0
#endif /* ARCH_RISCV_FPU */
    /* restore general register */
    /* sscratch := frame top, i.e. the kernel sp to use on the next trap */
    addi t0, sp, CTX_REG_NR * REGBYTES
    csrw sscratch, t0
    /* restore saved epc into sepc for sret */
    LOAD x1, 0 * REGBYTES(sp)
    csrw sepc, x1
    LOAD x1, 2 * REGBYTES(sp)
    csrw sstatus, x1
    LOAD x1, 1 * REGBYTES(sp)
    LOAD x3, 3 * REGBYTES(sp)
    LOAD x4, 4 * REGBYTES(sp) /* restore tp */
    LOAD x5, 5 * REGBYTES(sp)
    LOAD x6, 6 * REGBYTES(sp)
    LOAD x7, 7 * REGBYTES(sp)
    LOAD x8, 8 * REGBYTES(sp)
    LOAD x9, 9 * REGBYTES(sp)
    LOAD x10, 10 * REGBYTES(sp)
    LOAD x11, 11 * REGBYTES(sp)
    LOAD x12, 12 * REGBYTES(sp)
    LOAD x13, 13 * REGBYTES(sp)
    LOAD x14, 14 * REGBYTES(sp)
    LOAD x15, 15 * REGBYTES(sp)
    LOAD x16, 16 * REGBYTES(sp)
    LOAD x17, 17 * REGBYTES(sp)
    LOAD x18, 18 * REGBYTES(sp)
    LOAD x19, 19 * REGBYTES(sp)
    LOAD x20, 20 * REGBYTES(sp)
    LOAD x21, 21 * REGBYTES(sp)
    LOAD x22, 22 * REGBYTES(sp)
    LOAD x23, 23 * REGBYTES(sp)
    LOAD x24, 24 * REGBYTES(sp)
    LOAD x25, 25 * REGBYTES(sp)
    LOAD x26, 26 * REGBYTES(sp)
    LOAD x27, 27 * REGBYTES(sp)
    LOAD x28, 28 * REGBYTES(sp)
    LOAD x29, 29 * REGBYTES(sp)
    LOAD x30, 30 * REGBYTES(sp)
    LOAD x31, 31 * REGBYTES(sp)
    /* restore user sp (slot 32); this discards the whole frame at once */
    LOAD sp, 32 * REGBYTES(sp)
.endm
/* reload the kernel's global pointer; relaxation must be off so the
 * `la gp` itself is not gp-relative */
.macro RESTORE_SYS_GP
    .option push
    .option norelax
    la gp, __global_pointer$
    .option pop
.endm
/* enable S-mode interrupts: set sstatus.SIE (bit 1) */
.macro OPEN_INTERRUPT
    csrsi sstatus, 2
.endm
/* disable S-mode interrupts: clear sstatus.SIE (bit 1) */
.macro CLOSE_INTERRUPT
    csrci sstatus, 2
.endm
#endif /* __ASSEMBLY__ */
#endif /* __STACKFRAME_H__ */

View File

@ -0,0 +1,133 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/01 Bernard The first version
* 2018/12/27 Jesven Add SMP support
* 2020/6/12 Xim Port to QEMU and remove SMP support
* 2024-06-30 Shell Support of kernel remapping
*/
#include <encoding.h>
#include <cpuport.h>
.data
.global boot_hartid /* global variable rt_boot_hartid in .data section */
boot_hartid:
    .word 0xdeadbeef

    .global _start
    .section ".start", "ax"
_start:
    j 1f
    .word 0xdeadbeef
    .align 3
    .global g_wake_up
    g_wake_up:
        .dword 1
        .dword 0
1:
    /* save hartid */
    la t0, boot_hartid /* global variable rt_boot_hartid */
    mv t1, a0 /* get hartid in S-mode from the a0 register */
    sw t1, (t0) /* store t1 register low 4 bits in memory address which is stored in t0 */

    /* clear Interrupt Registers */
    csrw sie, 0
    csrw sip, 0
    /* set Trap Vector Base Address Register */
    la t0, trap_entry
    csrw stvec, t0

    /* zero every general register for a deterministic start state */
    li x1, 0
    li x2, 0
    li x3, 0
    li x4, 0
    li x5, 0
    li x6, 0
    li x7, 0
    li x8, 0
    li x9, 0
    li x10,0
    li x11,0
    li x12,0
    li x13,0
    li x14,0
    li x15,0
    li x16,0
    li x17,0
    li x18,0
    li x19,0
    li x20,0
    li x21,0
    li x22,0
    li x23,0
    li x24,0
    li x25,0
    li x26,0
    li x27,0
    li x28,0
    li x29,0
    li x30,0
    li x31,0

    /* set to disable FPU and vector unit (lazily re-enabled on use) */
    li t0, SSTATUS_FS + SSTATUS_VS
    csrc sstatus, t0
    /* allow S-mode access to user pages */
    li t0, SSTATUS_SUM
    csrs sstatus, t0

.option push
.option norelax
    la gp, __global_pointer$
.option pop
    /* removed SMP support here */
    la sp, __stack_start__
    li t0, __STACKSIZE__
    add sp, sp, t0

    /**
     * sscratch is always zero on kernel mode
     */
    csrw sscratch, zero
    call init_bss
#ifdef ARCH_MM_MMU
    call rt_hw_mem_setup_early
    call rt_kmem_pvoff
    /* a0 := pvoff */
    beq a0, zero, 1f

    /* relocate pc to the new virtual mapping */
    la x1, _after_pc_relocation
    sub x1, x1, a0
    ret
_after_pc_relocation:
    /* relocate gp */
    sub gp, gp, a0
    /* relocate context: sp */
    la sp, __stack_start__
    li t0, __STACKSIZE__
    add sp, sp, t0
    /* reset s0-fp */
    mv s0, zero
    /* relocate stvec */
    la t0, trap_entry
    csrw stvec, t0
1:
#endif
    call sbi_init
    call primary_cpu_entry

_never_return_here:
    j .

.global _start_link_addr
_start_link_addr:
    .dword __text_start

View File

@ -0,0 +1,62 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-02-03 lizhirui first version
* 2022-11-10 WangXiaoyao Add readable syscall tracing
*/
#include <rthw.h>
#include <rtthread.h>
#ifdef RT_USING_SMART
#define DBG_TAG "syscall"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>
#include <stdint.h>
#include <mmu.h>
#include <page.h>
#include <lwp_user_mm.h>
#include "riscv_mmu.h"
#include "stack.h"
typedef rt_ubase_t (*syscallfunc_t)(rt_ubase_t, rt_ubase_t, rt_ubase_t, rt_ubase_t, rt_ubase_t, rt_ubase_t, rt_ubase_t);
/*
 * Dispatch a user-mode `ecall` to the matching lwp system call.
 *
 * @param regs full trap frame of the calling thread: a7 carries the
 *             syscall id, a0-a6 the arguments; a0 receives the result.
 */
void syscall_handler(struct rt_hw_stack_frame *regs)
{
    const char *syscall_name;
    int syscallid = regs->a7;
    if (syscallid == 0)
    {
        /* id 0 is invalid by convention; spin so the fault is visible */
        LOG_E("syscall id = 0!\n");
        while (1)
            ;
    }
    syscallfunc_t syscallfunc = (syscallfunc_t)lwp_get_sys_api(syscallid);
    if (syscallfunc == RT_NULL)
    {
        /* unknown syscall kills the whole process */
        LOG_E("unsupported syscall!\n");
        sys_exit_group(-1);
    }
#if DBG_LVL >= DBG_INFO
    /* name lookup is for tracing only; skipped at lower debug levels
     * (LOG_I compiles away then, so syscall_name stays unused) */
    syscall_name = lwp_get_syscall_name(syscallid);
#endif
    LOG_I("[0x%lx] %s(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx)", rt_thread_self(), syscall_name,
          regs->a0, regs->a1, regs->a2, regs->a3, regs->a4, regs->a5, regs->a6);
    regs->a0 = syscallfunc(regs->a0, regs->a1, regs->a2, regs->a3, regs->a4, regs->a5, regs->a6);
    /* clear the syscall id so a retrapped frame is not re-dispatched */
    regs->a7 = 0;
    regs->epc += 4; // skip ecall instruction
    LOG_I("[0x%lx] %s ret: 0x%lx", rt_thread_self(), syscall_name, regs->a0);
}
#endif /* RT_USING_SMART */

View File

@ -0,0 +1,76 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/28 Bernard The unify RISC-V porting code.
* 2024/07/08 Shell Using CPUTIME as tick
*/
#include <rthw.h>
#include <rtthread.h>
#include <drivers/cputime.h>
#include <encoding.h>
#include "sbi.h"
#ifdef RT_USING_KTIME
#include <ktime.h>
#endif
/* cycles of the CPUTIME clock between two OS ticks (set in rt_hw_tick_init) */
static volatile unsigned long tick_cycles = 0;
/* Supervisor-timer ISR: advance the kernel tick and arm the next one. */
int tick_isr(void)
{
    rt_tick_increase();
    /* NOTE(review): the next deadline is based on "now" rather than on the
     * previous deadline, so tick timing accumulates interrupt latency —
     * confirm this drift is acceptable for this port. */
    sbi_set_timer(clock_cpu_gettime() + tick_cycles);
    return 0;
}
/* BSP should config clockbase frequency */
RT_STATIC_ASSERT(defined_clockbase_freq, CPUTIME_TIMER_FREQ != 0);
/* Sets and enables the timer interrupt.
 *
 * Programs the first tick deadline via SBI and unmasks the
 * supervisor-timer interrupt; returns 0 on success.
 */
int rt_hw_tick_init(void)
{
    /* calculate the tick cycles */
    tick_cycles = CPUTIME_TIMER_FREQ / RT_TICK_PER_SECOND;
    /* mask the Supervisor-Timer bit in SIE while setting up */
    clear_csr(sie, SIP_STIP);
    /* Init riscv timer */
    riscv_cputime_init();
    /* arm the first tick deadline */
    sbi_set_timer(clock_cpu_gettime() + tick_cycles);
#ifdef RT_USING_KTIME
    rt_ktime_cputimer_init();
#endif
    /* Enable the Supervisor-Timer bit in SIE */
    set_csr(sie, SIP_STIP);
    return 0;
}
/**
 * This function will delay for some us.
 *
 * Busy-waits on the CPUTIME cycle counter; accuracy depends on the
 * configured CPUTIME_TIMER_FREQ.
 *
 * @param us the delay time of us
 */
void rt_hw_us_delay(rt_uint32_t us)
{
    unsigned long deadline;

    /* convert microseconds to counter cycles and spin until reached */
    deadline = clock_cpu_gettime() + us * (CPUTIME_TIMER_FREQ / 1000000);
    while (clock_cpu_gettime() < deadline)
    {
        /* busy wait */
    }
}

View File

@ -0,0 +1,17 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/28 Bernard The unify RISC-V porting code.
*/
#ifndef TICK_H__
#define TICK_H__
int tick_isr(void);
int rt_hw_tick_init(void);
#endif

View File

@ -0,0 +1,61 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-28 WangXiaoyao the first version
*/
#ifndef __TLB_H__
#define __TLB_H__
#include <stddef.h>
#include <stdint.h>
#include <rtthread.h>
#include <mm_aspace.h>
#include "sbi.h"
#include "riscv_mmu.h"
#define HANDLE_FAULT(ret) \
if (__builtin_expect((ret) != SBI_SUCCESS, 0)) \
LOG_W("%s failed", __FUNCTION__);
/* Flush the TLB on every hart (all address spaces, all ASIDs) via the
 * SBI remote-fence extension. */
static inline void rt_hw_tlb_invalidate_all(void)
{
    /* all-ones mask selects every hart and the full address range */
    uintptr_t mask = -1ul;
    HANDLE_FAULT(sbi_remote_sfence_vma(&mask, -1ul, 0, mask));
}
/* Flush the entire TLB of the current hart only. */
static inline void rt_hw_tlb_invalidate_all_local(void)
{
    __asm__ volatile("sfence.vma" ::: "memory");
}
/* Flush all translations belonging to `aspace` on the current hart.
 * Without ASID support this degenerates to a full local flush. */
static inline void rt_hw_tlb_invalidate_aspace(rt_aspace_t aspace)
{
    // TODO ASID
    rt_hw_tlb_invalidate_all_local();
}
/* Flush the translation for the single page at `start` on the current
 * hart (all ASIDs, since rs2 is zero). */
static inline void rt_hw_tlb_invalidate_page(rt_aspace_t aspace, void *start)
{
    __asm__ volatile("sfence.vma %0, zero" ::"r"(start) : "memory");
}
/*
 * Flush translations for [start, start + size). A single page is flushed
 * directly; anything larger (huge pages are treated as normal pages
 * here) falls back to a whole-aspace flush.
 */
static inline void rt_hw_tlb_invalidate_range(rt_aspace_t aspace, void *start,
                                              size_t size, size_t stride)
{
    if (size > ARCH_PAGE_SIZE)
    {
        rt_hw_tlb_invalidate_aspace(aspace);
    }
    else
    {
        rt_hw_tlb_invalidate_page(aspace, start);
    }
}
#endif /* __TLB_H__ */

View File

@ -0,0 +1,386 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-12-08 RT-Thread first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <stdint.h>
#include <mm_fault.h>
#include <mmu.h>
#include <encoding.h>
#include <stack.h>
#include <sbi.h>
#include <riscv.h>
#include <interrupt.h>
#include <plic.h>
#include <tick.h>
#ifdef RT_USING_SMART
#include <lwp_arch.h>
#endif
#define DBG_TAG "libcpu.trap"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
/* Print every register of a trap frame plus a decoded view of sstatus
 * and satp; used by the fatal-trap paths below for post-mortem output. */
void dump_regs(struct rt_hw_stack_frame *regs)
{
    rt_kprintf("--------------Dump Registers-----------------\n");
    rt_kprintf("Function Registers:\n");
    rt_kprintf("\tra(x1) = %p\tuser_sp = %p\n", regs->ra,
               regs->user_sp_exc_stack);
    rt_kprintf("\tgp(x3) = %p\ttp(x4) = %p\n", regs->gp, regs->tp);
    rt_kprintf("Temporary Registers:\n");
    rt_kprintf("\tt0(x5) = %p\tt1(x6) = %p\n", regs->t0, regs->t1);
    rt_kprintf("\tt2(x7) = %p\n", regs->t2);
    rt_kprintf("\tt3(x28) = %p\tt4(x29) = %p\n", regs->t3, regs->t4);
    rt_kprintf("\tt5(x30) = %p\tt6(x31) = %p\n", regs->t5, regs->t6);
    rt_kprintf("Saved Registers:\n");
    rt_kprintf("\ts0/fp(x8) = %p\ts1(x9) = %p\n", regs->s0_fp, regs->s1);
    rt_kprintf("\ts2(x18) = %p\ts3(x19) = %p\n", regs->s2, regs->s3);
    rt_kprintf("\ts4(x20) = %p\ts5(x21) = %p\n", regs->s4, regs->s5);
    rt_kprintf("\ts6(x22) = %p\ts7(x23) = %p\n", regs->s6, regs->s7);
    rt_kprintf("\ts8(x24) = %p\ts9(x25) = %p\n", regs->s8, regs->s9);
    rt_kprintf("\ts10(x26) = %p\ts11(x27) = %p\n", regs->s10, regs->s11);
    rt_kprintf("Function Arguments Registers:\n");
    rt_kprintf("\ta0(x10) = %p\ta1(x11) = %p\n", regs->a0, regs->a1);
    rt_kprintf("\ta2(x12) = %p\ta3(x13) = %p\n", regs->a2, regs->a3);
    rt_kprintf("\ta4(x14) = %p\ta5(x15) = %p\n", regs->a4, regs->a5);
    rt_kprintf("\ta6(x16) = %p\ta7(x17) = %p\n", regs->a6, regs->a7);
    /* decode the interesting sstatus bits */
    rt_kprintf("sstatus = %p\n", regs->sstatus);
    rt_kprintf("\t%s\n", (regs->sstatus & SSTATUS_SIE)
                             ? "Supervisor Interrupt Enabled"
                             : "Supervisor Interrupt Disabled");
    rt_kprintf("\t%s\n", (regs->sstatus & SSTATUS_SPIE)
                             ? "Last Time Supervisor Interrupt Enabled"
                             : "Last Time Supervisor Interrupt Disabled");
    rt_kprintf("\t%s\n", (regs->sstatus & SSTATUS_SPP)
                             ? "Last Privilege is Supervisor Mode"
                             : "Last Privilege is User Mode");
    rt_kprintf("\t%s\n", (regs->sstatus & SSTATUS_SUM)
                             ? "Permit to Access User Page"
                             : "Not Permit to Access User Page");
    rt_kprintf("\t%s\n", (regs->sstatus & (1 << 19)) /* MXR bit */
                             ? "Permit to Read Executable-only Page"
                             : "Not Permit to Read Executable-only Page");
    /* decode satp: PPN [43:0], ASID [59:44], MODE [63:60] */
    rt_ubase_t satp_v = read_csr(satp);
    rt_kprintf("satp = %p\n", satp_v);
    rt_kprintf("\tCurrent Page Table(Physical) = %p\n",
               __MASKVALUE(satp_v, __MASK(44)) << PAGE_OFFSET_BIT);
    rt_kprintf("\tCurrent ASID = %p\n", __MASKVALUE(satp_v >> 44, __MASK(16))
                                            << PAGE_OFFSET_BIT);
    const char *mode_str = "Unknown Address Translation/Protection Mode";
    switch (__MASKVALUE(satp_v >> 60, __MASK(4)))
    {
        case 0:
            mode_str = "No Address Translation/Protection Mode";
            break;
        case 8:
            mode_str = "Page-based 39-bit Virtual Addressing Mode";
            break;
        case 9:
            mode_str = "Page-based 48-bit Virtual Addressing Mode";
            break;
    }
    rt_kprintf("\tMode = %s\n", mode_str);
    rt_kprintf("-----------------Dump OK---------------------\n");
}
/* Exception names indexed by the scause exception code (interrupt bit clear). */
static const char *Exception_Name[] = {"Instruction Address Misaligned",
                                       "Instruction Access Fault",
                                       "Illegal Instruction",
                                       "Breakpoint",
                                       "Load Address Misaligned",
                                       "Load Access Fault",
                                       "Store/AMO Address Misaligned",
                                       "Store/AMO Access Fault",
                                       "Environment call from U-mode",
                                       "Environment call from S-mode",
                                       "Reserved-10",
                                       "Reserved-11",
                                       "Instruction Page Fault",
                                       "Load Page Fault",
                                       "Reserved-14",
                                       "Store/AMO Page Fault"};
/* Interrupt names indexed by the scause interrupt code (interrupt bit set). */
static const char *Interrupt_Name[] = {
    "User Software Interrupt",
    "Supervisor Software Interrupt",
    "Reversed-2",
    "Reversed-3",
    "User Timer Interrupt",
    "Supervisor Timer Interrupt",
    "Reversed-6",
    "Reversed-7",
    "User External Interrupt",
    "Supervisor External Interrupt",
    "Reserved-10",
    "Reserved-11",
};
#ifndef RT_USING_SMP
static volatile int nested = 0;
#define ENTER_TRAP nested += 1
#define EXIT_TRAP nested -= 1
#define CHECK_NESTED_PANIC(cause, tval, epc, eframe) \
if (nested != 1) handle_nested_trap_panic(cause, tval, epc, eframe)
#endif /* RT_USING_SMP */
/* Translate an exception cause id into a printable name. */
static const char *get_exception_msg(int id)
{
    if (id < sizeof(Exception_Name) / sizeof(const char *))
    {
        return Exception_Name[id];
    }
    return "Unknown Exception";
}
#ifdef RT_USING_SMART
#include "lwp.h"
/*
 * Handle a trap taken from (or attributable to) user mode. Page and
 * access faults are first offered to the lwp's address space for lazy
 * mapping/COW fixup; anything unfixable dumps state and kills the
 * offending process via sys_exit_group().
 */
void handle_user(rt_ubase_t scause, rt_ubase_t stval, rt_ubase_t sepc,
                 struct rt_hw_stack_frame *sp)
{
    rt_ubase_t id = __MASKVALUE(scause, __MASK(63UL));
    struct rt_lwp *lwp;
    /* user page fault */
    enum rt_mm_fault_op fault_op;
    enum rt_mm_fault_type fault_type;
    /* classify the trap into an MM fault operation/type pair */
    switch (id)
    {
        case EP_LOAD_PAGE_FAULT:
            fault_op = MM_FAULT_OP_READ;
            fault_type = MM_FAULT_TYPE_GENERIC_MMU;
            break;
        case EP_LOAD_ACCESS_FAULT:
            fault_op = MM_FAULT_OP_READ;
            fault_type = MM_FAULT_TYPE_BUS_ERROR;
            break;
        case EP_LOAD_ADDRESS_MISALIGNED:
            fault_op = MM_FAULT_OP_READ;
            fault_type = MM_FAULT_TYPE_BUS_ERROR;
            break;
        case EP_STORE_PAGE_FAULT:
            fault_op = MM_FAULT_OP_WRITE;
            fault_type = MM_FAULT_TYPE_GENERIC_MMU;
            break;
        case EP_STORE_ACCESS_FAULT:
            fault_op = MM_FAULT_OP_WRITE;
            fault_type = MM_FAULT_TYPE_BUS_ERROR;
            break;
        case EP_STORE_ADDRESS_MISALIGNED:
            fault_op = MM_FAULT_OP_WRITE;
            fault_type = MM_FAULT_TYPE_BUS_ERROR;
            break;
        case EP_INSTRUCTION_PAGE_FAULT:
            fault_op = MM_FAULT_OP_EXECUTE;
            fault_type = MM_FAULT_TYPE_GENERIC_MMU;
            break;
        case EP_INSTRUCTION_ACCESS_FAULT:
            fault_op = MM_FAULT_OP_EXECUTE;
            fault_type = MM_FAULT_TYPE_BUS_ERROR;
            break;
        case EP_INSTRUCTION_ADDRESS_MISALIGNED:
            fault_op = MM_FAULT_OP_EXECUTE;
            fault_type = MM_FAULT_TYPE_BUS_ERROR;
            break;
        default:
            /* NOTE(review): 0 is used as "no fixable fault" sentinel; this
             * assumes no MM_FAULT_OP_* enumerator equals 0 — verify against
             * mm_fault.h, otherwise a real fault id would skip the fixup. */
            fault_op = 0;
    }
    if (fault_op)
    {
        rt_base_t saved_stat;
        lwp = lwp_self();
        struct rt_aspace_fault_msg msg = {
            .fault_op = fault_op,
            .fault_type = fault_type,
            .fault_vaddr = (void *)stval,
        };
        /* enable interrupts (csrrsi sets sstatus.SIE) during the fixup,
         * restoring the previous sstatus afterwards */
        __asm__ volatile("csrrsi %0, sstatus, 2" : "=r"(saved_stat));
        if (lwp && rt_aspace_fault_try_fix(lwp->aspace, &msg))
        {
            /* fixed: retry the faulting instruction on return */
            __asm__ volatile("csrw sstatus, %0" ::"r"(saved_stat));
            return;
        }
        __asm__ volatile("csrw sstatus, %0" ::"r"(saved_stat));
    }
    /* unrecoverable: report, backtrace, then kill the process */
    LOG_E("[FATAL ERROR] Exception %ld:%s\n", id, get_exception_msg(id));
    LOG_E("scause:%p,stval:%p,sepc:%p\n", scause, stval, sepc);
    dump_regs(sp);
    rt_thread_t cur_thr = rt_thread_self();
    struct rt_hw_backtrace_frame frame = {.fp = sp->s0_fp, .pc = sepc};
    rt_kprintf("fp = %p\n", frame.fp);
    lwp_backtrace_frame(cur_thr, &frame);
    LOG_E("User Fault, killing thread: %s", cur_thr->parent.name);
    /* leave the trap-nesting scope before the no-return exit */
    EXIT_TRAP;
    sys_exit_group(-1);
}
#endif
#ifdef ARCH_RISCV_VECTOR
/* Mark the vector unit "initial" in the saved sstatus so V instructions
 * become legal once this trap frame is restored. */
static void vector_enable(struct rt_hw_stack_frame *sp)
{
    sp->sstatus |= SSTATUS_VS_INITIAL;
}
/**
* detect V/D support, and do not distinguish V/D instruction
*/
/**
 * detect V/D support, and do not distinguish V/D instruction
 *
 * Decide whether an illegal-instruction trap was raised by a vector,
 * scalar floating-point load/store, or CSR instruction executed while
 * the corresponding unit was disabled. If so, lazily enable the vector
 * context so the instruction can be retried.
 *
 * @param stval faulting instruction bits as reported in stval
 * @param sp    trap frame of the faulting context
 * @return 1 when recoverable (vector state enabled), 0 otherwise
 */
static int illegal_inst_recoverable(rt_ubase_t stval,
                                    struct rt_hw_stack_frame *sp)
{
    /* the major opcode lives in the first (low) 7 bits */
    int opcode = stval & 0x7f;
    int flag = 0;
    /* ref riscv-v-spec-1.0, [Vector Instruction Formats] */
    switch (opcode)
    {
        case 0x57: // V
        case 0x27: // scalar FLOAT store
        case 0x07: // scalar FLOAT load
        case 0x73: // CSR
            flag = 1;
            break;
    }
    if (flag)
    {
        vector_enable(sp);
    }
    return flag;
}
#endif
/* Last-resort handler when a trap arrives while another trap is still
 * being processed: dump state and halt the machine. */
static void handle_nested_trap_panic(rt_ubase_t cause, rt_ubase_t tval,
                                     rt_ubase_t epc,
                                     struct rt_hw_stack_frame *eframe)
{
    LOG_E("\n-------- [SEVER ERROR] --------");
    LOG_E("Nested trap detected");
    LOG_E("scause:%p,stval:%p,sepc:%p\n", cause, tval, epc);
    dump_regs(eframe);
    rt_hw_cpu_shutdown();
}
/* fault address lies in the user address range */
#define IN_USER_SPACE (stval >= USER_VADDR_START && stval < USER_VADDR_TOP)
/* cause is a load/store page fault */
#define PAGE_FAULT (id == EP_LOAD_PAGE_FAULT || id == EP_STORE_PAGE_FAULT)
/* Trap entry: C-level dispatcher called from trap_entry for every
 * S-mode interrupt and exception.
 *
 * External and timer interrupts are serviced and return; recoverable
 * exceptions (lazy vector enable, user faults under RT_USING_SMART) are
 * fixed up; everything else dumps state and hangs.
 */
void handle_trap(rt_ubase_t scause, rt_ubase_t stval, rt_ubase_t sepc,
                 struct rt_hw_stack_frame *sp)
{
    ENTER_TRAP;
    /* id = exception/interrupt code without the top interrupt bit */
    rt_ubase_t id = __MASKVALUE(scause, __MASK(63UL));
    const char *msg;
    /* supervisor external interrupt */
    if ((SCAUSE_INTERRUPT & scause) &&
        SCAUSE_S_EXTERNAL_INTR == (scause & 0xff))
    {
        rt_interrupt_enter();
        plic_handle_irq();
        rt_interrupt_leave();
    }
    else if ((SCAUSE_INTERRUPT | SCAUSE_S_TIMER_INTR) == scause)
    {
        /* supervisor timer */
        rt_interrupt_enter();
        tick_isr();
        rt_interrupt_leave();
    }
    else
    {
        if (SCAUSE_INTERRUPT & scause)
        {
            /* an interrupt we have no handler for: report and panic below */
            if (id < sizeof(Interrupt_Name) / sizeof(const char *))
            {
                msg = Interrupt_Name[id];
            }
            else
            {
                msg = "Unknown Interrupt";
            }
            LOG_E("Unhandled Interrupt %ld:%s\n", id, msg);
        }
        else
        {
#ifdef ARCH_RISCV_VECTOR
            /* illegal instruction: maybe a V/F op with the unit off */
            if (scause == 0x2)
            {
                if (!(sp->sstatus & SSTATUS_VS) &&
                    illegal_inst_recoverable(stval, sp))
                    goto _exit;
            }
#endif /* ARCH_RISCV_VECTOR */
#ifdef RT_USING_SMART
            /* 0x100 is sstatus.SPP: trap came from user mode, or a page
             * fault targets a user address — hand it to handle_user() */
            if (!(sp->sstatus & 0x100) || (PAGE_FAULT && IN_USER_SPACE))
            {
                handle_user(scause, stval, sepc, sp);
                // if handle_user() return here, jump to u mode then
                goto _exit;
            }
#endif
            // handle kernel exception:
            rt_kprintf("Unhandled Exception %ld:%s\n", id,
                       get_exception_msg(id));
        }
        // trap cannot nested when handling another trap / interrupt
        CHECK_NESTED_PANIC(scause, stval, sepc, sp);
        rt_kprintf("scause:%p,stval:%p,sepc:%p\n", scause, stval, sepc);
        dump_regs(sp);
        rt_thread_t cur_thr = rt_thread_self();
        rt_kprintf("--------------Thread list--------------\n");
        rt_kprintf("current thread: %s\n", cur_thr->parent.name);
        rt_kprintf("--------------Backtrace--------------\n");
        struct rt_hw_backtrace_frame frame = {.fp = sp->s0_fp, .pc = sepc};
#ifdef RT_USING_SMART
        if (!(sp->sstatus & 0x100))
        {
            /* fault from user mode: walk the user-space stack */
            lwp_backtrace_frame(cur_thr, &frame);
        }
        else
#endif
        {
            rt_backtrace_frame(cur_thr, &frame);
        }
        /* fatal: hang forever with the dump on the console */
        while (1)
            ;
    }
_exit:
    EXIT_TRAP;
    return;
}

View File

@ -0,0 +1,12 @@
# RT-Thread building script for component
from building import *

# Directory containing this SConscript; reused for include paths.
cwd = GetCurrentDir()
# All C/C++ sources plus GCC-flavoured assembly files in this directory.
src = Glob('*.c') + Glob('*.cpp') + Glob('*_gcc.S')
CPPPATH = [cwd]
# The assembler needs this directory on its include path as well.
ASFLAGS = ' -I ' + cwd
# Register the sources as the 'libcpu' build group (no config dependency).
group = DefineGroup('libcpu', src, depend = [''], CPPPATH = CPPPATH, ASFLAGS = ASFLAGS)
Return('group')

View File

@ -0,0 +1,17 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-04-26 Shell lockless rt_completion
*/
#include <rtthread.h>
/* Drop any macro with the same name so the weak function below provides
 * the rt_hw_isb symbol. */
#undef rt_hw_isb
/**
 * @brief Instruction synchronization barrier.
 *
 * Weak empty default; a port that needs a real barrier can supply a
 * strong override of this symbol.
 */
rt_weak void rt_hw_isb(void)
{
    return ;
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,186 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024/01/11 flyingcys The first version
*/
#include <rtthread.h>
#include <encoding.h>
/*
 * General-purpose registers x1..x31 as saved on the stack by the trap
 * entry code, one 64-bit slot per register (x0 is hardwired to zero and
 * not saved). Decoded by print_stack_frame() below; the ABI names in the
 * field comments match the labels printed there.
 */
struct exception_stack_frame
{
    uint64_t x1;    /* ra */
    uint64_t x2;    /* sp */
    uint64_t x3;    /* gp */
    uint64_t x4;    /* tp */
    uint64_t x5;    /* t0 */
    uint64_t x6;    /* t1 */
    uint64_t x7;    /* t2 */
    uint64_t x8;    /* s0/fp */
    uint64_t x9;    /* s1 */
    uint64_t x10;   /* a0 */
    uint64_t x11;   /* a1 */
    uint64_t x12;   /* a2 */
    uint64_t x13;   /* a3 */
    uint64_t x14;   /* a4 */
    uint64_t x15;   /* a5 */
    uint64_t x16;   /* a6 */
    uint64_t x17;   /* a7 */
    uint64_t x18;   /* s2 */
    uint64_t x19;   /* s3 */
    uint64_t x20;   /* s4 */
    uint64_t x21;   /* s5 */
    uint64_t x22;   /* s6 */
    uint64_t x23;   /* s7 */
    uint64_t x24;   /* s8 */
    uint64_t x25;   /* s9 */
    uint64_t x26;   /* s10 */
    uint64_t x27;   /* s11 */
    uint64_t x28;   /* t3 */
    uint64_t x29;   /* t4 */
    uint64_t x30;   /* t5 */
    uint64_t x31;   /* t6 */
};
/**
 * Dump the 31 general-purpose registers of the exception stack frame that
 * starts one machine word above *sp* (the skipped word is presumably a
 * saved status/pc slot — confirm against the trap entry assembly).
 * Each register is printed as two 32-bit hex halves.
 */
static void print_stack_frame(uintptr_t * sp)
{
    static const char * const reg_labels[31] =
    {
        "x1 (ra : Return address )",
        "x2 (sp : Stack pointer )",
        "x3 (gp : Global pointer )",
        "x4 (tp : Thread pointer )",
        "x5 (t0 : Temporary )",
        "x6 (t1 : Temporary )",
        "x7 (t2 : Temporary )",
        "x8 (s0/fp: Save register,frame pointer )",
        "x9 (s1 : Save register )",
        "x10(a0 : Function argument,return value)",
        "x11(a1 : Function argument,return value)",
        "x12(a2 : Function argument )",
        "x13(a3 : Function argument )",
        "x14(a4 : Function argument )",
        "x15(a5 : Function argument )",
        "x16(a6 : Function argument )",
        "x17(a7 : Function argument )",
        "x18(s2 : Save register )",
        "x19(s3 : Save register )",
        "x20(s4 : Save register )",
        "x21(s5 : Save register )",
        "x22(s6 : Save register )",
        "x23(s7 : Save register )",
        "x24(s8 : Save register )",
        "x25(s9 : Save register )",
        "x26(s10 : Save register )",
        "x27(s11 : Save register )",
        "x28(t3 : Temporary )",
        "x29(t4 : Temporary )",
        "x30(t5 : Temporary )",
        "x31(t6 : Temporary )",
    };
    /* the frame is 31 consecutive uint64_t slots, x1 first */
    const uint64_t *regs = (const uint64_t *)(sp + 1);
    int n;

    rt_kprintf("\n=================================================================\n");
    for (n = 0; n < 31; n++)
    {
        rt_kprintf("%s ==> 0x%08x%08x\n", reg_labels[n],
                   regs[n] >> 32, regs[n] & UINT32_MAX);
    }
    rt_kprintf("=================================================================\n");
}
/* Default (weak) machine software-interrupt handler; a BSP may override. */
rt_weak void rt_hw_soft_irq_isr(void)
{
}

/* Default (weak) machine timer-interrupt handler; a BSP may override. */
rt_weak int rt_hw_tick_isr(void)
{
    return 0;
}

/* Default (weak) machine external-interrupt handler; a BSP may override. */
rt_weak void rt_hw_irq_isr(void)
{
}
/**
 * Default (weak) machine-mode trap dispatcher.
 *
 * Interrupts (top bit of the cause register set) are routed to the weak
 * soft/timer/external ISR hooks above. Synchronous exceptions are fatal:
 * the cause is decoded to text, the saved register frame and pc are
 * dumped, and the system halts with interrupts disabled.
 *
 * @param cause trap cause register value
 * @param epc   pc of the faulting/interrupted instruction
 * @param sp    stack pointer at trap entry (the saved register frame)
 * @return the pc to resume at (returned unchanged)
 */
rt_weak rt_size_t handle_trap(rt_size_t cause, rt_size_t epc, rt_size_t *sp)
{
    if (cause & (1UL << (__riscv_xlen - 1))) /* interrupt */
    {
        /* only the low bits of the cause carry the interrupt id */
        if ((cause & 0x1f) == IRQ_M_SOFT)
        {
            rt_hw_soft_irq_isr();
        }
        else if ((cause & 0x1f) == IRQ_M_TIMER)
        {
            rt_hw_tick_isr();
        }
        else if ((cause & 0x1f) == IRQ_M_EXT)
        {
            rt_hw_irq_isr();
        }
    }
    else /* synchronous exception: report and halt */
    {
        rt_thread_t tid;

#if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
        extern long list_thread();
#endif
        rt_hw_interrupt_disable();
        tid = rt_thread_self();

        rt_kprintf("\nException:\n");
        switch (cause)
        {
        case CAUSE_MISALIGNED_FETCH:
            rt_kprintf("Instruction address misaligned");
            break;
        case CAUSE_FAULT_FETCH:
            rt_kprintf("Instruction access fault");
            break;
        case CAUSE_ILLEGAL_INSTRUCTION:
            rt_kprintf("Illegal instruction");
            break;
        case CAUSE_BREAKPOINT:
            rt_kprintf("Breakpoint");
            break;
        case CAUSE_MISALIGNED_LOAD:
            rt_kprintf("Load address misaligned");
            break;
        case CAUSE_FAULT_LOAD:
            rt_kprintf("Load access fault");
            break;
        case CAUSE_MISALIGNED_STORE:
            rt_kprintf("Store address misaligned");
            break;
        case CAUSE_FAULT_STORE:
            rt_kprintf("Store access fault");
            break;
        case CAUSE_USER_ECALL:
            rt_kprintf("Environment call from U-mode");
            break;
        case CAUSE_SUPERVISOR_ECALL:
            rt_kprintf("Environment call from S-mode");
            break;
        case CAUSE_HYPERVISOR_ECALL:
            rt_kprintf("Environment call from H-mode");
            break;
        case CAUSE_MACHINE_ECALL:
            rt_kprintf("Environment call from M-mode");
            break;
        default:
            /* message typo fixed: was "Uknown exception" */
            rt_kprintf("Unknown exception : %08lX", cause);
            break;
        }
        rt_kprintf("\n");
        print_stack_frame(sp);
        /* print both 32-bit halves so the full 64-bit pc is visible
         * (a single %08x silently truncated the upper half) */
        rt_kprintf("exception pc => 0x%08x%08x\n", epc >> 32, epc & UINT32_MAX);
        rt_kprintf("current thread: %.*s\n", RT_NAME_MAX, tid->parent.name);
#if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
        list_thread();
#endif
        while (1);
    }
    return epc;
}

View File

@ -0,0 +1,13 @@
# RT-Thread building script for component
from building import *

# Directory that contains this SConscript.
cwd = GetCurrentDir()
src = Glob('*.c') + Glob('*.cpp') + Glob('*_gcc.S')
CPPPATH = [cwd]

# asid.c is only built when address-space-ID support is enabled.
if not GetDepend('ARCH_USING_ASID'):
    SrcRemove(src, ['asid.c'])

group = DefineGroup('libcpu', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

View File

@ -0,0 +1,10 @@
#ifndef _SBI_ASM_H
#define _SBI_ASM_H

/*
 * SBI_CALL: issue a legacy SBI call from assembly.
 * Loads the SBI function id \which into a7 and traps into the SBI
 * firmware with ecall; the trailing nop appears to be padding —
 * NOTE(review): confirm it is intentional.
 */
.macro SBI_CALL which
    li a7, \which
    ecall
    nop
.endm

#endif /* _SBI_ASM_H */

View File

@ -0,0 +1,27 @@
/*
* Copyright (c) 2019-2020, Xim
*
* SPDX-License-Identifier: Apache-2.0
*
*/
#ifndef _ASM_SBI_DEF_H
#define _ASM_SBI_DEF_H

/* Legacy SBI v0.1 extension IDs (passed to the firmware in a7). */
#define SBI_SET_TIMER 0
#define SBI_CONSOLE_PUTCHAR 1
#define SBI_CONSOLE_GETCHAR 2
#define SBI_CLEAR_IPI 3
#define SBI_SEND_IPI 4
#define SBI_REMOTE_FENCE_I 5
#define SBI_REMOTE_SFENCE_VMA 6
#define SBI_REMOTE_SFENCE_VMA_ASID 7
#define SBI_SHUTDOWN 8

/* IDs below are not in the standard legacy set — presumably firmware or
 * platform specific extensions; verify against the SBI implementation. */
#define SBI_CONSOLE_PUTSTR 9
#define SBI_SD_WRITE 10
#define SBI_SD_READ 11
#define SBI_NET_WRITE 12
#define SBI_NET_READ 13

#endif /* _ASM_SBI_DEF_H */

View File

@ -0,0 +1,129 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-01-29 lizhirui first version
* 2021-11-05 JasonHu add c906 cache inst
* 2022-11-09 WangXiaoyao Support cache coherence operations;
* improve portability and make
* no assumption on undefined behavior
*/
#include <rthw.h>
#include <rtdef.h>
#include <board.h>
#include <riscv.h>
#include "opcode.h"
#include "cache.h"
#define L1_CACHE_BYTES (64)
/**
* GCC version not support t-head cache flush, so we use fixed code to achieve.
* The following function cannot be optimized.
*/
static void dcache_wb_range(unsigned long start, unsigned long end) __attribute__((optimize("O0")));
static void dcache_inv_range(unsigned long start, unsigned long end) __attribute__((optimize("O0")));
static void dcache_wbinv_range(unsigned long start, unsigned long end) __attribute__((optimize("O0")));
static void icache_inv_range(unsigned long start, unsigned long end) __attribute__((optimize("O0")));
#define CACHE_OP_RS1 %0
/*
 * Issue one cache-maintenance instruction per L1 cache line over
 * [start, end): start is rounded down to a line boundary, then the given
 * opcode is executed with the line address bound to its rs1 operand.
 */
#define CACHE_OP_RANGE(instr) \
    { \
        register rt_ubase_t i = start & ~(L1_CACHE_BYTES - 1); \
        for (; i < end; i += L1_CACHE_BYTES) \
        { \
            __asm__ volatile(instr ::"r"(i) \
                             : "memory"); \
        } \
    }
/* Write back (clean) D-cache lines covering [start, end) — CVA opcode. */
static void dcache_wb_range(unsigned long start, unsigned long end)
{
    CACHE_OP_RANGE(OPC_DCACHE_CVA(CACHE_OP_RS1));
}

/* Invalidate D-cache lines covering [start, end) — IVA opcode, no
 * write-back of dirty data. */
static void dcache_inv_range(unsigned long start, unsigned long end)
{
    CACHE_OP_RANGE(OPC_DCACHE_IVA(CACHE_OP_RS1));
}

/* Write back then invalidate D-cache lines covering [start, end) — CIVA. */
static void dcache_wbinv_range(unsigned long start, unsigned long end)
{
    CACHE_OP_RANGE(OPC_DCACHE_CIVA(CACHE_OP_RS1));
}

/* Invalidate I-cache lines covering [start, end) — ICACHE IVA. */
static void icache_inv_range(unsigned long start, unsigned long end)
{
    CACHE_OP_RANGE(OPC_ICACHE_IVA(CACHE_OP_RS1));
}
/* Both caches use the fixed L1_CACHE_BYTES (64-byte) line size here. */
rt_inline rt_uint32_t rt_cpu_icache_line_size(void)
{
    return L1_CACHE_BYTES;
}

rt_inline rt_uint32_t rt_cpu_dcache_line_size(void)
{
    return L1_CACHE_BYTES;
}
/* Invalidate the I-cache range, then synchronize the instruction stream. */
void rt_hw_cpu_icache_invalidate_local(void *addr, int size)
{
    icache_inv_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
    rt_hw_cpu_sync_i();
}

/* Discard the D-cache range without writing dirty lines back. */
void rt_hw_cpu_dcache_invalidate_local(void *addr, int size)
{
    dcache_inv_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
    rt_hw_cpu_sync();
}

/* Write dirty D-cache lines in the range back to memory (clean). */
void rt_hw_cpu_dcache_clean_local(void *addr, int size)
{
    dcache_wb_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
    rt_hw_cpu_sync();
}

/* Clean then invalidate the D-cache range. */
void rt_hw_cpu_dcache_clean_and_invalidate_local(void *addr, int size)
{
    dcache_wbinv_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
    rt_hw_cpu_sync();
}
/**
* =====================================================
* Architecture Independent API
* =====================================================
*/
/*
 * Architecture-independent I-cache entry point: only invalidation is
 * meaningful for an instruction cache, other op codes are ignored.
 */
void rt_hw_cpu_icache_ops(int ops, void *addr, int size)
{
    if (ops == RT_HW_CACHE_INVALIDATE)
    {
        rt_hw_cpu_icache_invalidate_local(addr, size);
    }
}
/*
 * Architecture-independent D-cache entry point: RT_HW_CACHE_FLUSH cleans
 * (writes back) the range; any other op code invalidates it.
 */
void rt_hw_cpu_dcache_ops(int ops, void *addr, int size)
{
    if (ops != RT_HW_CACHE_FLUSH)
    {
        rt_hw_cpu_dcache_invalidate_local(addr, size);
        return;
    }

    rt_hw_cpu_dcache_clean_local(addr, size);
}
/*
 * Make newly written code visible to the instruction stream: clean the
 * D-cache range to memory, then invalidate the matching I-cache range.
 */
void rt_hw_sync_cache_local(void *addr, int size)
{
    rt_hw_cpu_dcache_clean_local(addr, size);
    rt_hw_cpu_icache_invalidate_local(addr, size);
}

View File

@ -0,0 +1,108 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-11-05 JasonHu The first version
*/
#ifndef CACHE_H__
#define CACHE_H__
#include "opcode.h"
#ifndef ALWAYS_INLINE
#define ALWAYS_INLINE inline __attribute__((always_inline))
#endif
#define rt_hw_cpu_sync() __asm__ volatile(OPC_SYNC:: \
: "memory")
#define rt_hw_cpu_sync_i() __asm__ volatile(OPC_SYNC_I:: \
: "memory");
/**
* ========================================
* Local cpu cache maintainence operations
* ========================================
*/
void rt_hw_cpu_dcache_clean_local(void *addr, int size);
void rt_hw_cpu_dcache_invalidate_local(void *addr, int size);
void rt_hw_cpu_dcache_clean_and_invalidate_local(void *addr, int size);
void rt_hw_cpu_icache_invalidate_local(void *addr, int size);
ALWAYS_INLINE void rt_hw_cpu_dcache_clean_all_local(void)
{
__asm__ volatile(OPC_DCACHE_CALL ::
: "memory");
rt_hw_cpu_sync();
}
ALWAYS_INLINE void rt_hw_cpu_dcache_invalidate_all_local(void)
{
__asm__ volatile(OPC_DCACHE_IALL ::
: "memory");
rt_hw_cpu_sync();
}
ALWAYS_INLINE void rt_hw_cpu_dcache_clean_and_invalidate_all_local(void)
{
__asm__ volatile(OPC_DCACHE_CIALL ::
: "memory");
rt_hw_cpu_sync();
}
ALWAYS_INLINE void rt_hw_cpu_icache_invalidate_all_local(void)
{
__asm__ volatile(OPC_ICACHE_IALL ::
: "memory");
rt_hw_cpu_sync_i();
}
#define rt_hw_icache_invalidate_all rt_hw_cpu_icache_invalidate_all
/**
* ========================================
* Multi-core cache maintainence operations
* ========================================
*/
#ifdef RT_USING_SMP
#error "TODO: cache maintainence have not ported to RISC-V SMP yet"
void rt_hw_cpu_dcache_clean(void *addr, int size);
void rt_hw_cpu_dcache_invalidate(void *addr, int size);
void rt_hw_cpu_dcache_clean_and_invalidate(void *addr, int size);
void rt_hw_cpu_dcache_clean_all(void);
void rt_hw_cpu_dcache_invalidate_all(void);
void rt_hw_cpu_dcache_clean_and_invalidate_all(void);
void rt_hw_cpu_icache_invalidate(void *addr, int size);
void rt_hw_cpu_icache_invalidate_all(void);
#else /* !RT_USING_SMP */
#define rt_hw_cpu_dcache_clean rt_hw_cpu_dcache_clean_local
#define rt_hw_cpu_dcache_invalidate rt_hw_cpu_dcache_invalidate_local
#define rt_hw_cpu_dcache_clean_and_invalidate rt_hw_cpu_dcache_clean_and_invalidate_local
#define rt_hw_cpu_dcache_clean_all rt_hw_cpu_dcache_clean_all_local
#define rt_hw_cpu_dcache_invalidate_all rt_hw_cpu_dcache_invalidate_all_local
#define rt_hw_cpu_dcache_clean_and_invalidate_all rt_hw_cpu_dcache_clean_and_invalidate_all_local
#define rt_hw_cpu_icache_invalidate rt_hw_cpu_icache_invalidate_local
#define rt_hw_cpu_icache_invalidate_all rt_hw_cpu_icache_invalidate_all_local
#endif /* RT_USING_SMP */
/**
* @brief Synchronize cache to Point of Coherent
*/
void rt_hw_sync_cache_local(void *addr, int size);
#endif /* CACHE_H__ */

View File

@ -0,0 +1,105 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-10-19 JasonHu first version
*/
#include <rthw.h>
#include <rtthread.h>
#include "interrupt.h"
#include "riscv.h"
#include "plic.h"
extern rt_atomic_t rt_interrupt_nest;
extern rt_uint32_t rt_interrupt_from_thread, rt_interrupt_to_thread;
extern rt_uint32_t rt_thread_switch_interrupt_flag;
struct rt_irq_desc isr_table[INTERRUPTS_MAX];
/**
 * Default interrupt service routine installed for every vector at init
 * time; it only reports that no real handler was registered.
 *
 * @param vector interrupt number that fired
 * @param param  handler parameter (unused)
 */
static void rt_hw_interrupt_handler(int vector, void *param)
{
    /* message typo fixed: "occured" -> "occurred" */
    rt_kprintf("Unhandled interrupt %d occurred!!!\n", vector);
}
/**
 * Initialize the hardware interrupt layer: bring up the PLIC and point
 * every vector at the default "unhandled interrupt" service routine.
 */
void rt_hw_interrupt_init(void)
{
    rt_int32_t i;

    /* bring up the interrupt controller first */
    plic_init();

    rt_memset(isr_table, 0x00, sizeof(isr_table));
    for (i = 0; i < INTERRUPTS_MAX; i++)
    {
        isr_table[i].handler = rt_hw_interrupt_handler;
    }

    /* reset the bookkeeping used by the context-switch code */
    rt_interrupt_nest = 0;
    rt_interrupt_from_thread = 0;
    rt_interrupt_to_thread = 0;
    rt_thread_switch_interrupt_flag = 0;
}
/**
 * This function will mask (disable) an interrupt at the PLIC.
 * @param vector the interrupt number
 *
 * NOTE(review): the bound check accepts vector == IRQ_MAX_NR while the
 * isr_table used elsewhere only has IRQ_MAX_NR usable slots — confirm the
 * intended valid range.
 */
void rt_hw_interrupt_mask(int vector)
{
    if ((vector < 0) || (vector > IRQ_MAX_NR))
    {
        return;
    }
    plic_disable_irq(vector);
}
/**
 * This function will un-mask (enable) an interrupt at the PLIC.
 * @param vector the interrupt number
 */
void rt_hw_interrupt_umask(int vector)
{
    if ((vector < 0) || (vector > IRQ_MAX_NR))
    {
        return;
    }
    plic_enable_irq(vector);
}
/**
 * This function will install an interrupt service routine for a vector.
 * @param vector the interrupt number
 * @param handler the interrupt service routine to be installed
 * @param param the interrupt service function parameter
 * @param name the interrupt name (stored only with RT_USING_INTERRUPT_INFO)
 * @return the previously installed handler, or RT_NULL for a bad vector
 */
rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
                                         void *param, const char *name)
{
    rt_isr_handler_t old_handler = RT_NULL;

    /* isr_table has INTERRUPTS_MAX (== IRQ_OFFSET + IRQ_MAX_NR) entries,
     * so the last valid vector is IRQ_MAX_NR - 1. The previous check
     * (vector > IRQ_MAX_NR) let vector == IRQ_MAX_NR index one element
     * past the end of the table. */
    if ((vector < 0) || ((IRQ_OFFSET + vector) >= INTERRUPTS_MAX))
    {
        return old_handler;
    }

    old_handler = isr_table[IRQ_OFFSET + vector].handler;

#ifdef RT_USING_INTERRUPT_INFO
    rt_strncpy(isr_table[IRQ_OFFSET + vector].name, name, RT_NAME_MAX);
#endif /* RT_USING_INTERRUPT_INFO */
    isr_table[IRQ_OFFSET + vector].handler = handler;
    isr_table[IRQ_OFFSET + vector].param = param;

    return old_handler;
}

View File

@ -0,0 +1,48 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-10-19 JasonHu first version
*/
#ifndef __INTERRUPT_H__
#define __INTERRUPT_H__
#include <rthw.h>
#define NR_CPUS 1
#define IRQ_OFFSET 16
#ifndef IRQ_MAX_NR
#define IRQ_MAX_NR 207
#endif
#define INTERRUPTS_MAX (IRQ_OFFSET + IRQ_MAX_NR)
enum {
EP_INSTRUCTION_ADDRESS_MISALIGNED = 0,
EP_INSTRUCTION_ACCESS_FAULT,
EP_ILLEGAL_INSTRUCTION,
EP_BREAKPOINT,
EP_LOAD_ADDRESS_MISALIGNED,
EP_LOAD_ACCESS_FAULT,
EP_STORE_ADDRESS_MISALIGNED,
EP_STORE_ACCESS_FAULT,
EP_ENVIRONMENT_CALL_U_MODE,
EP_ENVIRONMENT_CALL_S_MODE,
EP_RESERVED10,
EP_ENVIRONMENT_CALL_M_MODE,
EP_INSTRUCTION_PAGE_FAULT, /* page attr */
EP_LOAD_PAGE_FAULT, /* read data */
EP_RESERVED14,
EP_STORE_PAGE_FAULT, /* write data */
};
void rt_hw_interrupt_init(void);
void rt_hw_interrupt_mask(int vector);
void rt_hw_interrupt_umask(int vector);
rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler, void *param, const char *name);
#endif

View File

@ -0,0 +1,81 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-09 WangXiaoyao Add portable asm support
*/
#ifndef __OPCODE_H__
#define __OPCODE_H__
/**
* @brief binary opcode pseudo operations
* Used to bypass toolchain restriction on extension ISA
*
* WARNING: Xuantie ISAs are not compatible to each other in opcode.
* It's painful to port this file, and should be really careful.
*/
/**
* @brief RISC-V instruction formats
*/
/**
* R type: .insn r opcode6, func3, func7, rd, rs1, rs2
*
* +-------+-----+-----+-------+----+---------+
* | func7 | rs2 | rs1 | func3 | rd | opcode6 |
* +-------+-----+-----+-------+----+---------+
* 31 25 20 15 12 7 0
*/
#define __OPC_INSN_FORMAT_R(opcode, func3, func7, rd, rs1, rs2) \
".insn r "RT_STRINGIFY(opcode)","RT_STRINGIFY(func3)","RT_STRINGIFY(func7)","RT_STRINGIFY(rd)","RT_STRINGIFY(rs1)","RT_STRINGIFY(rs2)
/**
* @brief Xuantie T-HEAD extension ISA format
* Compatible to Xuantie C906R2S1 user manual v06
*/
#define __OPC_INSN_FORMAT_CACHE(func7, rs2, rs1) \
__OPC_INSN_FORMAT_R(0x0b, 0x0, func7, x0, rs1, rs2)
#ifdef _TOOLCHAIN_SUPP_XTHEADE_ISA_
#define OPC_SYNC "sync"
#define OPC_SYNC_I "sync.i"
#define OPC_DCACHE_CALL "dcache.call"
#define OPC_DCACHE_IALL "dcache.iall"
#define OPC_DCACHE_CIALL "dcache.ciall"
#define OPC_ICACHE_IALL "icache.iall"
#define OPC_DCACHE_CVA(rs1) "dcache.cva "RT_STRINGIFY(rs1)
#define OPC_DCACHE_IVA(rs1) "dcache.iva "RT_STRINGIFY(rs1)
#define OPC_DCACHE_CIVA(rs1) "dcache.civa "RT_STRINGIFY(rs1)
#define OPC_ICACHE_IVA(rs1) "icache.iva "RT_STRINGIFY(rs1)
#else /* !_TOOLCHAIN_NOT_SUPP_THEAD_ISA_ */
#define OPC_SYNC ".long 0x0180000B"
#define OPC_SYNC_I ".long 0x01A0000B"
#define OPC_DCACHE_CALL ".long 0x0010000B"
#define OPC_DCACHE_IALL ".long 0x0020000B"
#define OPC_DCACHE_CIALL ".long 0x0030000B"
#define OPC_ICACHE_IALL ".long 0x0100000B"
#define OPC_DCACHE_CVA(rs1) __OPC_INSN_FORMAT_CACHE(0x1, x4, rs1)
#define OPC_DCACHE_IVA(rs1) __OPC_INSN_FORMAT_CACHE(0x1, x6, rs1)
#define OPC_DCACHE_CIVA(rs1) __OPC_INSN_FORMAT_CACHE(0x1, x7, rs1)
#define OPC_ICACHE_IVA(rs1) __OPC_INSN_FORMAT_CACHE(0x1, x16, rs1)
#endif /* _TOOLCHAIN_NOT_SUPP_THEAD_ISA_ */
#ifdef _TOOLCHAIN_SUPP_ZIFENCEI_ISA_
#define OPC_FENCE_I "fence.i"
#else /* !_TOOLCHAIN_SUPP_ZIFENCEI_ISA_ */
#define OPC_FENCE_I ".long 0x0000100F"
#endif /* _TOOLCHAIN_SUPP_ZIFENCEI_ISA_ */
#endif /* __OPCODE_H__ */

View File

@ -0,0 +1,220 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-10-19 JasonHu first version
* 2021-11-12 JasonHu fix bug that not intr on f133
* 2023-04-22 flyingcys add plic register ioremap
*/
#include <rtthread.h>
#include <rtdbg.h>
#include "plic.h"
#include "interrupt.h"
#include "io.h"
#include "encoding.h"
#include "ioremap.h"
static void *c906_plic_regs = RT_NULL;
extern struct rt_irq_desc isr_table[];
struct plic_handler
{
rt_bool_t present;
void *hart_base;
void *enable_base;
};
rt_inline void plic_toggle(struct plic_handler *handler, int hwirq, int enable);
struct plic_handler c906_plic_handlers[C906_NR_CPUS];
static void *c906_irq_priority[INTERRUPTS_MAX] = {RT_NULL};
/*
 * Set the PLIC priority register of hwirq and propagate the enable state
 * to the per-context enable registers. The same value serves as both the
 * priority (1 when enabling, 0 when disabling) and the enable flag.
 */
rt_inline void plic_irq_toggle(int hwirq, int enable)
{
    int cpu = 0;
    void *priority_addr;
    /* set priority of interrupt, interrupt 0 is zero. */
    priority_addr = (void *)((rt_size_t)c906_plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
#ifdef RT_USING_SMART
    /* with the MMU on, each priority register is reached through a cached
     * per-irq ioremap mapping created on first use */
    if (c906_irq_priority[hwirq] == RT_NULL)
    {
        c906_irq_priority[hwirq] = (void *)rt_ioremap(priority_addr, 0x1000);
    }
    priority_addr = c906_irq_priority[hwirq];
#endif
    writel(enable, priority_addr);
    struct plic_handler *handler = &c906_plic_handlers[cpu];
    if (handler->present)
    {
        plic_toggle(handler, hwirq, enable);
    }
}
static void generic_handle_irq(int irq)
{
rt_isr_handler_t isr;
void *param;
if (irq < 0 || irq >= IRQ_MAX_NR)
{
LOG_E("bad irq number %d!\n", irq);
return;
}
if (!irq) // irq = 0 => no irq
{
LOG_W("no irq!\n");
return;
}
isr = isr_table[IRQ_OFFSET + irq].handler;
param = isr_table[IRQ_OFFSET + irq].param;
if (isr != RT_NULL)
{
isr(irq, param);
}
/* complete irq. */
plic_complete(irq);
}
/* Tell the PLIC that servicing of irqno is finished (claim/complete). */
void plic_complete(int irqno)
{
    int cpu = 0;
    struct plic_handler *handler = &c906_plic_handlers[cpu];
    writel(irqno, (void *)((rt_size_t)handler->hart_base + CONTEXT_CLAIM));
}

/* Mask irqno at the controller (priority and enable bit cleared). */
void plic_disable_irq(int irqno)
{
    plic_irq_toggle(irqno, 0);
}

/* Unmask irqno at the controller (priority 1 and enable bit set). */
void plic_enable_irq(int irqno)
{
    plic_irq_toggle(irqno, 1);
}
/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register. This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
void plic_handle_irq(void)
{
    int cpu = 0;
    unsigned int irq;
    struct plic_handler *handler = &c906_plic_handlers[cpu];
    /* claim/complete register of this hart's context */
    void *claim = (void *)((rt_size_t)handler->hart_base + CONTEXT_CLAIM);
    if (c906_plic_regs == RT_NULL || !handler->present)
    {
        LOG_E("plic state not initialized.");
        return;
    }
    /* mask S-mode external interrupts while draining pending sources */
    clear_csr(sie, SIE_SEIE);
    while ((irq = readl(claim)))
    {
        /* ID 0 is permanently disabled per the PLIC spec. */
        if (irq == 0)
        {
            LOG_E("irq no is zero.");
        }
        else
        {
            generic_handle_irq(irq);
        }
    }
    set_csr(sie, SIE_SEIE);
}
/* Set or clear the enable bit of hwirq in this context's enable words. */
rt_inline void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
    uint32_t *enable_reg = (uint32_t *)((rt_size_t)handler->enable_base +
                                        (hwirq / 32) * sizeof(uint32_t));
    uint32_t bit = 1U << (hwirq % 32);
    uint32_t value = readl(enable_reg);

    value = enable ? (value | bit) : (value & ~bit);
    writel(value, enable_reg);
}
/*
 * Probe and initialize the PLIC: register the supervisor-external context,
 * mask every interrupt source, program the threshold, and finally enable
 * S-mode external interrupts in the sie CSR.
 */
void plic_init(void)
{
    int nr_irqs;
    int nr_context;
    int i;
    unsigned long hwirq;
    int cpu = 0;

    /* guard against double initialization */
    if (c906_plic_regs)
    {
        LOG_E("plic already initialized!");
        return;
    }
    nr_context = C906_NR_CONTEXT;
    c906_plic_regs = (void *)C906_PLIC_PHY_ADDR;
    if (!c906_plic_regs)
    {
        LOG_E("fatal error, plic is reg space is null.");
        return;
    }
    nr_irqs = C906_PLIC_NR_EXT_IRQS;
    for (i = 0; i < nr_context; i ++)
    {
        struct plic_handler *handler;
        uint32_t threshold = 0;
        cpu = 0;
        /* skip contexts other than supervisor external interrupt */
        if (i == 0)
        {
            continue;
        }
        // we always use CPU0 M-mode target register.
        handler = &c906_plic_handlers[cpu];
        if (handler->present)
        {
            /* context already claimed: park this one with an all-ones
             * threshold so it can never fire */
            threshold = 0xffffffff;
            goto done;
        }
        handler->present = RT_TRUE;
        handler->hart_base = (void *)((rt_size_t)c906_plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART);
        handler->enable_base = (void *)((rt_size_t)c906_plic_regs + ENABLE_BASE + i * ENABLE_PER_HART);
#ifdef RT_USING_SMART
        handler->hart_base = (void *)rt_ioremap(handler->hart_base, 0x1000);
        handler->enable_base = (void *)rt_ioremap(handler->enable_base, 0x1000);
#endif
done:
        /* priority must be > threshold to trigger an interrupt */
        writel(threshold, (void *)((rt_size_t)handler->hart_base + CONTEXT_THRESHOLD));
        /* start with every source masked */
        for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
        {
            plic_toggle(handler, hwirq, 0);
        }
    }
    /* Enable supervisor external interrupts. */
    set_csr(sie, SIE_SEIE);
}

View File

@ -0,0 +1,60 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-10-19 JasonHu first version
* 2023-04-22 flyingcys add C906_PLIC_PHY_ADDR macro judge
*/
#ifndef __RISCV64_PLIC_H__
#define __RISCV64_PLIC_H__
#include <interrupt.h>
#ifndef C906_PLIC_PHY_ADDR
#define C906_PLIC_PHY_ADDR (0x10000000)
#endif
#define C906_PLIC_NR_EXT_IRQS (IRQ_MAX_NR)
#define C906_NR_CPUS (NR_CPUS)
/* M and S mode context. */
#define C906_NR_CONTEXT (2)
#define MAX_DEVICES 1024
#define MAX_CONTEXTS 15872
/*
* Each interrupt source has a priority register associated with it.
* We always hardwire it to one in Linux.
*/
#define PRIORITY_BASE 0
#define PRIORITY_PER_ID 4
/*
* Each hart context has a vector of interrupt enable bits associated with it.
* There's one bit for each interrupt source.
*/
#define ENABLE_BASE 0x2000
#define ENABLE_PER_HART 0x80
/*
* Each hart context has a set of control registers associated with it. Right
* now there's only two: a source priority threshold over which the hart will
* take an interrupt, and a register to claim interrupts.
*/
#define CONTEXT_BASE 0x200000
#define CONTEXT_PER_HART 0x1000
#define CONTEXT_THRESHOLD 0x00
#define CONTEXT_CLAIM 0x04
void plic_init(void);
void plic_enable_irq(int irqno);
void plic_disable_irq(int irqno);
// tell PLIC that we've served this IRQ
void plic_complete(int irq);
void plic_handle_irq(void);
#endif

View File

@ -0,0 +1,209 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-01-30 lizhirui first version
* 2021-05-03 lizhirui porting to c906
* 2023-10-12 Shell Add permission control API
*/
#ifndef __RISCV_MMU_H__
#define __RISCV_MMU_H__
#include <rtthread.h>
#include <rthw.h>
#include "riscv.h"
#undef PAGE_SIZE
/* C-SKY extend */
#define PTE_SEC (1UL << 59) /* Security */
#define PTE_SHARE (1UL << 60) /* Shareable */
#define PTE_BUF (1UL << 61) /* Bufferable */
#define PTE_CACHE (1UL << 62) /* Cacheable */
#define PTE_SO (1UL << 63) /* Strong Order */
#define PAGE_OFFSET_SHIFT 0
#define PAGE_OFFSET_BIT 12
#define PAGE_SIZE __SIZE(PAGE_OFFSET_BIT)
#define PAGE_OFFSET_MASK __MASK(PAGE_OFFSET_BIT)
#define VPN0_SHIFT (PAGE_OFFSET_SHIFT + PAGE_OFFSET_BIT)
#define VPN0_BIT 9
#define VPN1_SHIFT (VPN0_SHIFT + VPN0_BIT)
#define VPN1_BIT 9
#define VPN2_SHIFT (VPN1_SHIFT + VPN1_BIT)
#define VPN2_BIT 9
#define PPN0_SHIFT (PAGE_OFFSET_SHIFT + PAGE_OFFSET_BIT)
#define PPN0_BIT 9
#define PPN1_SHIFT (PPN0_SHIFT + PPN0_BIT)
#define PPN1_BIT 9
#define PPN2_SHIFT (PPN1_SHIFT + PPN1_BIT)
#define PPN2_BIT 26
#define PPN_BITS (PPN0_BIT + PPN1_BIT + PPN2_BIT)
#define L1_PAGE_SIZE __SIZE(PAGE_OFFSET_BIT + VPN0_BIT + VPN1_BIT)
#define L2_PAGE_SIZE __SIZE(PAGE_OFFSET_BIT + VPN0_BIT)
#define L3_PAGE_SIZE __SIZE(PAGE_OFFSET_BIT)
#define ARCH_ADDRESS_WIDTH_BITS 64
#define PHYSICAL_ADDRESS_WIDTH_BITS 56
#define PAGE_ATTR_NEXT_LEVEL (0)
#define PAGE_ATTR_RWX (PTE_X | PTE_W | PTE_R)
#define PAGE_ATTR_READONLY (PTE_R)
#define PAGE_ATTR_XN (PTE_W | PTE_R)
#define PAGE_ATTR_READEXECUTE (PTE_X | PTE_R)
#define PAGE_ATTR_USER (PTE_U)
#define PAGE_ATTR_SYSTEM (0)
#define PAGE_ATTR_CB (PTE_BUF | PTE_CACHE)
#define PAGE_ATTR_DEV (PTE_SO)
#define PAGE_DEFAULT_ATTR_LEAF \
(PTE_SHARE | PTE_BUF | PTE_CACHE | PTE_A | PTE_D | PTE_G | PTE_U | \
PAGE_ATTR_RWX | PTE_V)
#define PAGE_DEFAULT_ATTR_NEXT \
(PTE_SHARE | PTE_BUF | PTE_CACHE | PTE_A | PTE_D | PTE_G | PTE_V)
#define PAGE_IS_LEAF(pte) __MASKVALUE(pte, PAGE_ATTR_RWX)
#define PTE_USED(pte) __MASKVALUE(pte, PTE_V)
#define PTE_WRAP(attr) (attr | PTE_A | PTE_D)
/**
* encoding of SATP (Supervisor Address Translation and Protection register)
*/
#define SATP_MODE_OFFSET 60
#define SATP_MODE_BARE 0
#define SATP_MODE_SV39 8
#define SATP_MODE_SV48 9
#define SATP_MODE_SV57 10
#define SATP_MODE_SV64 11
#define ARCH_VADDR_WIDTH 39
#define SATP_MODE SATP_MODE_SV39
#define MMU_MAP_K_DEVICE PTE_WRAP(PAGE_ATTR_DEV | PTE_G | PAGE_ATTR_XN | PTE_V)
#define MMU_MAP_K_RWCB PTE_WRAP(PAGE_ATTR_CB | PTE_G | PAGE_ATTR_RWX | PTE_V)
#define MMU_MAP_K_RW PTE_WRAP(PTE_G | PAGE_ATTR_RWX | PTE_V)
#define MMU_MAP_U_RWCB PTE_WRAP(PAGE_ATTR_CB | PTE_U | PAGE_ATTR_RWX | PTE_V)
#define MMU_MAP_U_ROCB \
PTE_WRAP(PAGE_ATTR_CB | PTE_U | PAGE_ATTR_READONLY | PTE_V)
#define MMU_MAP_U_RWCB_XN PTE_WRAP(PAGE_ATTR_CB | PTE_U | PAGE_ATTR_XN | PTE_V)
#define MMU_MAP_U_RW PTE_WRAP(PTE_U | PAGE_ATTR_RWX | PTE_V)
#define MMU_MAP_EARLY \
PTE_WRAP(PAGE_ATTR_RWX | PTE_G | PTE_V | PTE_CACHE | PTE_SHARE | PTE_BUF)
#define MMU_MAP_TRACE(attr) (attr)
#define PTE_XWR_MASK 0xe
#define ARCH_PAGE_SIZE PAGE_SIZE
#define ARCH_PAGE_MASK (ARCH_PAGE_SIZE - 1)
#define ARCH_PAGE_SHIFT PAGE_OFFSET_BIT
#define ARCH_INDEX_WIDTH 9
#define ARCH_INDEX_SIZE (1ul << ARCH_INDEX_WIDTH)
#define ARCH_INDEX_MASK (ARCH_INDEX_SIZE - 1)
#define ARCH_MAP_FAILED ((void *)0x8000000000000000)
void mmu_set_pagetable(rt_ubase_t addr);
void mmu_enable_user_page_access(void);
void mmu_disable_user_page_access(void);
#define RT_HW_MMU_PROT_READ 1
#define RT_HW_MMU_PROT_WRITE 2
#define RT_HW_MMU_PROT_EXECUTE 4
#define RT_HW_MMU_PROT_KERNEL 8
#define RT_HW_MMU_PROT_USER 16
#define RT_HW_MMU_PROT_CACHE 32
void rt_hw_asid_init(void);
struct rt_aspace;
void rt_hw_asid_switch_pgtbl(struct rt_aspace *aspace, rt_ubase_t pgtbl);
/**
 * @brief Remove permission from attribution
 *
 * @param attr architecture specified mmu attribution
 * @param prot protect that will be removed
 * @return size_t returned attribution
 */
rt_inline size_t rt_hw_mmu_attr_rm_perm(size_t attr, rt_base_t prot)
{
    switch (prot)
    {
    /* stripping write access clears the same PTE W bit for both the
     * user and the kernel variant of the request */
    case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
    case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_KERNEL:
        attr &= ~PTE_W;
        break;
    default:
        RT_ASSERT(0);
    }
    return attr;
}
/**
 * @brief Add permission from attribution
 *
 * @param attr architecture specified mmu attribution
 * @param prot protect that will be added (only user-write is supported)
 * @return size_t returned attribution
 */
rt_inline size_t rt_hw_mmu_attr_add_perm(size_t attr, rt_base_t prot)
{
    switch (prot)
    {
    /* add write permission for user: W is granted together with R and
     * the U (user accessible) bit */
    case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
        attr |= (PTE_R | PTE_W | PTE_U);
        break;
    default:
        RT_ASSERT(0);
    }
    return attr;
}
/**
 * @brief Test permission from attribution
 *
 * @param attr architecture specified mmu attribution
 * @param prot protect that will be test
 * @return rt_bool_t RT_TRUE if the prot is allowed, otherwise RT_FALSE
 */
rt_inline rt_bool_t rt_hw_mmu_attr_test_perm(size_t attr, rt_base_t prot)
{
    rt_bool_t allowed = 0;
    rt_base_t base_prot = prot & ~RT_HW_MMU_PROT_USER;

    if (base_prot == RT_HW_MMU_PROT_WRITE)
    {
        /* a writable mapping is reported only when it is also readable */
        allowed = ((attr & PTE_W) && (attr & PTE_R));
    }
    else if (base_prot == RT_HW_MMU_PROT_READ)
    {
        allowed = !!(attr & PTE_R);
    }
    else if (base_prot == RT_HW_MMU_PROT_EXECUTE)
    {
        allowed = !!(attr & PTE_X);
    }
    else
    {
        RT_ASSERT(0);
    }

    /* a user query additionally requires the U bit */
    if (allowed && (prot & RT_HW_MMU_PROT_USER))
    {
        allowed = !!(attr & PTE_U);
    }
    return allowed;
}
#endif

View File

@ -0,0 +1,298 @@
/*
* Copyright lizhirui
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 lizhirui the first version
* 2021-05-20 lizhirui add os debug support
*/
#include <rtthread.h>
#include "symbol_analysis.h"
#define MEMORY_BASE 0x40000000
#define MEMORY_SIZE (128 * 0x100000)
extern rt_size_t _osdebug_start;
static os_symtab_header *symtab_header = (os_symtab_header *)&_osdebug_start;
/*
 * Search one of the osdebug symbol tables (given as an offset relative to
 * _osdebug_start plus an entry count) for the symbol covering `address`.
 * On success returns a pointer to the FIRST entry of the run of entries
 * sharing that symbol address; returns RT_NULL when the address lies
 * before every symbol in the table.
 */
os_symtab_item *find_symbol_table(rt_size_t symbol_table_addr,rt_size_t symbol_num,rt_size_t address)
{
    rt_size_t left = 0;
    rt_size_t right = symbol_num;
    os_symtab_item *sym_table = (os_symtab_item *)((rt_size_t)&_osdebug_start + symbol_table_addr);

    while(left < right)
    {
        rt_size_t mid = (left + right) >> 1;

        if(address < sym_table[mid].address)
        {
            right = mid;
            /* rt_size_t is unsigned, so the old guard "(right - 1) >= 0"
             * was always true and could read sym_table[-1] once right
             * reached 0; "right >= 1" is the intended bound. */
            while((right < symbol_num) && (right >= 1) && (sym_table[right].address == sym_table[right - 1].address))
            {
                right--;
            }
        }
        else if(address == sym_table[mid].address)
        {
            left = mid + 1;
            break;
        }
        else
        {
            /* skip forward over a run of entries with the same address
             * (the old "left >= 0" guard was a no-op on an unsigned type
             * and has been dropped) */
            left = mid;
            while(((left + 1) < symbol_num) && (sym_table[left].address == sym_table[left + 1].address))
            {
                left++;
            }
            left++;
        }
    }

    left--;
    if(left == ((rt_size_t)-1))
    {
        return RT_NULL; /* address precedes the first symbol */
    }

    /* back up to the first entry of a run of equal addresses; again use
     * "left >= 1" instead of the always-true "(left - 1) >= 0" to avoid
     * reading sym_table[-1] */
    while((left < symbol_num) && (left >= 1) && (sym_table[left].address == sym_table[left - 1].address))
    {
        left--;
    }
    return &sym_table[left];
}
/* Resolve a symbol's printable name: each symbol stores an offset into the
 * string table, which itself lives inside the osdebug section. */
const char *get_symbol_name(os_symtab_item *symbol)
{
    rt_size_t section_base = (rt_size_t)&_osdebug_start;
    rt_size_t string_base = section_base + symtab_header->string_table_offset;

    return (const char *)(string_base + symbol->name_offset);
}
/* Print one symbol in the standard format "<name(0xaddr) : 0xsize> + 0xoff":
 * the " : 0xsize" part is omitted for zero-sized symbols, and the " + 0xoff"
 * part when the queried address equals the symbol's start address. */
void print_symbol(os_symtab_item *symbol,rt_size_t address)
{
    rt_kprintf("<%s(0x%p)",get_symbol_name(symbol),symbol -> address);
    if(symbol -> size)
    {
        rt_kprintf(" : 0x%x>",symbol -> size);
    }
    else
    {
        rt_kprintf(">");
    }
    /* offset of the queried address inside the symbol, if any */
    if(address > symbol -> address)
    {
        rt_kprintf(" + 0x%x",address - symbol -> address);
    }
}
/* Print every symbol associated with `address`, comma-separated.
 * When `function` is true the function table is searched first, falling back
 * to the general-symbol and object tables; otherwise the object table is
 * tried first. Prints "<Unknown Symbol>" when nothing matches.
 * NOTE(review): each "while (... != RT_NULL)" below ends in an unconditional
 * break, so it runs at most one iteration; the duplicate-address advance
 * right before the break therefore has no effect -- it looks intended to
 * continue the loop over an equal-address run. Confirm against the osdebug
 * documentation before changing. */
void print_symbol_info(rt_size_t address,rt_bool_t function)
{
    os_symtab_item *function_symbol = find_symbol_table(symtab_header -> function_table_offset,symtab_header -> function_table_num,address);
    os_symtab_item *object_symbol = find_symbol_table(symtab_header -> object_table_offset,symtab_header -> object_table_num,address);
    os_symtab_item *general_symbol = find_symbol_table(symtab_header -> general_symbol_table_offset,symtab_header -> general_symbol_table_num,address);
    const char *dot = ""; /* separator: becomes "," once something is printed */
    rt_bool_t valid = RT_FALSE;
    if(function)
    {
        while(function_symbol != RT_NULL)
        {
            /* report only if the address falls inside the symbol's extent */
            if((function_symbol -> address + function_symbol -> size) > address)
            {
                rt_kprintf(dot);
                print_symbol(function_symbol,address);
                dot = ",";
                valid = RT_TRUE;
            }
            /* stop at the end of the function table */
            if(((rt_size_t)(function_symbol + 1)) >= (((rt_size_t)&_osdebug_start) + symtab_header -> function_table_offset + symtab_header -> function_table_num * sizeof(os_symtab_item)))
            {
                break;
            }
            if(function_symbol[0].address == function_symbol[1].address)
            {
                function_symbol++;
            }
            break;
        }
        /* no function covered the address: fall back to general symbols,
         * then to data objects */
        if(!valid)
        {
            while(general_symbol != RT_NULL)
            {
                rt_kprintf(dot);
                print_symbol(general_symbol,address);
                dot = ",";
                valid = RT_TRUE;
                if(((rt_size_t)(general_symbol + 1)) >= (((rt_size_t)&_osdebug_start) + symtab_header -> general_symbol_table_offset + symtab_header -> general_symbol_table_num * sizeof(os_symtab_item)))
                {
                    break;
                }
                if(general_symbol[0].address == general_symbol[1].address)
                {
                    general_symbol++;
                }
                break;
            }
            while(object_symbol != RT_NULL)
            {
                if((object_symbol -> address + object_symbol -> size) > address)
                {
                    rt_kprintf(dot);
                    print_symbol(object_symbol,address);
                    dot = ",";
                    valid = RT_TRUE;
                }
                if(((rt_size_t)(object_symbol + 1)) >= (((rt_size_t)&_osdebug_start) + symtab_header -> object_table_offset + symtab_header -> object_table_num * sizeof(os_symtab_item)))
                {
                    break;
                }
                if(object_symbol[0].address == object_symbol[1].address)
                {
                    object_symbol++;
                }
                break;
            }
        }
    }
    else
    {
        /* data lookup: object table first, then general symbols, then code */
        while(object_symbol != RT_NULL)
        {
            if((object_symbol -> address + object_symbol -> size) > address)
            {
                rt_kprintf(dot);
                print_symbol(object_symbol,address);
                dot = ",";
                valid = RT_TRUE;
            }
            if(((rt_size_t)(object_symbol + 1)) >= (((rt_size_t)&_osdebug_start) + symtab_header -> object_table_offset + symtab_header -> object_table_num * sizeof(os_symtab_item)))
            {
                break;
            }
            if(object_symbol[0].address == object_symbol[1].address)
            {
                object_symbol++;
            }
            break;
        }
        if(!valid)
        {
            while(general_symbol != RT_NULL)
            {
                rt_kprintf(dot);
                print_symbol(general_symbol,address);
                dot = ",";
                valid = RT_TRUE;
                if(((rt_size_t)(general_symbol + 1)) >= (((rt_size_t)&_osdebug_start) + symtab_header -> general_symbol_table_offset + symtab_header -> general_symbol_table_num * sizeof(os_symtab_item)))
                {
                    break;
                }
                if(general_symbol[0].address == general_symbol[1].address)
                {
                    general_symbol++;
                }
                break;
            }
            while(function_symbol != RT_NULL)
            {
                if((function_symbol -> address + function_symbol -> size) > address)
                {
                    rt_kprintf(dot);
                    print_symbol(function_symbol,address);
                    dot = ",";
                    valid = RT_TRUE;
                }
                if(((rt_size_t)(function_symbol + 1)) >= (((rt_size_t)&_osdebug_start) + symtab_header -> function_table_offset + symtab_header -> function_table_num * sizeof(os_symtab_item)))
                {
                    break;
                }
                if(function_symbol[0].address == function_symbol[1].address)
                {
                    function_symbol++;
                }
                break;
            }
        }
    }
    /* NOTE(review): pointer comparison of string literals -- works only while
     * both "" literals are merged by the compiler; equivalent to `!valid`. */
    if(dot == "")
    {
        rt_kprintf("<Unknown Symbol>");
    }
}
/* Dump a stack backtrace on fault, starting from the exception PC (epc)
 * and frame pointer (fp). Follows saved {fp, ra} pairs while the frame
 * pointer stays inside [MEMORY_BASE, MEMORY_BASE + MEMORY_SIZE); stops at
 * a zero return address or when the chain leaves RAM.
 * NOTE(review): assumes the GCC RISC-V frame layout with the saved ra at
 * fp - 8 and the caller's fp at fp - 16 (frame pointer enabled) -- confirm
 * against the build flags. */
void print_stacktrace(rt_size_t epc,rt_size_t fp)
{
    rt_kprintf("-----------------------------Dump Stacktrace----------------------------\n\n");
    rt_size_t sp = fp;
    rt_size_t i = 0;
    /* report the faulting instruction address first */
    rt_kprintf("address 0x%p(",epc);
    print_symbol_info(epc,RT_TRUE);
    rt_kprintf(")\n\n");
    while(1)
    {
        /* only follow frame pointers that lie inside RAM */
        if((sp >= MEMORY_BASE) && (sp < (MEMORY_BASE + MEMORY_SIZE)))
        {
            //rt_kprintf("%d: 0x%p\n",i,sp);
            /* stack[0] = caller's saved fp, stack[1] = return address */
            rt_size_t *stack = (rt_size_t *)(sp - sizeof(rt_size_t) * 2);
            rt_size_t ra = stack[1];
            if(!ra)
            {
                break;
            }
            rt_kprintf("return to 0x%p(",ra);
            print_symbol_info(ra,RT_TRUE);
            rt_kprintf(")\n\n");
            //rt_kprintf("ra = 0x%p,fp = 0x%p\n",stack[1],stack[0]);
            sp = stack[0];
            i++;
        }
        else
        {
            break;
        }
    }
    rt_kprintf("---------------------------------Dump OK--------------------------------\n");
}

View File

@ -0,0 +1,44 @@
/*
* Copyright lizhirui
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 lizhirui the first version
* 2021-05-20 lizhirui add os debug support
*/
#ifndef __SYMBOL_ANALYSIS_H__
#define __SYMBOL_ANALYSIS_H__
#include <rtthread.h>
/* Header descriptor of the osdebug section. */
typedef struct os_symtab_header
{
    rt_size_t function_table_offset;       /* offset of the function table (relative to section start, same below) */
    rt_size_t function_table_num;          /* number of symbols in the function table */
    rt_size_t object_table_offset;         /* offset of the object table */
    rt_size_t object_table_num;            /* number of symbols in the object table */
    rt_size_t general_symbol_table_offset; /* offset of the general-symbol table (type NONE but binding GLOBAL) */
    rt_size_t general_symbol_table_num;    /* number of symbols in the general-symbol table */
    rt_size_t string_table_offset;         /* offset of the string table */
    rt_size_t string_table_size;           /* size of the string table in bytes */
}os_symtab_header;
/* Descriptor of one symbol. */
typedef struct os_symtab_item
{
    rt_size_t name_offset; /* offset of the symbol's name in the string table */
    rt_size_t address;     /* address this symbol stands for */
    rt_size_t size;        /* size of the entity the symbol describes */
}os_symtab_item;
os_symtab_item *find_symbol_table(rt_size_t symbol_table_addr,rt_size_t symbol_num,rt_size_t address);
const char *get_symbol_name(os_symtab_item *symbol);
void print_symbol(os_symtab_item *symbol,rt_size_t address);
void print_symbol_info(rt_size_t address,rt_bool_t function);
void print_stacktrace(rt_size_t epc,rt_size_t fp);
#endif

View File

@ -0,0 +1,10 @@
# RT-Thread building script for component
from building import *
cwd = GetCurrentDir()
# collect C/C++ sources plus GCC-flavoured assembly files in this directory
src = Glob('*.c') + Glob('*.cpp') + Glob('*_gcc.S')
CPPPATH = [cwd]
# register the sources as the 'libcpu' build group; header search path is this dir
group = DefineGroup('libcpu', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

View File

@ -0,0 +1,10 @@
#ifndef _SBI_ASM_H
#define _SBI_ASM_H
/* Issue an SBI call from assembly: load the SBI function ID (\which) into
 * a7 per the RISC-V SBI calling convention, then trap into the SBI firmware
 * with ecall. Call arguments/results use a0.. as usual. */
.macro SBI_CALL which
li a7, \which
ecall
nop
.endm
#endif /* _SBI_ASM_H */

View File

@ -0,0 +1,27 @@
/*
* Copyright (c) 2019-2020, Xim
*
* SPDX-License-Identifier: Apache-2.0
*
*/
#ifndef _ASM_SBI_DEF_H
#define _ASM_SBI_DEF_H
/* Legacy SBI (v0.1) extension IDs, passed in a7 on ecall. */
#define SBI_SET_TIMER 0
#define SBI_CONSOLE_PUTCHAR 1
#define SBI_CONSOLE_GETCHAR 2
#define SBI_CLEAR_IPI 3
#define SBI_SEND_IPI 4
#define SBI_REMOTE_FENCE_I 5
#define SBI_REMOTE_SFENCE_VMA 6
#define SBI_REMOTE_SFENCE_VMA_ASID 7
#define SBI_SHUTDOWN 8
/* IDs below are platform additions beyond the legacy SBI spec. */
#define SBI_CONSOLE_PUTSTR 9
#define SBI_SD_WRITE 10
#define SBI_SD_READ 11
#define SBI_NET_WRITE 12
#define SBI_NET_READ 13
#endif /* _ASM_SBI_DEF_H */

View File

@ -0,0 +1,140 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-01-29 lizhirui first version
* 2021-11-05 JasonHu add C908 cache inst
* 2022-11-09 WangXiaoyao Support cache coherence operations;
* improve portability and make
* no assumption on undefined behavior
*/
#include <rthw.h>
#include <rtdef.h>
#include <board.h>
#include <riscv.h>
#include "opcode.h"
#include "cache.h"
#define L1_CACHE_BYTES (64)
/**
* GCC version not support t-head cache flush, so we use fixed code to achieve.
* The following function cannot be optimized.
*/
static void dcache_wb_range(unsigned long start, unsigned long end) __attribute__((optimize("O0")));
static void dcache_inv_range(unsigned long start, unsigned long end) __attribute__((optimize("O0")));
static void dcache_wbinv_range(unsigned long start, unsigned long end) __attribute__((optimize("O0")));
static void icache_inv_range(unsigned long start, unsigned long end) __attribute__((optimize("O0")));
/* Names the register operand slot (%0) used by the opcode.h cache macros. */
#define CACHE_OP_RS1 %0
/* Apply one cache-maintenance instruction to every L1 line overlapping
 * [start, end): align start down to a line boundary, then issue the
 * instruction once per 64-byte line. Expands inside functions that have
 * `start` and `end` parameters in scope. */
#define CACHE_OP_RANGE(instr) \
    { \
        rt_ubase_t i = start & ~(L1_CACHE_BYTES - 1); \
        for (; i < end; i += L1_CACHE_BYTES) \
        { \
            __asm__ volatile(instr ::"r"(i) \
                             : "memory"); \
        } \
    }
/* Clean (write back) dcache lines covering [start, end). */
static void dcache_wb_range(unsigned long start, unsigned long end)
{
    CACHE_OP_RANGE(OPC_DCACHE_CVA(CACHE_OP_RS1));
}
/* Clean dcache lines covering [start, end) to the L1 boundary only. */
static void dcachel1_wb_range(unsigned long start, unsigned long end)
{
    CACHE_OP_RANGE(OPC_DCACHE_CVAL1(CACHE_OP_RS1));
}
/* Invalidate (discard without write-back) dcache lines covering [start, end). */
static void dcache_inv_range(unsigned long start, unsigned long end)
{
    CACHE_OP_RANGE(OPC_DCACHE_IVA(CACHE_OP_RS1));
}
/* Clean then invalidate dcache lines covering [start, end). */
static void dcache_wbinv_range(unsigned long start, unsigned long end)
{
    CACHE_OP_RANGE(OPC_DCACHE_CIVA(CACHE_OP_RS1));
}
/* Invalidate icache lines covering [start, end). */
static void icache_inv_range(unsigned long start, unsigned long end)
{
    CACHE_OP_RANGE(OPC_ICACHE_IVA(CACHE_OP_RS1));
}
/* Both caches use a fixed 64-byte line on this core (L1_CACHE_BYTES). */
rt_inline rt_uint32_t rt_cpu_icache_line_size(void)
{
    return L1_CACHE_BYTES;
}
rt_inline rt_uint32_t rt_cpu_dcache_line_size(void)
{
    return L1_CACHE_BYTES;
}
/* Invalidate icache lines for [addr, addr + size), then flush the
 * instruction stream so stale fetches are discarded. */
void rt_hw_cpu_icache_invalidate_local(void *addr, int size)
{
    icache_inv_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
    rt_hw_cpu_sync_i();
}
/* Discard dcache lines for [addr, addr + size) without writing them back. */
void rt_hw_cpu_dcache_invalidate_local(void *addr, int size)
{
    dcache_inv_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
    rt_hw_cpu_sync();
}
/* Write back dirty dcache lines for [addr, addr + size). */
void rt_hw_cpu_dcache_clean_local(void *addr, int size)
{
    dcache_wb_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
    rt_hw_cpu_sync();
}
/* Write back then discard dcache lines for [addr, addr + size). */
void rt_hw_cpu_dcache_clean_invalidate_local(void *addr, int size)
{
    dcache_wbinv_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
    rt_hw_cpu_sync();
}
/**
 * @brief Clean (write back) [addr, addr + size) to the L1 boundary.
 *
 * BUGFIX: the previous implementation issued a single "dcache.cval1 a0",
 * silently relying on the calling convention having left addr in a0 and
 * cleaning only one cache line regardless of size. Use the range helper
 * so every line covered by the buffer is written back, and synchronize
 * afterwards like the sibling cache operations do.
 */
void rt_hw_cpu_dcachel1_clean_local(void *addr, int size)
{
    dcachel1_wb_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
    rt_hw_cpu_sync();
}
/**
* =====================================================
* Architecture Independent API
* =====================================================
*/
/* Architecture-independent icache entry point. Only RT_HW_CACHE_INVALIDATE
 * is meaningful for the instruction cache; other requests are ignored. */
void rt_hw_cpu_icache_ops(int ops, void *addr, int size)
{
    if (ops != RT_HW_CACHE_INVALIDATE)
    {
        return;
    }
    rt_hw_cpu_icache_invalidate(addr, size);
}
/* Architecture-independent dcache entry point: RT_HW_CACHE_FLUSH cleans
 * (writes back) the range, anything else invalidates it. */
void rt_hw_cpu_dcache_ops(int ops, void *addr, int size)
{
    if (ops != RT_HW_CACHE_FLUSH)
    {
        rt_hw_cpu_dcache_invalidate(addr, size);
        return;
    }
    rt_hw_cpu_dcache_clean(addr, size);
}
/* Make freshly written code at [addr, addr + size) visible to instruction
 * fetch: clean the dcache to the L1 unification point, then invalidate the
 * corresponding icache lines. */
void rt_hw_sync_cache_local(void *addr, int size)
{
    rt_hw_cpu_dcachel1_clean_local(addr, size);
    rt_hw_cpu_icache_invalidate_local(addr, size);
}

View File

@ -0,0 +1,127 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-11-05 JasonHu The first version
* 2022-11-09 WangXiaoyao Distinguish local and broadcast operations
*/
#ifndef CACHE_H__
#define CACHE_H__
#include "opcode.h"
#ifndef ALWAYS_INLINE
#define ALWAYS_INLINE inline __attribute__((always_inline))
#endif
/* Xuantie "sync": completion barrier for preceding instructions
 * (semantics per the Xuantie ISA manual -- see opcode.h). */
ALWAYS_INLINE void rt_hw_cpu_sync(void)
{
    asm volatile(OPC_SYNC::
                 : "memory");
}
/* "sync.i": instruction-stream variant of sync. */
ALWAYS_INLINE void rt_hw_cpu_sync_i(void)
{
    asm volatile(OPC_SYNC_I::
                 : "memory");
}
/* "sync.s": shareable-domain (broadcast) variant of sync. */
ALWAYS_INLINE void rt_hw_cpu_sync_s(void)
{
    asm volatile(OPC_SYNC_S::
                 : "memory");
}
/* "sync.is": shareable-domain variant of sync.i. */
ALWAYS_INLINE void rt_hw_cpu_sync_is(void)
{
    asm volatile(OPC_SYNC_IS::
                 : "memory");
}
/**
* ========================================
* Local cpu cache maintainence operations
* ========================================
*/
void rt_hw_cpu_dcache_clean_local(void *addr, int size);
void rt_hw_cpu_dcache_invalidate_local(void *addr, int size);
void rt_hw_cpu_dcache_clean_invalidate_local(void *addr, int size);
void rt_hw_cpu_icache_invalidate_local(void *addr, int size);
/* Write back the entire local dcache ("dcache.call"). */
ALWAYS_INLINE void rt_hw_cpu_dcache_clean_all_local(void)
{
    __asm__ volatile(OPC_DCACHE_CALL ::
                     : "memory");
    rt_hw_cpu_sync();
}
/* Discard the entire local dcache without write-back ("dcache.iall"). */
ALWAYS_INLINE void rt_hw_cpu_dcache_invalidate_all_local(void)
{
    __asm__ volatile(OPC_DCACHE_IALL ::
                     : "memory");
    rt_hw_cpu_sync();
}
/* Write back then discard the entire local dcache ("dcache.ciall"). */
ALWAYS_INLINE void rt_hw_cpu_dcache_clean_invalidate_all_local(void)
{
    __asm__ volatile(OPC_DCACHE_CIALL ::
                     : "memory");
    rt_hw_cpu_sync();
}
/* Invalidate the entire local icache, then resync the instruction stream. */
ALWAYS_INLINE void rt_hw_cpu_icache_invalidate_all_local(void)
{
    __asm__ volatile(OPC_ICACHE_IALL ::
                     : "memory");
    rt_hw_cpu_sync_i();
}
#define rt_hw_icache_invalidate_all rt_hw_cpu_icache_invalidate_all
/**
* ========================================
* Multi-core cache maintainence operations
* ========================================
*/
#ifdef RT_USING_SMP
#error "TODO: cache maintainence have not ported to RISC-V SMP yet"
void rt_hw_cpu_dcache_clean(void *addr, int size);
void rt_hw_cpu_dcache_invalidate(void *addr, int size);
void rt_hw_cpu_dcache_clean_invalidate(void *addr, int size);
void rt_hw_cpu_dcache_clean_all(void);
void rt_hw_cpu_dcache_invalidate_all(void);
void rt_hw_cpu_dcache_clean_invalidate_all(void);
void rt_hw_cpu_icache_invalidate(void *addr, int size);
void rt_hw_cpu_icache_invalidate_all(void);
#else /* !RT_USING_SMP */
#define rt_hw_cpu_dcache_clean rt_hw_cpu_dcache_clean_local
#define rt_hw_cpu_dcache_invalidate rt_hw_cpu_dcache_invalidate_local
#define rt_hw_cpu_dcache_clean_and_invalidate rt_hw_cpu_dcache_clean_invalidate_local
#define rt_hw_cpu_dcache_clean_all rt_hw_cpu_dcache_clean_all_local
#define rt_hw_cpu_dcache_invalidate_all rt_hw_cpu_dcache_invalidate_all_local
#define rt_hw_cpu_dcache_clean_invalidate_all rt_hw_cpu_dcache_clean_invalidate_all_local
#define rt_hw_cpu_icache_invalidate rt_hw_cpu_icache_invalidate_local
#define rt_hw_cpu_icache_invalidate_all rt_hw_cpu_icache_invalidate_all_local
#endif /* RT_USING_SMP */
/**
* @brief Synchronize cache to Point of Unification
*/
void rt_hw_sync_cache_local(void *addr, int size);
#endif /* CACHE_H__ */

View File

@ -0,0 +1,105 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-10-19 JasonHu first version
*/
#include <rthw.h>
#include <rtthread.h>
#include "interrupt.h"
#include "riscv.h"
#include "plic.h"
extern rt_atomic_t rt_interrupt_nest;
extern rt_uint32_t rt_interrupt_from_thread, rt_interrupt_to_thread;
extern rt_uint32_t rt_thread_switch_interrupt_flag;
struct rt_irq_desc isr_table[INTERRUPTS_MAX];
/* Default ISR installed on every vector: reports an interrupt that no
 * driver has claimed. (Fixes the "occured" typo in the message.) */
static void rt_hw_interrupt_handler(int vector, void *param)
{
    rt_kprintf("Unhandled interrupt %d occurred!!!\n", vector);
}
/**
 * This function will initialize hardware interrupt
 */
void rt_hw_interrupt_init(void)
{
    rt_int32_t idx;

    /* bring up the platform interrupt controller first */
    plic_init();

    /* route every vector to the "unhandled interrupt" fallback handler */
    rt_memset(isr_table, 0x00, sizeof(isr_table));
    for (idx = 0; idx < INTERRUPTS_MAX; idx++)
    {
        isr_table[idx].handler = rt_hw_interrupt_handler;
    }

    /* reset the nesting depth and the context-switch bookkeeping */
    rt_interrupt_nest = 0;
    rt_interrupt_from_thread = 0;
    rt_interrupt_to_thread = 0;
    rt_thread_switch_interrupt_flag = 0;
}
/**
 * This function will mask a interrupt.
 * @param vector the interrupt number
 */
void rt_hw_interrupt_mask(int vector)
{
    /* ignore out-of-range vectors; otherwise disable the source at the PLIC */
    if (vector >= 0 && vector <= IRQ_MAX_NR)
    {
        plic_disable_irq(vector);
    }
}
/**
 * This function will un-mask a interrupt.
 * @param vector the interrupt number
 */
void rt_hw_interrupt_umask(int vector)
{
    /* ignore out-of-range vectors; otherwise enable the source at the PLIC */
    if (vector >= 0 && vector <= IRQ_MAX_NR)
    {
        plic_enable_irq(vector);
    }
}
/**
 * This function will install a interrupt service routine to a interrupt.
 * @param vector the interrupt number
 * @param handler the interrupt service routine to be installed
 * @param param the interrupt service function parameter
 * @param name the interrupt name
 * @return old handler, or RT_NULL when the vector is out of range
 */
rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
        void *param, const char *name)
{
    rt_isr_handler_t old_handler = RT_NULL;

    /* BUGFIX: isr_table has INTERRUPTS_MAX (= IRQ_OFFSET + IRQ_MAX_NR)
     * entries, so the largest valid vector is IRQ_MAX_NR - 1. The previous
     * "vector > IRQ_MAX_NR" check let vector == IRQ_MAX_NR index one slot
     * past the end of the table. This also matches generic_handle_irq(). */
    if ((vector < 0) || (vector >= IRQ_MAX_NR))
    {
        return old_handler;
    }

    old_handler = isr_table[IRQ_OFFSET + vector].handler;
#ifdef RT_USING_INTERRUPT_INFO
    rt_strncpy(isr_table[IRQ_OFFSET + vector].name, name, RT_NAME_MAX);
#endif /* RT_USING_INTERRUPT_INFO */
    isr_table[IRQ_OFFSET + vector].handler = handler;
    isr_table[IRQ_OFFSET + vector].param = param;
    return old_handler;
}

View File

@ -0,0 +1,48 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-10-19 JasonHu first version
*/
#ifndef __INTERRUPT_H__
#define __INTERRUPT_H__
#include <rthw.h>
#define NR_CPUS 1
#define IRQ_OFFSET 16
#ifndef IRQ_MAX_NR
#define IRQ_MAX_NR 200
#endif
#define INTERRUPTS_MAX (IRQ_OFFSET + IRQ_MAX_NR)
enum {
EP_INSTRUCTION_ADDRESS_MISALIGNED = 0,
EP_INSTRUCTION_ACCESS_FAULT,
EP_ILLEGAL_INSTRUCTION,
EP_BREAKPOINT,
EP_LOAD_ADDRESS_MISALIGNED,
EP_LOAD_ACCESS_FAULT,
EP_STORE_ADDRESS_MISALIGNED,
EP_STORE_ACCESS_FAULT,
EP_ENVIRONMENT_CALL_U_MODE,
EP_ENVIRONMENT_CALL_S_MODE,
EP_RESERVED10,
EP_ENVIRONMENT_CALL_M_MODE,
EP_INSTRUCTION_PAGE_FAULT, /* page attr */
EP_LOAD_PAGE_FAULT, /* read data */
EP_RESERVED14,
EP_STORE_PAGE_FAULT, /* write data */
};
void rt_hw_interrupt_init(void);
void rt_hw_interrupt_mask(int vector);
void rt_hw_interrupt_umask(int vector);
rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler, void *param, const char *name);
#endif

View File

@ -0,0 +1,92 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-09 WangXiaoyao Add portable asm support
* 2022-03-16 WangXiaoyao Porting to xtheadsync & xtheadcmo ISA extension
*/
#ifndef __OPCODE_H__
#define __OPCODE_H__
/**
* @brief binary opcode pseudo operations
* Used to bypass toolchain restriction on extension ISA
*
* WARNING: Xuantie ISAs are not compatible to each other in opcode.
* It's painful to port this file, and should be really careful.
*/
/**
* @brief RISC-V instruction formats
*/
/**
* R type: .insn r opcode6, func3, func7, rd, rs1, rs2
*
* +-------+-----+-----+-------+----+---------+
* | func7 | rs2 | rs1 | func3 | rd | opcode6 |
* +-------+-----+-----+-------+----+---------+
* 31 25 20 15 12 7 0
*/
#define __OPC_INSN_FORMAT_R(opcode, func3, func7, rd, rs1, rs2) \
".insn r "RT_STRINGIFY(opcode)","RT_STRINGIFY(func3)","RT_STRINGIFY(func7)","RT_STRINGIFY(rd)","RT_STRINGIFY(rs1)","RT_STRINGIFY(rs2)
/**
* @brief Xuantie T-HEAD extension ISA format
* Compatible to Xuantie C908 user manual v03
*/
#define __OPC_INSN_FORMAT_CACHE(func7, rs2, rs1) \
__OPC_INSN_FORMAT_R(0x0b, 0x0, func7, x0, rs1, rs2)
#ifdef _TOOLCHAIN_SUPP_XTHEADE_ISA_
/* Toolchain understands the Xuantie mnemonics directly. */
#define OPC_SYNC "sync"
#define OPC_SYNC_S "sync.s"
#define OPC_SYNC_I "sync.i"
#define OPC_SYNC_IS "sync.is"
#define OPC_DCACHE_CALL "dcache.call"
#define OPC_DCACHE_IALL "dcache.iall"
#define OPC_DCACHE_CIALL "dcache.ciall"
/* BUGFIX: previously stringified with the undefined _TOSTR(); every other
 * operand macro in this header uses RT_STRINGIFY(). */
#define OPC_DCACHE_CVAL1(rs1) "dcache.cval1 "RT_STRINGIFY(rs1)
#define OPC_ICACHE_IALL "icache.iall"
#define OPC_DCACHE_CVA(rs1) "dcache.cva "RT_STRINGIFY(rs1)
#define OPC_DCACHE_IVA(rs1) "dcache.iva "RT_STRINGIFY(rs1)
#define OPC_DCACHE_CIVA(rs1) "dcache.civa "RT_STRINGIFY(rs1)
#define OPC_ICACHE_IVA(rs1) "icache.iva "RT_STRINGIFY(rs1)
#else /* !_TOOLCHAIN_NOT_SUPP_THEAD_ISA_ */
/* Emit fixed opcodes so a vanilla RV64 toolchain can still assemble them. */
#define OPC_SYNC ".long 0x0180000B"
#define OPC_SYNC_S ".long 0x0190000B"
#define OPC_SYNC_I ".long 0x01A0000B"
#define OPC_SYNC_IS ".long 0x01B0000B"
#define OPC_DCACHE_CALL ".long 0x0010000B"
#define OPC_DCACHE_IALL ".long 0x0020000B"
#define OPC_DCACHE_CIALL ".long 0x0030000B"
#define OPC_DCACHE_CVAL1(rs1) __OPC_INSN_FORMAT_CACHE(0x1, x4, rs1)
#define OPC_ICACHE_IALL ".long 0x0100000B"
#define OPC_DCACHE_CVA(rs1) __OPC_INSN_FORMAT_CACHE(0x1, x5, rs1)
#define OPC_DCACHE_IVA(rs1) __OPC_INSN_FORMAT_CACHE(0x1, x6, rs1)
#define OPC_DCACHE_CIVA(rs1) __OPC_INSN_FORMAT_CACHE(0x1, x7, rs1)
#define OPC_ICACHE_IVA(rs1) __OPC_INSN_FORMAT_CACHE(0x1, x16, rs1)
#endif /* _TOOLCHAIN_NOT_SUPP_THEAD_ISA_ */
/**
* @brief RISC-V zifencei ISA
*/
#ifdef _TOOLCHAIN_SUPP_ZIFENCEI_ISA_
#define OPC_FENCE_I "fence.i"
#else /* !_TOOLCHAIN_SUPP_ZIFENCEI_ISA_ */
#define OPC_FENCE_I ".long 0x0000100F"
#endif /* _TOOLCHAIN_SUPP_ZIFENCEI_ISA_ */
#endif /* __OPCODE_H__ */

View File

@ -0,0 +1,220 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-10-19 JasonHu first version
* 2021-11-12 JasonHu fix bug that not intr on f133
* 2023-04-22 flyingcys add plic register ioremap
*/
#include <rtthread.h>
#include <rtdbg.h>
#include "plic.h"
#include "interrupt.h"
#include "io.h"
#include "encoding.h"
#include "ioremap.h"
/* Base of the PLIC register block; set once by plic_init(). */
static void *plic_regs = RT_NULL;
extern struct rt_irq_desc isr_table[];
/* Per-hart view of the PLIC. */
struct plic_handler
{
    rt_bool_t present;   /* context discovered and mapped */
    void *hart_base;     /* this hart's threshold/claim register block */
    void *enable_base;   /* this hart's interrupt-enable bitmap */
};
rt_inline void plic_toggle(struct plic_handler *handler, int hwirq, int enable);
struct plic_handler plic_handlers[C908_NR_CPUS];
/* cached rt_ioremap() results for each source's priority register */
static void *plic_irq_priority[INTERRUPTS_MAX] = {RT_NULL};
/* Enable or disable one interrupt source for hart 0. The `enable` value is
 * also written as the source's priority: 1 (> threshold 0) when enabling,
 * 0 (never delivered) when disabling. */
rt_inline void plic_irq_toggle(int hwirq, int enable)
{
    int cpu = 0;
    void *priority_addr;
    /* set priority of interrupt, interrupt 0 is zero. */
    priority_addr = (void *)((rt_size_t)plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
#ifdef RT_USING_SMART
    /* with the MMU on, lazily ioremap each priority register and cache it */
    if (plic_irq_priority[hwirq] == RT_NULL)
    {
        plic_irq_priority[hwirq] = (void *)rt_ioremap(priority_addr, 0x1000);
    }
    priority_addr = plic_irq_priority[hwirq];
#endif
    writel(enable, priority_addr);
    struct plic_handler *handler = &plic_handlers[cpu];
    if (handler->present)
    {
        plic_toggle(handler, hwirq, enable);
    }
}
/* Dispatch one claimed PLIC source to its registered handler, then signal
 * completion back to the PLIC. */
static void generic_handle_irq(int irq)
{
    rt_isr_handler_t isr;
    void *param;
    if (irq < 0 || irq >= IRQ_MAX_NR)
    {
        LOG_E("bad irq number %d!\n", irq);
        return;
    }
    if (!irq) // irq = 0 => no irq
    {
        LOG_W("no irq!\n");
        return;
    }
    /* external sources live after the IRQ_OFFSET exception slots */
    isr = isr_table[IRQ_OFFSET + irq].handler;
    param = isr_table[IRQ_OFFSET + irq].param;
    if (isr != RT_NULL)
    {
        isr(irq, param);
    }
    /* complete irq. */
    plic_complete(irq);
}
/* Signal completion: write the source ID back to the claim/complete register. */
void plic_complete(int irqno)
{
    int cpu = 0;
    struct plic_handler *handler = &plic_handlers[cpu];
    writel(irqno, (void *)((rt_size_t)handler->hart_base + CONTEXT_CLAIM));
}
/* Mask one external interrupt source. */
void plic_disable_irq(int irqno)
{
    plic_irq_toggle(irqno, 0);
}
/* Unmask one external interrupt source. */
void plic_enable_irq(int irqno)
{
    plic_irq_toggle(irqno, 1);
}
/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register. This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
void plic_handle_irq(void)
{
    int cpu = 0;
    unsigned int irq;
    struct plic_handler *handler = &plic_handlers[cpu];
    /* the claim register yields the highest-priority pending source, or 0 */
    void *claim = (void *)((rt_size_t)handler->hart_base + CONTEXT_CLAIM);
    if (plic_regs == RT_NULL || !handler->present)
    {
        LOG_E("plic state not initialized.");
        return;
    }
    /* mask S-mode external interrupts while draining pending sources */
    clear_csr(sie, SIE_SEIE);
    while ((irq = readl(claim)))
    {
        /* ID 0 is permanently disabled per the PLIC spec. */
        if (irq == 0)
        {
            LOG_E("irq no is zero.");
        }
        else
        {
            generic_handle_irq(irq);
        }
    }
    set_csr(sie, SIE_SEIE);
}
rt_inline void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
uint32_t *reg = (uint32_t *)((rt_size_t)handler->enable_base + (hwirq / 32) * sizeof(uint32_t));
uint32_t hwirq_mask = 1 << (hwirq % 32);
if (enable)
{
writel(readl(reg) | hwirq_mask, reg);
}
else
{
writel(readl(reg) & ~hwirq_mask, reg);
}
}
/* Probe and initialize the PLIC: map hart 0's supervisor context, raise the
 * threshold of any other context so it never fires, mask every source, and
 * finally enable S-mode external interrupts. */
void plic_init(void)
{
    int nr_irqs;
    int nr_context;
    int i;
    unsigned long hwirq;
    int cpu = 0;
    if (plic_regs)
    {
        LOG_E("plic already initialized!");
        return;
    }
    nr_context = C908_NR_CONTEXT;
    plic_regs = (void *)C908_PLIC_PHY_ADDR;
    if (!plic_regs)
    {
        LOG_E("fatal error, plic is reg space is null.");
        return;
    }
    nr_irqs = C908_PLIC_NR_EXT_IRQS;
    for (i = 0; i < nr_context; i ++)
    {
        struct plic_handler *handler;
        uint32_t threshold = 0;
        cpu = 0;
        /* skip contexts other than supervisor external interrupt */
        if (i == 0)
        {
            continue;
        }
        // we always use CPU0 M-mode target register.
        handler = &plic_handlers[cpu];
        if (handler->present)
        {
            /* context already claimed: max out the threshold so it never fires */
            threshold = 0xffffffff;
            goto done;
        }
        handler->present = RT_TRUE;
        handler->hart_base = (void *)((rt_size_t)plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART);
        handler->enable_base = (void *)((rt_size_t)plic_regs + ENABLE_BASE + i * ENABLE_PER_HART);
#ifdef RT_USING_SMART
        /* with the MMU on, the PLIC must be accessed through a VA mapping */
        handler->hart_base = (void *)rt_ioremap(handler->hart_base, 0x1000);
        handler->enable_base = (void *)rt_ioremap(handler->enable_base, 0x1000);
#endif
done:
        /* priority must be > threshold to trigger an interrupt */
        writel(threshold, (void *)((rt_size_t)handler->hart_base + CONTEXT_THRESHOLD));
        /* start with every source masked; drivers enable what they need */
        for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
        {
            plic_toggle(handler, hwirq, 0);
        }
    }
    /* Enable supervisor external interrupts. */
    set_csr(sie, SIE_SEIE);
}

View File

@ -0,0 +1,60 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-10-19 JasonHu first version
* 2023-04-22 flyingcys add C906_PLIC_PHY_ADDR macro judge
*/
#ifndef __RISCV64_PLIC_H__
#define __RISCV64_PLIC_H__
#include <interrupt.h>
#ifndef C908_PLIC_PHY_ADDR
#define C908_PLIC_PHY_ADDR (0xF00000000UL)
#endif
#define C908_PLIC_NR_EXT_IRQS (IRQ_MAX_NR)
#define C908_NR_CPUS (NR_CPUS)
/* M and S mode context. */
#define C908_NR_CONTEXT (2)
#define MAX_DEVICES 1024
#define MAX_CONTEXTS 15872
/*
* Each interrupt source has a priority register associated with it.
* We always hardwire it to one in Linux.
*/
#define PRIORITY_BASE 0
#define PRIORITY_PER_ID 4
/*
* Each hart context has a vector of interrupt enable bits associated with it.
* There's one bit for each interrupt source.
*/
#define ENABLE_BASE 0x2000
#define ENABLE_PER_HART 0x80
/*
* Each hart context has a set of control registers associated with it. Right
* now there's only two: a source priority threshold over which the hart will
* take an interrupt, and a register to claim interrupts.
*/
#define CONTEXT_BASE 0x200000
#define CONTEXT_PER_HART 0x1000
#define CONTEXT_THRESHOLD 0x00
#define CONTEXT_CLAIM 0x04
void plic_init(void);
void plic_enable_irq(int irqno);
void plic_disable_irq(int irqno);
// tell PLIC that we've served this IRQ
void plic_complete(int irq);
void plic_handle_irq(void);
#endif

View File

@ -0,0 +1,202 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-01-30 lizhirui first version
* 2021-05-03 lizhirui porting to C906
* 2023-10-12 Shell Add permission control API
*/
#ifndef __RISCV_MMU_H__
#define __RISCV_MMU_H__
#include <rtthread.h>
#include <rthw.h>
#include "riscv.h"
#undef PAGE_SIZE
/* C-SKY extend */
#define PTE_SEC (1UL << 59) /* Security */
#define PTE_SHARE (1UL << 60) /* Shareable */
#define PTE_BUF (1UL << 61) /* Bufferable */
#define PTE_CACHE (1UL << 62) /* Cacheable */
#define PTE_SO (1UL << 63) /* Strong Order */
#define PAGE_OFFSET_SHIFT 0
#define PAGE_OFFSET_BIT 12
#define PAGE_SIZE __SIZE(PAGE_OFFSET_BIT)
#define PAGE_OFFSET_MASK __MASK(PAGE_OFFSET_BIT)
#define VPN0_SHIFT (PAGE_OFFSET_SHIFT + PAGE_OFFSET_BIT)
#define VPN0_BIT 9
#define VPN1_SHIFT (VPN0_SHIFT + VPN0_BIT)
#define VPN1_BIT 9
#define VPN2_SHIFT (VPN1_SHIFT + VPN1_BIT)
#define VPN2_BIT 9
#define PPN0_SHIFT (PAGE_OFFSET_SHIFT + PAGE_OFFSET_BIT)
#define PPN0_BIT 9
#define PPN1_SHIFT (PPN0_SHIFT + PPN0_BIT)
#define PPN1_BIT 9
#define PPN2_SHIFT (PPN1_SHIFT + PPN1_BIT)
#define PPN2_BIT 26
#define L1_PAGE_SIZE __SIZE(PAGE_OFFSET_BIT + VPN0_BIT + VPN1_BIT)
#define L2_PAGE_SIZE __SIZE(PAGE_OFFSET_BIT + VPN0_BIT)
#define L3_PAGE_SIZE __SIZE(PAGE_OFFSET_BIT)
#define ARCH_ADDRESS_WIDTH_BITS 64
#define PHYSICAL_ADDRESS_WIDTH_BITS 56
#define PAGE_ATTR_NEXT_LEVEL (0)
#define PAGE_ATTR_RWX (PTE_X | PTE_W | PTE_R)
#define PAGE_ATTR_READONLY (PTE_R)
#define PAGE_ATTR_READEXECUTE (PTE_X | PTE_R)
#define PAGE_ATTR_USER (PTE_U)
#define PAGE_ATTR_SYSTEM (0)
#define PAGE_DEFAULT_ATTR_LEAF \
(PAGE_ATTR_RWX | PAGE_ATTR_USER | PTE_V | PTE_G | PTE_SHARE | PTE_BUF | \
PTE_CACHE | PTE_A | PTE_D)
#define PAGE_DEFAULT_ATTR_NEXT \
(PAGE_ATTR_NEXT_LEVEL | PTE_V | PTE_G | PTE_SHARE | PTE_BUF | PTE_CACHE | PTE_A | PTE_D)
#define PAGE_IS_LEAF(pte) __MASKVALUE(pte, PAGE_ATTR_RWX)
#define PTE_USED(pte) __MASKVALUE(pte, PTE_V)
#define PTE_WRAP(attr) (attr | PTE_A | PTE_D)
/**
* encoding of SATP (Supervisor Address Translation and Protection register)
*/
#define SATP_MODE_OFFSET 60
#define SATP_MODE_BARE 0
#define SATP_MODE_SV39 8
#define SATP_MODE_SV48 9
#define SATP_MODE_SV57 10
#define SATP_MODE_SV64 11
#define ARCH_VADDR_WIDTH 39
#define SATP_MODE SATP_MODE_SV39
//compatible to rt-smart new version
#define MMU_MAP_K_DEVICE (PTE_BUF | PTE_SO | PTE_A | PTE_D | PTE_G | PTE_W | PTE_R | PTE_V)
#define MMU_MAP_K_RW (PTE_SHARE | PTE_A | PTE_D | PTE_G | PAGE_ATTR_RWX | PTE_V)
#define MMU_MAP_K_RWCB (MMU_MAP_K_RW | PTE_BUF | PTE_CACHE)
#define MMU_MAP_U_RW (PTE_SHARE | PTE_U | PTE_A | PTE_D | PAGE_ATTR_RWX | PTE_V)
#define MMU_MAP_U_RWCB (MMU_MAP_U_RW | PTE_BUF | PTE_CACHE)
#define MMU_MAP_EARLY \
PTE_WRAP(PAGE_ATTR_RWX | PTE_G | PTE_V | PTE_CACHE | PTE_SHARE | PTE_BUF)
#define MMU_MAP_TRACE(attr) (attr)
#define PTE_XWR_MASK 0xe
#define ARCH_PAGE_SIZE PAGE_SIZE
#define ARCH_PAGE_MASK (ARCH_PAGE_SIZE - 1)
#define ARCH_PAGE_SHIFT PAGE_OFFSET_BIT
#define ARCH_INDEX_WIDTH 9
#define ARCH_MAP_FAILED ((void *)0x8000000000000000)
void mmu_set_pagetable(rt_ubase_t addr);
void mmu_enable_user_page_access(void);
void mmu_disable_user_page_access(void);
#define RT_HW_MMU_PROT_READ 1
#define RT_HW_MMU_PROT_WRITE 2
#define RT_HW_MMU_PROT_EXECUTE 4
#define RT_HW_MMU_PROT_KERNEL 8
#define RT_HW_MMU_PROT_USER 16
#define RT_HW_MMU_PROT_CACHE 32
void rt_hw_asid_init(void);
struct rt_aspace;
void rt_hw_asid_switch_pgtbl(struct rt_aspace *aspace, rt_ubase_t pgtbl);
/**
 * @brief Remove permission from attribution
 *
 * @param attr architecture specified mmu attribution
 * @param prot protect that will be removed
 * @return size_t returned attribution
 */
rt_inline size_t rt_hw_mmu_attr_rm_perm(size_t attr, rt_base_t prot)
{
    /* write permission can be stripped for either user or kernel mappings;
     * any other request is a programming error */
    if ((prot == (RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER)) ||
        (prot == (RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_KERNEL)))
    {
        attr &= ~PTE_W;
    }
    else
    {
        RT_ASSERT(0);
    }
    return attr;
}
/**
 * @brief Add permission from attribution
 *
 * @param attr architecture specified mmu attribution
 * @param prot protect that will be added
 * @return size_t returned attribution
 */
rt_inline size_t rt_hw_mmu_attr_add_perm(size_t attr, rt_base_t prot)
{
    /* granting user write access is the only supported request; a writable
     * page must also be readable and user-accessible on RISC-V */
    if (prot == (RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER))
    {
        attr |= (PTE_R | PTE_W | PTE_U);
    }
    else
    {
        RT_ASSERT(0);
    }
    return attr;
}
/**
 * @brief Test permission from attribution
 *
 * @param attr architecture specified mmu attribution
 * @param prot protect that will be test
 * @return rt_bool_t RT_TRUE if the prot is allowed, otherwise RT_FALSE
 */
rt_inline rt_bool_t rt_hw_mmu_attr_test_perm(size_t attr, rt_base_t prot)
{
    rt_bool_t rc = 0;
    /* the USER bit is an extra constraint checked after the base permission */
    rt_base_t request = prot & ~RT_HW_MMU_PROT_USER;

    if (request == RT_HW_MMU_PROT_WRITE)
    {
        /* a usable writable mapping must be readable as well */
        rc = ((attr & PTE_W) && (attr & PTE_R));
    }
    else if (request == RT_HW_MMU_PROT_READ)
    {
        rc = !!(attr & PTE_R);
    }
    else if (request == RT_HW_MMU_PROT_EXECUTE)
    {
        rc = !!(attr & PTE_X);
    }
    else
    {
        RT_ASSERT(0);
    }

    /* user access additionally requires the U bit */
    if (rc && (prot & RT_HW_MMU_PROT_USER))
    {
        rc = !!(attr & PTE_U);
    }
    return rc;
}
#endif

View File

@ -0,0 +1,298 @@
/*
* Copyright lizhirui
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 lizhirui the first version
* 2021-05-20 lizhirui add os debug support
*/
#include <rtthread.h>
#include "symbol_analysis.h"
#define MEMORY_BASE 0x40000000
#define MEMORY_SIZE (128 * 0x100000)
extern rt_size_t _osdebug_start;
static os_symtab_header *symtab_header = (os_symtab_header *)&_osdebug_start;
/*
 * Find, in the given osdebug symbol table, the descriptor of the symbol
 * covering `address`; see the osdebug documentation for the rules the
 * returned symbol follows.
 *
 * symbol_table_addr - table offset relative to &_osdebug_start
 * symbol_num        - number of entries in that table
 * address           - address to look up
 * Returns the first of a run of equal-address matching entries, or RT_NULL
 * when every symbol lies above `address`.
 */
os_symtab_item *find_symbol_table(rt_size_t symbol_table_addr,rt_size_t symbol_num,rt_size_t address)
{
    rt_size_t left = 0;
    rt_size_t right = symbol_num;
    os_symtab_item *sym_table = (os_symtab_item *)((rt_size_t)&_osdebug_start + symbol_table_addr);

    /* binary search (tables are sorted by address, duplicates allowed) */
    while(left < right)
    {
        rt_size_t mid = (left + right) >> 1;
        if(address < sym_table[mid].address)
        {
            right = mid;
            /* rewind over duplicate addresses; `right > 0` guards the
             * sym_table[right - 1] access — the original `(right - 1) >= 0`
             * is always true for unsigned rt_size_t and allowed an
             * out-of-bounds read when right == 0 */
            while((right > 0) && (right < symbol_num) && (sym_table[right].address == sym_table[right - 1].address))
            {
                right--;
            }
        }
        else if(address == sym_table[mid].address)
        {
            left = mid + 1;
            break;
        }
        else
        {
            left = mid;
            /* skip forward over duplicates of sym_table[left]; the
             * tautological unsigned `left >= 0` test was removed */
            while(((left + 1) < symbol_num) && (sym_table[left].address == sym_table[left + 1].address))
            {
                left++;
            }
            left++;
        }
    }
    left--;
    /* `left` wrapped around: no symbol is at or below `address` */
    if(left == ((rt_size_t)-1))
    {
        return RT_NULL;
    }
    /* step back to the first of a run of equal-address symbols;
     * `left > 0` replaces the always-true `(left - 1) >= 0` and prevents
     * reading sym_table[-1] */
    while((left > 0) && (left < symbol_num) && (sym_table[left].address == sym_table[left - 1].address))
    {
        left--;
    }
    return &sym_table[left];
}
/* Resolve a symbol descriptor to its name inside the osdebug string table. */
const char *get_symbol_name(os_symtab_item *symbol)
{
    rt_size_t string_table = (rt_size_t)&_osdebug_start + symtab_header->string_table_offset;
    return (const char *)(string_table + symbol->name_offset);
}
/* Print the standard-format description of `symbol` for `address`:
 * "<name(0xaddr) : 0xsize> + 0xoffset" (size and offset parts optional). */
void print_symbol(os_symtab_item *symbol,rt_size_t address)
{
    rt_kprintf("<%s(0x%p)",get_symbol_name(symbol),symbol -> address);
    if(symbol -> size)
    {
        /* a known object/function size is appended after the address */
        rt_kprintf(" : 0x%x>",symbol -> size);
    }
    else
    {
        rt_kprintf(">");
    }
    if(address > symbol -> address)
    {
        /* offset of `address` into the symbol */
        rt_kprintf(" + 0x%x",address - symbol -> address);
    }
}
//该函数用于打印出一个地址关联的全部符号信息
void print_symbol_info(rt_size_t address,rt_bool_t function)
{
os_symtab_item *function_symbol = find_symbol_table(symtab_header -> function_table_offset,symtab_header -> function_table_num,address);
os_symtab_item *object_symbol = find_symbol_table(symtab_header -> object_table_offset,symtab_header -> object_table_num,address);
os_symtab_item *general_symbol = find_symbol_table(symtab_header -> general_symbol_table_offset,symtab_header -> general_symbol_table_num,address);
const char *dot = "";
rt_bool_t valid = RT_FALSE;
if(function)
{
while(function_symbol != RT_NULL)
{
if((function_symbol -> address + function_symbol -> size) > address)
{
rt_kprintf(dot);
print_symbol(function_symbol,address);
dot = ",";
valid = RT_TRUE;
}
if(((rt_size_t)(function_symbol + 1)) >= (((rt_size_t)&_osdebug_start) + symtab_header -> function_table_offset + symtab_header -> function_table_num * sizeof(os_symtab_item)))
{
break;
}
if(function_symbol[0].address == function_symbol[1].address)
{
function_symbol++;
}
break;
}
if(!valid)
{
while(general_symbol != RT_NULL)
{
rt_kprintf(dot);
print_symbol(general_symbol,address);
dot = ",";
valid = RT_TRUE;
if(((rt_size_t)(general_symbol + 1)) >= (((rt_size_t)&_osdebug_start) + symtab_header -> general_symbol_table_offset + symtab_header -> general_symbol_table_num * sizeof(os_symtab_item)))
{
break;
}
if(general_symbol[0].address == general_symbol[1].address)
{
general_symbol++;
}
break;
}
while(object_symbol != RT_NULL)
{
if((object_symbol -> address + object_symbol -> size) > address)
{
rt_kprintf(dot);
print_symbol(object_symbol,address);
dot = ",";
valid = RT_TRUE;
}
if(((rt_size_t)(object_symbol + 1)) >= (((rt_size_t)&_osdebug_start) + symtab_header -> object_table_offset + symtab_header -> object_table_num * sizeof(os_symtab_item)))
{
break;
}
if(object_symbol[0].address == object_symbol[1].address)
{
object_symbol++;
}
break;
}
}
}
else
{
while(object_symbol != RT_NULL)
{
if((object_symbol -> address + object_symbol -> size) > address)
{
rt_kprintf(dot);
print_symbol(object_symbol,address);
dot = ",";
valid = RT_TRUE;
}
if(((rt_size_t)(object_symbol + 1)) >= (((rt_size_t)&_osdebug_start) + symtab_header -> object_table_offset + symtab_header -> object_table_num * sizeof(os_symtab_item)))
{
break;
}
if(object_symbol[0].address == object_symbol[1].address)
{
object_symbol++;
}
break;
}
if(!valid)
{
while(general_symbol != RT_NULL)
{
rt_kprintf(dot);
print_symbol(general_symbol,address);
dot = ",";
valid = RT_TRUE;
if(((rt_size_t)(general_symbol + 1)) >= (((rt_size_t)&_osdebug_start) + symtab_header -> general_symbol_table_offset + symtab_header -> general_symbol_table_num * sizeof(os_symtab_item)))
{
break;
}
if(general_symbol[0].address == general_symbol[1].address)
{
general_symbol++;
}
break;
}
while(function_symbol != RT_NULL)
{
if((function_symbol -> address + function_symbol -> size) > address)
{
rt_kprintf(dot);
print_symbol(function_symbol,address);
dot = ",";
valid = RT_TRUE;
}
if(((rt_size_t)(function_symbol + 1)) >= (((rt_size_t)&_osdebug_start) + symtab_header -> function_table_offset + symtab_header -> function_table_num * sizeof(os_symtab_item)))
{
break;
}
if(function_symbol[0].address == function_symbol[1].address)
{
function_symbol++;
}
break;
}
}
}
if(dot == "")
{
rt_kprintf("<Unknown Symbol>");
}
}
/* Print a stack backtrace at error time, starting from `epc` and walking
 * the frame-pointer chain rooted at `fp`. */
void print_stacktrace(rt_size_t epc,rt_size_t fp)
{
    rt_kprintf("-----------------------------Dump Stacktrace----------------------------\n\n");
    rt_size_t sp = fp;
    rt_size_t i = 0;
    /* first report the faulting pc itself */
    rt_kprintf("address 0x%p(",epc);
    print_symbol_info(epc,RT_TRUE);
    rt_kprintf(")\n\n");
    while(1)
    {
        /* only follow frame pointers that stay inside RAM
         * [MEMORY_BASE, MEMORY_BASE + MEMORY_SIZE) */
        if((sp >= MEMORY_BASE) && (sp < (MEMORY_BASE + MEMORY_SIZE)))
        {
            //rt_kprintf("%d: 0x%p\n",i,sp);
            /* assumes the saved {fp, ra} pair sits just below the frame
             * pointer (standard RISC-V frame layout) — TODO confirm this
             * holds for all functions in this build */
            rt_size_t *stack = (rt_size_t *)(sp - sizeof(rt_size_t) * 2);
            rt_size_t ra = stack[1];
            /* a zero return address terminates the chain */
            if(!ra)
            {
                break;
            }
            rt_kprintf("return to 0x%p(",ra);
            print_symbol_info(ra,RT_TRUE);
            rt_kprintf(")\n\n");
            //rt_kprintf("ra = 0x%p,fp = 0x%p\n",stack[1],stack[0]);
            sp = stack[0];
            i++;
        }
        else
        {
            break;
        }
    }
    rt_kprintf("---------------------------------Dump OK--------------------------------\n");
}

View File

@ -0,0 +1,44 @@
/*
* Copyright lizhirui
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 lizhirui the first version
* 2021-05-20 lizhirui add os debug support
*/
#ifndef __SYMBOL_ANALYSIS_H__
#define __SYMBOL_ANALYSIS_H__
#include <rtthread.h>
/* Header of the "osdebug" section. */
typedef struct os_symtab_header
{
    rt_size_t function_table_offset;        /* offset of the function table (relative to the section start, same below) */
    rt_size_t function_table_num;           /* number of symbols in the function table */
    rt_size_t object_table_offset;          /* offset of the object table */
    rt_size_t object_table_num;             /* number of symbols in the object table */
    rt_size_t general_symbol_table_offset;  /* offset of the general symbol table (symbols typed NONE but bound GLOBAL) */
    rt_size_t general_symbol_table_num;     /* number of symbols in the general symbol table */
    rt_size_t string_table_offset;          /* offset of the string table */
    rt_size_t string_table_size;            /* size of the string table in bytes */
}os_symtab_header;
/* One symbol descriptor. */
typedef struct os_symtab_item
{
    rt_size_t name_offset;  /* offset of the symbol name within the string table */
    rt_size_t address;      /* address this symbol stands for */
    rt_size_t size;         /* size of the entity this symbol stands for */
}os_symtab_item;
os_symtab_item *find_symbol_table(rt_size_t symbol_table_addr,rt_size_t symbol_num,rt_size_t address);
const char *get_symbol_name(os_symtab_item *symbol);
void print_symbol(os_symtab_item *symbol,rt_size_t address);
void print_symbol_info(rt_size_t address,rt_bool_t function);
void print_stacktrace(rt_size_t epc,rt_size_t fp);
#endif

View File

@ -0,0 +1,12 @@
# RT-Thread building script for component
import os

from building import *

cwd = GetCurrentDir()
src = []
CPPPATH = []

# Only export the rvv-1.0 headers; no sources are compiled here.
# os.path.join is used instead of string concatenation with '/' for
# consistency with the sibling SConscript files.
CPPPATH += [os.path.join(cwd, 'rvv-1.0')]

group = DefineGroup('libcpu', src, depend = ['ARCH_RISCV_VECTOR'], CPPPATH = CPPPATH)

Return('group')

View File

@ -0,0 +1,112 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-10 RT-Thread the first version,
* compatible to riscv-v-spec-1.0
*/
#ifndef __RVV_CONTEXT_1_0_H__
#define __RVV_CONTEXT_1_0_H__
/* number of XLEN-sized words needed to hold the 32 vector registers,
 * selected by the configured VLEN */
#if defined(ARCH_VECTOR_VLEN_128)
#define CTX_VECTOR_REGS 64
#elif defined(ARCH_VECTOR_VLEN_256)
#define CTX_VECTOR_REGS 128
#else
#error "No supported VLEN"
#endif /* VLEN */
/* register words plus the 4 CSR slots (vstart/vtype/vl/vcsr) */
#define CTX_VECTOR_REG_NR (CTX_VECTOR_REGS + 4)
#ifdef __ASSEMBLY__
/**
 * ==================================
 * VECTOR EXTENSION
 * ==================================
 */
/* layout of the vector context frame: 4 CSR slots, then v0..v31 */
#define VEC_FRAME_VSTART (0 * REGBYTES)
#define VEC_FRAME_VTYPE (1 * REGBYTES)
#define VEC_FRAME_VL (2 * REGBYTES)
#define VEC_FRAME_VCSR (3 * REGBYTES)
#define VEC_FRAME_V0 (4 * REGBYTES)
/* compute the byte length of the whole frame into \xreg:
 * vlenb * 32 (register file) + 4 CSR slots */
.macro GET_VEC_FRAME_LEN, xreg
csrr \xreg, vlenb
slli \xreg, \xreg, 5
addi \xreg, \xreg, 4 * REGBYTES
.endm
/**
 * @brief save vector extension hardware state
 *
 * @param dst register storing bottom of storage block
 *
 */
.macro SAVE_VECTOR, dst
mv t1, \dst
/* capture the vector CSRs before any vector instruction clobbers them */
csrr t0, vstart
STORE t0, VEC_FRAME_VSTART(t1)
csrr t0, vtype
STORE t0, VEC_FRAME_VTYPE(t1)
csrr t0, vl
STORE t0, VEC_FRAME_VL(t1)
csrr t0, vcsr
STORE t0, VEC_FRAME_VCSR(t1)
addi t1, t1, VEC_FRAME_V0
// config vector setting,
// t2 is updated to length of a vector group in bytes
VEC_CONFIG_SETVLI(t2, x0, VEC_IMM_SEW_8, VEC_IMM_LMUL_8)
/* store the register file as four LMUL=8 groups (v0/v8/v16/v24) */
vse8.v v0, (t1)
add t1, t1, t2
vse8.v v8, (t1)
add t1, t1, t2
vse8.v v16, (t1)
add t1, t1, t2
vse8.v v24, (t1)
.endm
/**
 * @brief restore vector extension hardware states
 *
 * @param dst register storing bottom of storage block
 *
 */
.macro RESTORE_VECTOR, dst
// restore vector registers first since it will modify vector states
mv t0, \dst
addi t1, t0, VEC_FRAME_V0
VEC_CONFIG_SETVLI(t2, x0, VEC_IMM_SEW_8, VEC_IMM_LMUL_8)
vle8.v v0, (t1)
add t1, t1, t2
vle8.v v8, (t1)
add t1, t1, t2
vle8.v v16, (t1)
add t1, t1, t2
vle8.v v24, (t1)
mv t1, t0
/* then restore the CSRs; vl/vtype must go last via vsetvl */
LOAD t0, VEC_FRAME_VSTART(t1)
csrw vstart, t0
LOAD t0, VEC_FRAME_VCSR(t1)
csrw vcsr, t0
LOAD t0, VEC_FRAME_VTYPE(t1)
LOAD t3, VEC_FRAME_VL(t1)
VEC_CONFIG_SET_VL_VTYPE(t3, t0)
.endm
#endif
#endif /* __RVV_CONTEXT_1_0_H__ */

View File

@ -0,0 +1,55 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-10 RT-Thread the first version,
* compatible to riscv-v-spec-1.0
*/
#ifndef __VECTOR_ENCODING_1_0_H__
#define __VECTOR_ENCODING_1_0_H__
/* mstatus/sstatus */
#define MSTATUS_VS 0x00000600
#define SSTATUS_VS 0x00000600 /* Vector Status */
#define SSTATUS_VS_INITIAL 0x00000200
#define SSTATUS_VS_CLEAN 0x00000400
#define SSTATUS_VS_DIRTY 0x00000600
#ifdef __ASSEMBLY__
/**
 * assembler names used for vset{i}vli vtypei immediate
 */
#define VEC_IMM_SEW_8 e8
#define VEC_IMM_SEW_16 e16
#define VEC_IMM_SEW_32 e32
#define VEC_IMM_SEW_64 e64
/* group setting, encoding by multiplier */
#define VEC_IMM_LMUL_F8 mf8
#define VEC_IMM_LMUL_F4 mf4
#define VEC_IMM_LMUL_F2 mf2
#define VEC_IMM_LMUL_1 m1
#define VEC_IMM_LMUL_2 m2
#define VEC_IMM_LMUL_4 m4
#define VEC_IMM_LMUL_8 m8
/* TAIL & MASK agnostic bits */
#define VEC_IMM_TAIL_AGNOSTIC ta
#define VEC_IMM_MASK_AGNOSTIC ma
#define VEC_IMM_TAMA VEC_IMM_TAIL_AGNOSTIC, VEC_IMM_MASK_AGNOSTIC
/* NOTE(review): TAMU/TUMA emit only one flag, relying on the assembler to
 * default the omitted tu/mu policy — confirm against the toolchain */
#define VEC_IMM_TAMU VEC_IMM_TAIL_AGNOSTIC
#define VEC_IMM_TUMA VEC_IMM_MASK_AGNOSTIC
/**
 * configuration setting instruction
 */
#define VEC_CONFIG_SETVLI(xVl, xAvl, vtype...) vsetvli xVl, xAvl, ##vtype
#define VEC_CONFIG_SET_VL_VTYPE(xVl, xVtype) vsetvl x0, xVl, xVtype
#endif
#endif /* __VECTOR_ENCODING_1_0_H__ */

View File

@ -0,0 +1,10 @@
# RT-Thread building script for component
from building import *

cwd = GetCurrentDir()

# every C/C++ source plus the GCC-syntax assembly files in this directory
src = Glob('*.c') + Glob('*.cpp') + Glob('*_gcc.S')
CPPPATH = [cwd]

# unconditional group (empty depend) exporting this directory on the include path
group = DefineGroup('libcpu', src, depend = [''], CPPPATH = CPPPATH)

Return('group')

View File

@ -0,0 +1,59 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-01-29 lizhirui first version
*/
#include <rthw.h>
#include <rtdef.h>
#include <board.h>
#include <riscv.h>
#include <cache.h>
/* Return the I-cache line size; 0 on this port (not exposed). */
rt_inline rt_uint32_t rt_cpu_icache_line_size()
{
    return 0;
}
/* Return the D-cache line size; 0 on this port (not exposed). */
rt_inline rt_uint32_t rt_cpu_dcache_line_size()
{
    return 0;
}
/* I-cache maintenance entry point: only invalidation is supported;
 * any other op is silently ignored. */
void rt_hw_cpu_icache_ops(int ops, void *addr, int size)
{
    if (ops == RT_HW_CACHE_INVALIDATE)
    {
        rt_hw_cpu_icache_invalidate(addr, size);
    }
}
/* D-cache maintenance entry point: FLUSH cleans the range, every other
 * op value invalidates it. */
void rt_hw_cpu_dcache_ops(int ops, void *addr, int size)
{
    if (ops == RT_HW_CACHE_FLUSH)
    {
        rt_hw_cpu_dcache_clean(addr, size);
    }
    else
    {
        rt_hw_cpu_dcache_invalidate(addr, size);
    }
}
/* Report I-cache status; always 0 on this port. */
rt_base_t rt_hw_cpu_icache_status_local()
{
    return 0;
}
/* Report D-cache status; always 0 on this port. */
rt_base_t rt_hw_cpu_dcache_status()
{
    return 0;
}
/* Synchronize local I/D caches for a range; intentionally a no-op here. */
void rt_hw_sync_cache_local(void *addr, int size)
{
}

View File

@ -0,0 +1,88 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-09 RT-Thread The first version
*/
#ifndef __CACHE_H__
#define __CACHE_H__
#include <rtdef.h>
/**
 * @brief These APIs may not be supported by a specified architecture
 * But we have to include to all the cases to be 'general purpose'
 */
/* all the *_local primitives below are intentional no-ops on this port */
rt_always_inline void rt_hw_cpu_dcache_clean_local(void *addr, int size)
{
    RT_UNUSED(addr);
    RT_UNUSED(size);
}
rt_always_inline void rt_hw_cpu_dcache_invalidate_local(void *addr, int size)
{
    RT_UNUSED(addr);
    RT_UNUSED(size);
}
rt_always_inline void rt_hw_cpu_dcache_clean_and_invalidate_local(void *addr, int size)
{
    RT_UNUSED(addr);
    RT_UNUSED(size);
}
rt_always_inline void rt_hw_cpu_dcache_clean_all_local(void)
{
}
rt_always_inline void rt_hw_cpu_dcache_invalidate_all_local(void)
{
}
rt_always_inline void rt_hw_cpu_dcache_clean_and_invalidate_all_local(void)
{
}
rt_always_inline void rt_hw_cpu_icache_invalidate_local(void *addr, int size)
{
    RT_UNUSED(addr);
    RT_UNUSED(size);
}
rt_always_inline void rt_hw_cpu_icache_invalidate_all_local(void)
{
}
/**
 * @brief Multi-core
 * (single-core port: the SMP-wide names alias the local ones)
 */
#define rt_hw_cpu_dcache_clean rt_hw_cpu_dcache_clean_local
#define rt_hw_cpu_dcache_invalidate rt_hw_cpu_dcache_invalidate_local
#define rt_hw_cpu_dcache_clean_and_invalidate rt_hw_cpu_dcache_clean_and_invalidate_local
#define rt_hw_cpu_dcache_clean_all rt_hw_cpu_dcache_clean_all_local
#define rt_hw_cpu_dcache_invalidate_all rt_hw_cpu_dcache_invalidate_all_local
#define rt_hw_cpu_dcache_clean_and_invalidate_all rt_hw_cpu_dcache_clean_and_invalidate_all_local
#define rt_hw_cpu_icache_invalidate rt_hw_cpu_icache_invalidate_local
#define rt_hw_cpu_icache_invalidate_all rt_hw_cpu_icache_invalidate_all_local
#define rt_hw_icache_invalidate_all rt_hw_cpu_icache_invalidate_all
/** instruction barrier */
static inline void rt_hw_cpu_sync(void) {}
/**
 * @brief local cpu icache & dcache synchronization
 *
 * @param addr
 * @param size
 */
void rt_hw_sync_cache_local(void *addr, int size);
#endif /* __CACHE_H__ */

View File

@ -0,0 +1,94 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/01 Bernard The first version
* 2018/12/27 Jesven Change irq enable/disable to cpu0
*/
#include <plic.h>
#include <mmu.h>
#include "tick.h"
#include "encoding.h"
#include "riscv.h"
#include "interrupt.h"
struct rt_irq_desc irq_desc[MAX_HANDLERS];
/* Default ISR installed on every vector: reports an interrupt that has
 * no real handler. */
static rt_isr_handler_t rt_hw_interrupt_handle(rt_uint32_t vector, void *param)
{
    rt_kprintf("UN-handled interrupt %d occurred!!!\n", vector);
    return RT_NULL;
}
/* Enable `irq_number` at the PLIC; always returns 0. */
int rt_hw_plic_irq_enable(int irq_number)
{
    plic_irq_enable(irq_number);
    return 0;
}
/* Disable `irq_number` at the PLIC; always returns 0. */
int rt_hw_plic_irq_disable(int irq_number)
{
    plic_irq_disable(irq_number);
    return 0;
}
/**
 * This function will un-mask a interrupt.
 * @param vector the interrupt number
 */
void rt_hw_interrupt_umask(int vector)
{
    /* give the source the lowest active priority (1), then enable it */
    plic_set_priority(vector, 1);
    rt_hw_plic_irq_enable(vector);
}
/**
 * Install an interrupt service routine for a vector.
 *
 * @param vector the interrupt number
 * @param handler the interrupt service routine to be installed
 * @param param argument passed to the handler on dispatch
 * @param name handler name (recorded only with RT_USING_INTERRUPT_INFO)
 * @return the previously installed handler, or RT_NULL when the vector is
 *         out of range
 */
rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
        void *param, const char *name)
{
    rt_isr_handler_t old_handler = RT_NULL;

    /* out-of-range vectors are ignored */
    if (vector >= MAX_HANDLERS)
    {
        return old_handler;
    }

    old_handler = irq_desc[vector].handler;
    if (handler != RT_NULL)
    {
        irq_desc[vector].handler = (rt_isr_handler_t)handler;
        irq_desc[vector].param = param;
#ifdef RT_USING_INTERRUPT_INFO
        rt_snprintf(irq_desc[vector].name, RT_NAME_MAX - 1, "%s", name);
        irq_desc[vector].counter = 0;
#endif
    }
    return old_handler;
}
/* Initialize the interrupt layer: install the default handler on every
 * vector and open the PLIC priority threshold. */
void rt_hw_interrupt_init()
{
    int idx;

    /* point each vector at the "unhandled" stub */
    for (idx = 0; idx < MAX_HANDLERS; idx++)
    {
        irq_desc[idx].handler = (rt_isr_handler_t)rt_hw_interrupt_handle;
        irq_desc[idx].param = RT_NULL;
#ifdef RT_USING_INTERRUPT_INFO
        rt_snprintf(irq_desc[idx].name, RT_NAME_MAX - 1, "default");
        irq_desc[idx].counter = 0;
#endif
    }

    /* threshold 0 lets every non-zero priority interrupt through */
    plic_set_threshold(0);
}

View File

@ -0,0 +1,46 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-20 bigmagic The first version
*/
#ifndef INTERRUPT_H__
#define INTERRUPT_H__
/* number of external interrupt vectors managed by this layer */
#define MAX_HANDLERS 128
#include <rthw.h>
#include "stack.h"
/* RISC-V exception cause codes (the exception-code field of xcause) */
enum
{
    EP_INSTRUCTION_ADDRESS_MISALIGNED = 0,
    EP_INSTRUCTION_ACCESS_FAULT,
    EP_ILLEGAL_INSTRUCTION,
    EP_BREAKPOINT,
    EP_LOAD_ADDRESS_MISALIGNED,
    EP_LOAD_ACCESS_FAULT,
    EP_STORE_ADDRESS_MISALIGNED,
    EP_STORE_ACCESS_FAULT,
    EP_ENVIRONMENT_CALL_U_MODE,
    EP_ENVIRONMENT_CALL_S_MODE,
    EP_RESERVED10,
    EP_ENVIRONMENT_CALL_M_MODE,
    EP_INSTRUCTION_PAGE_FAULT, /* page attr */
    EP_LOAD_PAGE_FAULT, /* read data */
    EP_RESERVED14,
    EP_STORE_PAGE_FAULT, /* write data */
};
int rt_hw_plic_irq_enable(int irq_number);
int rt_hw_plic_irq_disable(int irq_number);
void rt_hw_interrupt_init(void);
void rt_hw_interrupt_mask(int vector);
rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler, void *param, const char *name);
void handle_trap(rt_ubase_t xcause, rt_ubase_t xtval, rt_ubase_t xepc, struct rt_hw_stack_frame *sp);
#endif

View File

@ -0,0 +1,40 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-09 Shell Add portable asm support
*/
#ifndef __OPCODE_H__
#define __OPCODE_H__
/**
 * @brief binary opcode pseudo operations
 * Used to bypass toolchain restriction on extension ISA
 *
 */
/**
 * @brief RISC-V instruction formats
 */
/**
 * R type: .insn r opcode6, func3, func7, rd, rs1, rs2
 *
 * +-------+-----+-----+-------+----+---------+
 * | func7 | rs2 | rs1 | func3 | rd | opcode6 |
 * +-------+-----+-----+-------+----+---------+
 * 31      25    20    15      12   7         0
 */
/* expands to an `.insn r` directive with all fields stringified */
#define __OPC_INSN_FORMAT_R(opcode, func3, func7, rd, rs1, rs2) \
    ".insn r "RT_STRINGIFY(opcode)","RT_STRINGIFY(func3)","RT_STRINGIFY(func7)","RT_STRINGIFY(rd)","RT_STRINGIFY(rs1)","RT_STRINGIFY(rs2)
#ifdef _TOOLCHAIN_SUPP_ZIFENCEI_ISA_
#define OPC_FENCE_I "fence.i"
#else /* !_TOOLCHAIN_SUPP_ZIFENCEI_ISA_ */
/* raw encoding of fence.i for toolchains without Zifencei support */
#define OPC_FENCE_I ".long 0x0000100F"
#endif /* _TOOLCHAIN_SUPP_ZIFENCEI_ISA_ */
#endif /* __OPCODE_H__ */

View File

@ -0,0 +1,156 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-20 bigmagic first version
* 2022-09-16 WangXiaoyao Porting to rv64
*/
#include <rthw.h>
#include <rtthread.h>
#include <stdint.h>
#include "plic.h"
#include <riscv_io.h>
#include "encoding.h"
#include <interrupt.h>
#include <riscv.h>
#include <string.h>
#include <stdlib.h>
#ifdef RT_USING_SMART
#include <ioremap.h>
#else
#define rt_ioremap(addr, ...) (addr)
#endif
size_t plic_base = 0x0c000000L;
/*
* Each PLIC interrupt source can be assigned a priority by writing
* to its 32-bit memory-mapped priority register.
* The QEMU-virt (the same as FU540-C000) supports 7 levels of priority.
* A priority value of 0 is reserved to mean "never interrupt" and
* effectively disables the interrupt.
* Priority 1 is the lowest active priority, and priority 7 is the highest.
* Ties between global interrupts of the same priority are broken by
* the Interrupt ID; interrupts with the lowest ID have the highest
* effective priority.
*/
/* Write the 32-bit memory-mapped priority register of `irq`
 * (0 disables the source; 1..7 are active priorities). */
void plic_set_priority(int irq, int priority)
{
    *(uint32_t *)PLIC_PRIORITY(irq) = priority;
}
/*
* Each global interrupt can be enabled by setting the corresponding
* bit in the enables registers.
*/
/* Set the enable bit of `irq` for the current hart, then unmask external
 * interrupts in the CSR.
 * NOTE(review): only the first 32-bit enable word is touched, so this
 * supports irq 0..31 — confirm for larger IRQ numbers.
 * NOTE(review): the read-modify-write of the enable word is not atomic —
 * confirm callers serialize against concurrent enable/disable. */
void plic_irq_enable(int irq)
{
    int hart = __raw_hartid();
    *(uint32_t *)PLIC_ENABLE(hart) = ((*(uint32_t *)PLIC_ENABLE(hart)) | (1 << irq));
#ifdef RISCV_VIRT64_S_MODE
    /* S-mode: enable supervisor external interrupts */
    set_csr(sie, read_csr(sie) | MIP_SEIP);
#else
    set_csr(mie, read_csr(mie) | MIP_MEIP);
#endif
}
/* Clear the enable bit of `irq` for the current hart.
 * NOTE(review): same irq < 32 / non-atomic RMW caveats as plic_irq_enable. */
void plic_irq_disable(int irq)
{
    int hart = __raw_hartid();
    *(uint32_t *)PLIC_ENABLE(hart) = (((*(uint32_t *)PLIC_ENABLE(hart)) & (~(1 << irq))));
}
/*
* PLIC will mask all interrupts of a priority less than or equal to threshold.
* Maximum threshold is 7.
* For example, a threshold value of zero permits all interrupts with
* non-zero priority, whereas a value of 7 masks all interrupts.
* Notice, the threshold is global for PLIC, not for each interrupt source.
*/
/* Mask all interrupts with priority <= `threshold` for the current hart's
 * context (0 permits every non-zero priority source). */
void plic_set_threshold(int threshold)
{
    int hart = __raw_hartid();
    *(uint32_t *)PLIC_THRESHOLD(hart) = threshold;
}
/*
* DESCRIPTION:
* Query the PLIC what interrupt we should serve.
* Perform an interrupt claim by reading the claim register, which
* returns the ID of the highest-priority pending interrupt or zero if there
* is no pending interrupt.
* A successful claim also atomically clears the corresponding pending bit
* on the interrupt source.
* RETURN VALUE:
* the ID of the highest-priority pending interrupt or zero if there
* is no pending interrupt.
*/
int plic_claim(void)
{
    int hart = __raw_hartid();
    /* reading the claim register claims the highest-priority pending
     * interrupt (0 if none) and clears its pending bit */
    int irq = *(uint32_t *)PLIC_CLAIM(hart);
    return irq;
}
/*
* DESCRIPTION:
* Writing the interrupt ID it received from the claim (irq) to the
* complete register would signal the PLIC we've served this IRQ.
* The PLIC does not check whether the completion ID is the same as the
* last claim ID for that target. If the completion ID does not match an
* interrupt source that is currently enabled for the target, the completion
* is silently ignored.
* RETURN VALUE: none
*/
void plic_complete(int irq)
{
    int hart = __raw_hartid();
    /* writing the claimed ID back signals completion to the PLIC */
    *(uint32_t *)PLIC_COMPLETE(hart) = irq;
}
/* Write one 32-bit word of the enable array (the 0x80 offset selects the
 * second per-hart enable bank at plic_base + PLIC_ENABLE_BASE). */
void plic_set_ie(rt_uint32_t word_index, rt_uint32_t val)
{
    volatile void *plic_ie = (void *)(rt_ubase_t)(plic_base + PLIC_ENABLE_BASE + 0x80 + word_index * 4);
    writel(val, plic_ie);
}
/* Enable every source word for the given hart's context.
 * NOTE(review): the loop starts at hartid * WORD_CNT_BYTE (128) but the
 * bound is a fixed 32 words, so it only executes for hartid 0 — consistent
 * with the single-core comment at the call site, but confirm before SMP use. */
static void _set_sie(int hartid)
{
    for (size_t i = hartid * WORD_CNT_BYTE; i < 32; i++)
        plic_set_ie(i, 0xffffffff);
}
/* Map the PLIC registers, open the threshold, give every configured source
 * the lowest active priority and enable them for the current hart. */
void plic_init()
{
    // PLIC takes up 64 MB space
    plic_base = (size_t)rt_ioremap((void *)plic_base, 64 * 1024 * 1024);
    /* threshold 0 permits every non-zero priority interrupt */
    plic_set_threshold(0);
    for (int i = 0; i < CONFIG_IRQ_NR; i++)
    {
        plic_set_priority(i, 1);
    }
    // in a single core system, only current context was set
    _set_sie(__raw_hartid());
}
extern struct rt_irq_desc irq_desc[MAX_HANDLERS];
/*
* Handling an interrupt is a two-step process: first you claim the interrupt
* by reading the claim register, then you complete the interrupt by writing
* that source ID back to the same claim register. This automatically enables
* and disables the interrupt, so there's nothing else to do.
*/
/*
 * Claim and dispatch one pending external interrupt.
 *
 * Per the PLIC specification a claim value of 0 means "no pending
 * interrupt"; the original code dispatched irq_desc[0] in that case.
 * Such spurious claims are now dropped without completing or dispatching.
 */
void plic_handle_irq(void)
{
    int plic_irq = plic_claim();

    /* claim ID 0 is reserved for "no interrupt pending" */
    if (plic_irq <= 0)
    {
        return;
    }
    plic_complete(plic_irq);
    irq_desc[plic_irq].handler(plic_irq, irq_desc[plic_irq].param);
}

View File

@ -0,0 +1,80 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-20 bigmagic first version
* 2021-10-20 bernard fix s-mode issue
*/
#ifndef __PLIC_H__
#define __PLIC_H__
#include <rtconfig.h>
#include <rthw.h>
/* register-map offsets of the QEMU-virt / FU540-style PLIC */
#define PLIC_PRIORITY_BASE 0x0
#define PLIC_PENDING_BASE 0x1000
#define PLIC_ENABLE_BASE 0x2000
#define PLIC_CONTEXT_BASE 0x200000
/* runtime (possibly ioremap'ed) base address, updated by plic_init() */
extern size_t plic_base;
#define VIRT_PLIC_BASE (plic_base)
#define PLIC_PRIORITY_OFFSET (0x0)
#define PLIC_PENDING_OFFSET (0x1000)
#define PLIC_ENABLE_STRIDE 0x80
#define PLIC_CONTEXT_STRIDE 0x1000
/* RT-Thread runs in S-mode on virt64 by default */
#define RISCV_VIRT64_S_MODE
#ifndef RISCV_VIRT64_S_MODE
/* M-mode context registers; contexts alternate M,S per hart, hence hart*2 */
#define PLIC_MENABLE_OFFSET (0x2000)
#define PLIC_MTHRESHOLD_OFFSET (0x200000)
#define PLIC_MCLAIM_OFFSET (0x200004)
#define PLIC_MCOMPLETE_OFFSET (0x200004)
#define PLIC_ENABLE(hart) (VIRT_PLIC_BASE + PLIC_MENABLE_OFFSET + (hart * 2) * PLIC_ENABLE_STRIDE)
#define PLIC_THRESHOLD(hart) (VIRT_PLIC_BASE + PLIC_MTHRESHOLD_OFFSET + (hart * 2) * PLIC_CONTEXT_STRIDE)
#define PLIC_CLAIM(hart) (VIRT_PLIC_BASE + PLIC_MCLAIM_OFFSET + (hart * 2) * PLIC_CONTEXT_STRIDE)
#define PLIC_COMPLETE(hart) (VIRT_PLIC_BASE + PLIC_MCOMPLETE_OFFSET + (hart * 2) * PLIC_CONTEXT_STRIDE)
#else
/* S-mode context registers: one enable bank / context stride past M-mode's */
#define PLIC_SENABLE_OFFSET (0x2000 + PLIC_ENABLE_STRIDE)
#define PLIC_STHRESHOLD_OFFSET (0x200000 + PLIC_CONTEXT_STRIDE)
#define PLIC_SCLAIM_OFFSET (0x200004 + PLIC_CONTEXT_STRIDE)
#define PLIC_SCOMPLETE_OFFSET (0x200004 + PLIC_CONTEXT_STRIDE)
#define PLIC_ENABLE(hart) (VIRT_PLIC_BASE + PLIC_SENABLE_OFFSET + (hart * 2) * PLIC_ENABLE_STRIDE)
#define PLIC_THRESHOLD(hart) (VIRT_PLIC_BASE + PLIC_STHRESHOLD_OFFSET + (hart * 2) * PLIC_CONTEXT_STRIDE)
#define PLIC_CLAIM(hart) (VIRT_PLIC_BASE + PLIC_SCLAIM_OFFSET + (hart * 2) * PLIC_CONTEXT_STRIDE)
#define PLIC_COMPLETE(hart) (VIRT_PLIC_BASE + PLIC_SCOMPLETE_OFFSET + (hart * 2) * PLIC_CONTEXT_STRIDE)
#endif
#define PLIC_PRIORITY(id) (VIRT_PLIC_BASE + PLIC_PRIORITY_OFFSET + (id) * 4)
/* NOTE(review): pending bits are packed 32 per 32-bit word, so the byte
 * offset is normally ((id) / 32) * 4; this macro lacks the * 4 scaling.
 * It appears unused in the visible sources — confirm before using. */
#define PLIC_PENDING(id) (VIRT_PLIC_BASE + PLIC_PENDING_OFFSET + ((id) / 32))
#define WORD_CNT_BYTE (1024 / 8)
/* IRQ config in system, max 1024 (from 0 to 1023) */
#define CONFIG_IRQ_NR (128)
#define CONFIG_IRQ_WORD (CONFIG_IRQ_NR / 32)
void plic_set_priority(int irq, int priority);
void plic_irq_enable(int irq);
void plic_irq_disable(int irq);
void plic_set_threshold(int mthreshold);
int plic_claim(void);
void plic_complete(int irq);
void plic_set_thresh(rt_uint32_t val);
void plic_set_ie(rt_uint32_t word_index,rt_uint32_t val);
void plic_init();
void plic_handle_irq(void);
#endif

View File

@ -0,0 +1,189 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-01-30 lizhirui first version
* 2023-10-12 Shell Add permission control API
*/
#ifndef __RISCV_MMU_H__
#define __RISCV_MMU_H__
#include <rtthread.h>
#include <rthw.h>
#include "riscv.h"
#undef PAGE_SIZE
#define PAGE_OFFSET_SHIFT 0
#define PAGE_OFFSET_BIT 12
#define PAGE_SIZE __SIZE(PAGE_OFFSET_BIT)
#define PAGE_OFFSET_MASK __MASK(PAGE_OFFSET_BIT)
#define VPN0_SHIFT (PAGE_OFFSET_SHIFT + PAGE_OFFSET_BIT)
#define VPN0_BIT 9
#define VPN1_SHIFT (VPN0_SHIFT + VPN0_BIT)
#define VPN1_BIT 9
#define VPN2_SHIFT (VPN1_SHIFT + VPN1_BIT)
#define VPN2_BIT 9
#define PPN0_SHIFT (PAGE_OFFSET_SHIFT + PAGE_OFFSET_BIT)
#define PPN0_BIT 9
#define PPN1_SHIFT (PPN0_SHIFT + PPN0_BIT)
#define PPN1_BIT 9
#define PPN2_SHIFT (PPN1_SHIFT + PPN1_BIT)
#define PPN2_BIT 26
#define PPN_BITS (PPN0_BIT + PPN1_BIT + PPN2_BIT)
#define L1_PAGE_SIZE __SIZE(PAGE_OFFSET_BIT + VPN0_BIT + VPN1_BIT)
#define L2_PAGE_SIZE __SIZE(PAGE_OFFSET_BIT + VPN0_BIT)
#define L3_PAGE_SIZE __SIZE(PAGE_OFFSET_BIT)
#define ARCH_ADDRESS_WIDTH_BITS 64
#define PHYSICAL_ADDRESS_WIDTH_BITS 56
#define PAGE_ATTR_NEXT_LEVEL (0)
#define PAGE_ATTR_RWX (PTE_X | PTE_W | PTE_R)
#define PAGE_ATTR_READONLY (PTE_R)
#define PAGE_ATTR_READEXECUTE (PTE_X | PTE_R)
#define PAGE_ATTR_USER (PTE_U)
#define PAGE_ATTR_SYSTEM (0)
#define PAGE_DEFAULT_ATTR_LEAF (PAGE_ATTR_RWX | PAGE_ATTR_USER | PTE_V | PTE_G)
#define PAGE_DEFAULT_ATTR_NEXT (PAGE_ATTR_NEXT_LEVEL | PTE_V | PTE_G)
#define PAGE_IS_LEAF(pte) __MASKVALUE(pte, PAGE_ATTR_RWX)
#define PTE_USED(pte) __MASKVALUE(pte, PTE_V)
/**
* encoding of SATP (Supervisor Address Translation and Protection register)
*/
#define SATP_MODE_OFFSET 60
#define SATP_MODE_BARE 0
#define SATP_MODE_SV39 8
#define SATP_MODE_SV48 9
#define SATP_MODE_SV57 10
#define SATP_MODE_SV64 11
#define ARCH_VADDR_WIDTH 39
#define SATP_MODE SATP_MODE_SV39
#define MMU_MAP_K_DEVICE (PTE_G | PTE_W | PTE_R | PTE_V)
#define MMU_MAP_K_RWCB (PTE_G | PTE_X | PTE_W | PTE_R | PTE_V)
#define MMU_MAP_K_RW (PTE_G | PTE_X | PTE_W | PTE_R | PTE_V)
#define MMU_MAP_U_RWCB (PTE_U | PTE_X | PTE_W | PTE_R | PTE_V)
#define MMU_MAP_U_RWCB_XN (PTE_U | PTE_W | PTE_R | PTE_V)
#define MMU_MAP_U_RW (PTE_U | PTE_X | PTE_W | PTE_R | PTE_V)
#define MMU_MAP_EARLY (PAGE_ATTR_RWX | PTE_G | PTE_V)
#define PTE_XWR_MASK 0xe
#define ARCH_PAGE_SIZE PAGE_SIZE
#define ARCH_PAGE_MASK (ARCH_PAGE_SIZE - 1)
#define ARCH_PAGE_SHIFT PAGE_OFFSET_BIT
#define ARCH_INDEX_WIDTH 9
#define ARCH_INDEX_SIZE (1ul << ARCH_INDEX_WIDTH)
#define ARCH_INDEX_MASK (ARCH_INDEX_SIZE - 1)
#define ARCH_MAP_FAILED ((void *)-1)
void mmu_set_pagetable(rt_ubase_t addr);
void mmu_enable_user_page_access(void);
void mmu_disable_user_page_access(void);
#define RT_HW_MMU_PROT_READ 1
#define RT_HW_MMU_PROT_WRITE 2
#define RT_HW_MMU_PROT_EXECUTE 4
#define RT_HW_MMU_PROT_KERNEL 8
#define RT_HW_MMU_PROT_USER 16
#define RT_HW_MMU_PROT_CACHE 32
void rt_hw_asid_init(void);
struct rt_aspace;
void rt_hw_asid_switch_pgtbl(struct rt_aspace *aspace, rt_ubase_t pgtbl);
/**
 * @brief Remove a permission from an architecture MMU attribute.
 *
 * Only removal of the write permission (for a user or a kernel mapping) is
 * supported; any other combination asserts.
 *
 * @param attr architecture specified mmu attribution
 * @param prot protect that will be removed
 * @return size_t returned attribution
 */
rt_inline size_t rt_hw_mmu_attr_rm_perm(size_t attr, rt_base_t prot)
{
    if (prot == (RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER) ||
        prot == (RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_KERNEL))
    {
        /* both variants clear the same W bit */
        attr &= ~PTE_W;
    }
    else
    {
        RT_ASSERT(0);
    }
    return attr;
}
/**
 * @brief Add a permission to an architecture MMU attribute.
 *
 * Only granting user write access is supported; that implies making the
 * page readable and user-accessible as well. Any other request asserts.
 *
 * @param attr architecture specified mmu attribution
 * @param prot protect that will be added
 * @return size_t returned attribution
 */
rt_inline size_t rt_hw_mmu_attr_add_perm(size_t attr, rt_base_t prot)
{
    if (prot == (RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER))
    {
        /* user-writable pages are also readable and user visible */
        attr |= (PTE_R | PTE_W | PTE_U);
    }
    else
    {
        RT_ASSERT(0);
    }
    return attr;
}
/**
 * @brief Test whether an architecture MMU attribute allows an access.
 *
 * @param attr architecture specified mmu attribution
 * @param prot protect that will be tested (may be OR'ed with
 *             RT_HW_MMU_PROT_USER to additionally require the U bit)
 * @return rt_bool_t RT_TRUE if the prot is allowed, otherwise RT_FALSE
 */
rt_inline rt_bool_t rt_hw_mmu_attr_test_perm(size_t attr, rt_base_t prot)
{
    rt_bool_t allowed = 0;
    rt_base_t base_prot = prot & ~RT_HW_MMU_PROT_USER;

    if (base_prot == RT_HW_MMU_PROT_WRITE)
    {
        /* a writable leaf must also be readable */
        allowed = ((attr & PTE_W) && (attr & PTE_R));
    }
    else if (base_prot == RT_HW_MMU_PROT_READ)
    {
        allowed = !!(attr & PTE_R);
    }
    else if (base_prot == RT_HW_MMU_PROT_EXECUTE)
    {
        allowed = !!(attr & PTE_X);
    }
    else
    {
        RT_ASSERT(0);
    }

    /* user access additionally requires the U bit on the entry */
    if (allowed && (prot & RT_HW_MMU_PROT_USER))
    {
        allowed = !!(attr & PTE_U);
    }
    return allowed;
}
#endif

View File

@ -0,0 +1,24 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-12-07 RT-Thread the first version
*/
#include <rthw.h>
#include <rtthread.h>
#include "board.h"
/* Zero the .bss region, one unsigned int at a time, before any C code
 * relies on zero-initialized statics. */
void init_bss(void)
{
    for (unsigned int *p = &__bss_start; p < &__bss_end; p++)
    {
        *p = 0;
    }
}