原始版本

This commit is contained in:
冯佳
2025-06-19 21:56:46 +08:00
parent fe98e5f010
commit a4841450cf
4152 changed files with 1910684 additions and 0 deletions

309
RT_Thread/libcpu/Kconfig Normal file
View File

@ -0,0 +1,309 @@
if ARCH_ARMV8 && ARCH_CPU_64BIT
orsource "./aarch64/Kconfig"
endif
config ARCH_CPU_64BIT
bool
config RT_USING_CACHE
bool
default n
config RT_USING_HW_ATOMIC
bool
default n
config ARCH_CPU_BIG_ENDIAN
bool
config ARCH_ARM_BOOTWITH_FLUSH_CACHE
bool
default n
config ARCH_CPU_STACK_GROWS_UPWARD
bool
default n
config RT_USING_CPU_FFS
bool
default n
config ARCH_MM_MMU
bool
config ARCH_MM_MPU
bool
config ARCH_ARM
bool
config ARCH_ARM_CORTEX_M
bool
select ARCH_ARM
config ARCH_ARM_CORTEX_R
bool
select ARCH_ARM
config ARCH_ARM_CORTEX_FPU
bool
config ARCH_ARM_CORTEX_SECURE
bool
config ARCH_ARM_CORTEX_M0
bool
select ARCH_ARM_CORTEX_M
config ARCH_ARM_CORTEX_M3
bool
select ARCH_ARM_CORTEX_M
select RT_USING_CPU_FFS
select RT_USING_HW_ATOMIC
config ARCH_ARM_MPU
bool
depends on ARCH_ARM
select ARCH_MM_MPU
config ARCH_ARM_CORTEX_M4
bool
select ARCH_ARM_CORTEX_M
select RT_USING_CPU_FFS
select RT_USING_HW_ATOMIC
config ARCH_ARM_CORTEX_M7
bool
select ARCH_ARM_CORTEX_M
select RT_USING_CPU_FFS
select RT_USING_CACHE
select RT_USING_HW_ATOMIC
config ARCH_ARM_CORTEX_M85
bool
select ARCH_ARM_CORTEX_M
select RT_USING_CPU_FFS
select RT_USING_HW_ATOMIC
config ARCH_ARM_CORTEX_M23
bool
select ARCH_ARM_CORTEX_M
select RT_USING_HW_ATOMIC
config ARCH_ARM_CORTEX_M33
bool
select ARCH_ARM_CORTEX_M
select RT_USING_CPU_FFS
select RT_USING_HW_ATOMIC
# NOTE(review): ARCH_ARM_CORTEX_R is already declared above (with
# "select ARCH_ARM"); Kconfig merges duplicate config entries, so this
# block only contributes the extra selects. Consider folding them into
# the first declaration to avoid confusion.
config ARCH_ARM_CORTEX_R
bool
select ARCH_ARM
select RT_USING_HW_ATOMIC
config ARCH_ARM_CORTEX_R52
bool
select ARCH_ARM_CORTEX_R
config ARCH_ARM_MMU
bool
select RT_USING_CACHE
select ARCH_MM_MMU
depends on ARCH_ARM
if RT_USING_SMART
config KERNEL_VADDR_START
hex "The virtural address of kernel start"
default 0xffff000000000000 if ARCH_ARMV8
default 0xc0000000 if ARCH_ARM
default 0xffffffc000000000 if ARCH_RISCV && ARCH_REMAP_KERNEL
default 0x80000000 if ARCH_RISCV
depends on ARCH_MM_MMU
config RT_IOREMAP_LATE
# Fixed typo in the user-visible prompt: "initlalization" -> "initialization"
bool "Support to create IO mapping in the kernel address space after system initialization."
default n
depends on ARCH_ARM_CORTEX_A
depends on ARCH_MM_MMU
endif
config ARCH_ARM_ARM9
bool
select ARCH_ARM
config ARCH_ARM_ARM11
bool
select ARCH_ARM
config ARCH_ARM_CORTEX_A
bool
select ARCH_ARM
select ARCH_ARM_MMU
select RT_USING_CPU_FFS
select RT_USING_HW_ATOMIC
if ARCH_ARM_CORTEX_A
config RT_SMP_AUTO_BOOT
bool
default n
config RT_USING_GIC_V2
bool
default n
config RT_USING_GIC_V3
bool
default n
config RT_NO_USING_GIC
bool
default y if !RT_USING_GIC_V2 && !RT_USING_GIC_V3
endif
config ARCH_ARM_CORTEX_A5
bool
select ARCH_ARM_CORTEX_A
config ARCH_ARM_CORTEX_A7
bool
select ARCH_ARM_CORTEX_A
config ARCH_ARM_CORTEX_A8
bool
select ARCH_ARM_CORTEX_A
config ARCH_ARM_CORTEX_A9
bool
select ARCH_ARM_CORTEX_A
config ARCH_ARM_CORTEX_A55
bool
select ARCH_ARM_CORTEX_A
config ARCH_ARM_SECURE_MODE
bool "Running in secure mode [ARM Cortex-A]"
default n
depends on ARCH_ARM_CORTEX_A
config RT_BACKTRACE_FUNCTION_NAME
bool "To show function name when backtrace."
default n
depends on ARCH_ARM_CORTEX_A
config ARCH_ARMV8
bool
select ARCH_ARM
select ARCH_ARM_MMU
select RT_USING_CPU_FFS
select ARCH_USING_ASID
select ARCH_USING_IRQ_CTX_LIST
config ARCH_MIPS
bool
config ARCH_MIPS64
bool
select ARCH_CPU_64BIT
config ARCH_MIPS_XBURST
bool
select ARCH_MIPS
config ARCH_ANDES
bool
config ARCH_CSKY
bool
config ARCH_POWERPC
bool
config ARCH_RISCV
bool
config ARCH_RISCV_FPU
bool
config ARCH_RISCV_VECTOR
bool
if ARCH_RISCV_VECTOR
choice
prompt "RISCV Vector Vlen"
default ARCH_VECTOR_VLEN_128
config ARCH_VECTOR_VLEN_128
bool "128"
config ARCH_VECTOR_VLEN_256
bool "256"
endchoice
endif
config ARCH_RISCV_FPU_S
select ARCH_RISCV_FPU
bool
config ARCH_RISCV_FPU_D
select ARCH_RISCV_FPU
bool
config ARCH_RISCV32
select ARCH_RISCV
bool
config ARCH_RISCV64
select ARCH_RISCV
select ARCH_CPU_64BIT
bool
if ARCH_RISCV64
config ARCH_USING_NEW_CTX_SWITCH
bool
default y
config ARCH_USING_RISCV_COMMON64
bool
depends on ARCH_RISCV64
select RT_USING_CPUTIME
select ARCH_USING_NEW_CTX_SWITCH
help
Using the common64 implementation under ./libcpu/risc-v
endif
config ARCH_REMAP_KERNEL
bool
depends on RT_USING_SMART
help
Remapping kernel image to high virtual address region
config ARCH_USING_ASID
bool
depends on RT_USING_SMART
help
Using ASID support from architecture
config ARCH_IA32
bool
config ARCH_TIDSP
bool
config ARCH_TIDSP_C28X
bool
select ARCH_TIDSP
select ARCH_CPU_STACK_GROWS_UPWARD
config ARCH_HOST_SIMULATOR
bool
config ARCH_CPU_STACK_GROWS_UPWARD
bool
default n
config ARCH_USING_HW_THREAD_SELF
bool
default n
config ARCH_USING_IRQ_CTX_LIST
bool
default n

View File

@ -0,0 +1,15 @@
# RT-Thread building script for bridge
#
# Dispatches the build into the sub-directory matching the configured
# architecture (rtconfig.ARCH) and returns the collected build group.
import os
from building import *

Import('rtconfig')

cwd = GetCurrentDir()
group = []

# Renamed from "list" to avoid shadowing the Python builtin.
arch_dirs = os.listdir(cwd)
if rtconfig.ARCH in arch_dirs:
    group = group + SConscript(os.path.join(rtconfig.ARCH, 'SConscript'))

Return('group')

1
RT_Thread/libcpu/aarch64/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
link.lds

View File

@ -0,0 +1,25 @@
menu "AArch64 Architecture Configuration"
config ARCH_TEXT_OFFSET
hex "Text offset"
default 0x200000
config ARCH_RAM_OFFSET
hex "RAM offset"
default 0
config ARCH_SECONDARY_CPU_STACK_SIZE
int "Secondary CPU stack size"
default 4096
config ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS
bool
default y
config ARCH_USING_GENERIC_CPUID
# Fixed typo in the user-visible prompt: "implemenation" -> "implementation"
bool "Using generic cpuid implementation"
select ARCH_USING_HW_THREAD_SELF
default y if RT_USING_OFW
default n
config ARCH_HEAP_SIZE
hex "Size of system heap"
default 0x4000000
config ARCH_INIT_PAGE_SIZE
hex "Size of init page region"
default 0x200000
endmenu

View File

@ -0,0 +1,27 @@
# RT-Thread building script for bridge
#
# Sets up the AArch64 link script and collects the common plus CPU-specific
# sub-directory build groups.
import os
from building import *

Import('rtconfig')

cwd = GetCurrentDir()
group = []
bsp_path = Dir('#').abspath

# If the BSP does not ship its own link.lds, fall back to the script
# generated from this directory's link.lds.S template.
if not os.path.exists(bsp_path + "/link.lds"):
    Env['LINKFLAGS'] = Env['LINKFLAGS'].replace('link.lds', cwd + "/link.lds")
    Preprocessing("link.lds.S", ".lds", CPPPATH=[bsp_path])

# fix the linker with crtx.o
Env['LINKFLAGS'] += ' -nostartfiles'

# add common code files
group = group + SConscript(os.path.join('common', 'SConscript'))

# cpu porting code files (removed the unused "list = os.listdir(cwd)")
if rtconfig.CPU != 'common':
    group = group + SConscript(os.path.join(rtconfig.CPU, 'SConscript'))

Return('group')

View File

@ -0,0 +1,40 @@
# RT-Thread building script for component
#
# Builds the common AArch64 sources, pruning files that the selected
# subsystems (OFW/PIC/hwtimer) supersede, then recurses into sub-directories.
import os  # BUGFIX: os was used below (os.listdir/os.path) but never imported
from building import *

Import('rtconfig')

cwd = GetCurrentDir()
src = Glob('*.c') + Glob('*.cpp') + Glob('*.S')
CPPPATH = [cwd, cwd + '/include']

# Pick the scheduler core model: multi-processor vs uni-processor sources.
if GetDepend('RT_USING_SMP'):
    core_model = 'mp'
else:
    core_model = 'up'
src += Glob(core_model + '/*.S')

# Without OFW there is no device-tree based setup or PSCI support.
if GetDepend('RT_USING_OFW') == False:
    SrcRemove(src, ['setup.c', 'cpu_psci.c', 'psci.c'])
# The PIC framework supersedes the legacy GIC/GTimer/interrupt drivers.
if GetDepend('RT_USING_PIC') == True:
    SrcRemove(src, ['gicv3.c', 'gic.c', 'gtimer.c', 'interrupt.c'])
if GetDepend('RT_HWTIMER_ARM_ARCH') == True:
    SrcRemove(src, ['gtimer.c'])

group = DefineGroup('libcpu', src, depend = [''], CPPPATH = CPPPATH)

# build for sub-directory
objs = []
for entry in os.listdir(cwd):
    path = os.path.join(cwd, entry)
    if os.path.isfile(os.path.join(path, 'SConscript')):
        objs = objs + SConscript(os.path.join(entry, 'SConscript'))
group = group + objs

Return('group')

View File

@ -0,0 +1,112 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-05-18 GuEe-GUI first version
*/
#include <rthw.h>
#include <rtatomic.h>
/* Atomically load a 64-bit value; the trailing "dmb ish" fences the load
 * against later accesses on the inner-shareable domain. */
rt_atomic_t rt_hw_atomic_load(volatile rt_atomic_t *ptr)
{
    rt_atomic_t ret;

    __asm__ volatile (
        " ldr %0, %1\n"
        " dmb ish"
        : "=r" (ret)
        : "Q" (*ptr)
        : "memory");

    return ret;
}

/* Atomically store a 64-bit value, then fence ("dmb ish") so the store is
 * ordered before subsequent memory accesses. */
void rt_hw_atomic_store(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    __asm__ volatile (
        " str %1, %0\n"
        " dmb ish"
        : "=Q" (*ptr)
        : "r" (val)
        : "memory");
}
/*
 * Generator for rt_hw_atomic_<op>(): an LL/SC (ldxr/stlxr) read-modify-write
 * loop that applies "ins" and returns the NEW value. "constraint" is the GCC
 * immediate-constraint letter tried before the register ("r") alternative.
 */
#define AARCH64_ATOMIC_OP_RETURN(op, ins, constraint) \
rt_atomic_t rt_hw_atomic_##op(volatile rt_atomic_t *ptr, rt_atomic_t in_val) \
{ \
    rt_atomic_t tmp, val, result; \
\
    __asm__ volatile ( \
    " prfm pstl1strm, %3\n" \
    "1: ldxr %0, %3\n" \
    " "#ins " %1, %0, %4\n" \
    " stlxr %w2, %1, %3\n" \
    " cbnz %w2, 1b\n" \
    " dmb ish" \
    : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (*ptr) \
    : __RT_STRINGIFY(constraint) "r" (in_val) \
    : "memory"); \
\
    return result; \
}

AARCH64_ATOMIC_OP_RETURN(add, add, I)
AARCH64_ATOMIC_OP_RETURN(sub, sub, J)
/* NOTE(review): "K" is the 32-bit logical-immediate constraint while
 * rt_atomic_t is 64-bit here; unsupported immediates fall back to the "r"
 * alternative, so this is functional — confirm whether "L" was intended. */
AARCH64_ATOMIC_OP_RETURN(and, and, K)
AARCH64_ATOMIC_OP_RETURN(or, orr, K)
AARCH64_ATOMIC_OP_RETURN(xor, eor, K)

/* Atomically swap in "val" and return the previous contents of *ptr. */
rt_atomic_t rt_hw_atomic_exchange(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t ret, tmp;

    __asm__ volatile (
        " prfm pstl1strm, %2\n"
        "1: ldxr %0, %2\n"
        " stlxr %w1, %3, %2\n"
        " cbnz %w1, 1b\n"
        " dmb ish"
        : "=&r" (ret), "=&r" (tmp), "+Q" (*ptr)
        : "r" (val)
        : "memory");

    return ret;
}

/* Clear the flag by atomically AND-ing the whole word with 0. */
void rt_hw_atomic_flag_clear(volatile rt_atomic_t *ptr)
{
    rt_hw_atomic_and(ptr, 0);
}

/* Set bit 0 atomically; a nonzero return means the flag was already set. */
rt_atomic_t rt_hw_atomic_flag_test_and_set(volatile rt_atomic_t *ptr)
{
    return rt_hw_atomic_or(ptr, 1);
}

/*
 * Compare-and-swap: if *ptr == *old, store "new" and return nonzero
 * (success); otherwise write the value actually observed back through
 * "old" and return 0.
 */
rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_atomic_t *old, rt_atomic_t new)
{
    rt_atomic_t tmp, oldval;

    __asm__ volatile (
        " prfm pstl1strm, %2\n"
        "1: ldxr %0, %2\n"
        " eor %1, %0, %3\n"          /* tmp = observed ^ expected */
        " cbnz %1, 2f\n"             /* mismatch: report failure */
        " stlxr %w1, %4, %2\n"
        " cbnz %w1, 1b\n"            /* store lost exclusivity: retry */
        " dmb ish\n"
        " mov %w1, #1\n"
        " b 3f\n"
        "2: str %0, [%5]\n"          /* failure: expose observed value */
        " mov %w1, #0\n"
        "3:"
        : "=&r" (oldval), "=&r" (tmp), "+Q" (*ptr)
        : "Kr" (*old), "r" (new), "r" (old)
        : "memory");

    return tmp;
}

View File

@ -0,0 +1,138 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-06-02 Jesven the first version
* 2023-06-24 WangXiaoyao Support backtrace for non-active thread
* 2023-10-16 Shell Support a new backtrace framework
*/
#include <rtthread.h>
#include <rthw.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "mm_aspace.h"
#include "mmu.h"
/* AArch64 instructions are 4 bytes; used to step a saved LR back into the
 * call site. */
#define INST_WORD_BYTES 4
#define WORD sizeof(rt_base_t)
/* Fetch machine word "id" from a saved context block at pctx. */
#define ARCH_CONTEXT_FETCH(pctx, id) (*(((unsigned long *)pctx) + (id)))
#define PTR_NORMALIZE(ptr) (ptr = rt_backtrace_ptr_normalize(ptr))

/* Hook for BSPs to rewrite a frame pointer before dereferencing it;
 * the default is the identity mapping. */
rt_weak void *rt_backtrace_ptr_normalize(void *ptr)
{
    return ptr;
}
/*
 * Unwind one kernel-space frame: read the saved (FP, LR) pair at *fp and
 * rewind the PC by one instruction so it points into the call site.
 * A frame that points at itself terminates the walk (-RT_ERROR).
 */
rt_inline rt_err_t _bt_kaddr(rt_ubase_t *fp, struct rt_hw_backtrace_frame *frame)
{
    PTR_NORMALIZE(fp);

    frame->fp = *fp;
    frame->pc = *(fp + 1) - INST_WORD_BYTES;

    return ((rt_ubase_t)fp == frame->fp) ? -RT_ERROR : RT_EOK;
}
#ifdef RT_USING_SMART
#include <lwp_user_mm.h>
/*
 * Unwind one frame located in user memory, fetching the saved FP/LR through
 * the lwp accessor instead of dereferencing the pointer directly.
 * Returns -RT_EFAULT on a failed copy, -RT_ERROR on a self-referencing
 * frame (end of chain), RT_EOK otherwise.
 */
rt_inline rt_err_t _bt_uaddr(rt_lwp_t lwp, rt_ubase_t *fp, struct rt_hw_backtrace_frame *frame)
{
    rt_err_t rc;
    if (lwp_data_get(lwp, &frame->fp, fp, WORD) != WORD)
    {
        rc = -RT_EFAULT;
    }
    else if (lwp_data_get(lwp, &frame->pc, fp + 1, WORD) != WORD)
    {
        rc = -RT_EFAULT;
    }
    else if ((rt_base_t)fp == frame->fp)
    {
        /* frame points at itself: terminate the walk */
        rc = -RT_ERROR;
    }
    else
    {
        /* step back one instruction so PC lands inside the call site */
        frame->pc -= INST_WORD_BYTES;
        rc = RT_EOK;
    }
    return rc;
}
#endif /* RT_USING_SMART */
/*
 * Advance "frame" one level up the call chain by reading the (FP, LR) pair
 * saved at frame->fp by the AAPCS64 prologue. Kernel addresses are read
 * directly (after a v2p mapping check); user addresses go through the lwp
 * accessor. Returns -RT_EFAULT for a null/unaligned/unmapped fp and
 * -RT_ERROR at the end of the chain.
 */
rt_err_t rt_hw_backtrace_frame_unwind(rt_thread_t thread, struct rt_hw_backtrace_frame *frame)
{
    rt_err_t rc = -RT_ERROR;
    rt_ubase_t *fp = (rt_ubase_t *)frame->fp;

    /* a valid frame pointer must be non-null and 8-byte aligned */
    if (fp && !((long)fp & 0x7))
    {
#ifdef RT_USING_SMART
#define IN_USER_SPACE(addr) ((rt_ubase_t)(addr) >= USER_VADDR_START && (rt_ubase_t)(addr) < USER_VADDR_TOP)
        if (thread && thread->lwp && rt_scheduler_is_available())
        {
            rt_lwp_t lwp = thread->lwp;
            void *this_lwp = lwp_self();
            /* kernel fp, or a frame of the currently-mapped lwp, that has a
             * physical backing: safe to dereference directly */
            if ((!IN_USER_SPACE(fp) || this_lwp == lwp) && rt_kmem_v2p(fp) != ARCH_MAP_FAILED)
            {
                rc = _bt_kaddr(fp, frame);
            }
            else if (lwp_user_accessible_ext(lwp, fp, sizeof(rt_base_t)))
            {
                rc = _bt_uaddr(lwp, fp, frame);
            }
            else
            {
                rc = -RT_EFAULT;
            }
        }
        else
#endif
        if (rt_kmem_v2p(fp) != ARCH_MAP_FAILED)
        {
            rc = _bt_kaddr(fp, frame);
        }
        else
        {
            rc = -RT_EFAULT;
        }
    }
    else
    {
        rc = -RT_EFAULT;
    }
    return rc;
}
/*
 * Seed an unwind from a suspended thread: read the saved PC and FP out of
 * its switch context (words 0 and 4 of the saved stack frame on this port).
 * Returns -RT_EINVAL when either argument is NULL.
 */
rt_err_t rt_hw_backtrace_frame_get(rt_thread_t thread, struct rt_hw_backtrace_frame *frame)
{
    if (!thread || !frame)
    {
        return -RT_EINVAL;
    }

    frame->pc = ARCH_CONTEXT_FETCH(thread->sp, 0);
    frame->fp = ARCH_CONTEXT_FETCH(thread->sp, 4);

    return RT_EOK;
}

View File

@ -0,0 +1,204 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020-03-17 bigmagic first version
*/
/*
* void __asm_dcache_level(level)
*
* flush or invalidate one level cache.
*
* x0: cache level
* x1: 0 clean & invalidate, 1 invalidate only
* x2~x9: clobbered
*/
.globl __asm_dcache_level
__asm_dcache_level:
lsl x12, x0, #1
msr csselr_el1, x12 /* select cache level */
isb /* sync change of cssidr_el1 */
mrs x6, ccsidr_el1 /* read the new cssidr_el1 */
and x2, x6, #7 /* x2 <- log2(cache line size)-4 */
add x2, x2, #4 /* x2 <- log2(cache line size) */
mov x3, #0x3ff
and x3, x3, x6, lsr #3 /* x3 <- max number of #ways */
clz w5, w3 /* bit position of #ways */
mov x4, #0x7fff
and x4, x4, x6, lsr #13 /* x4 <- max number of #sets */
/* x12 <- cache level << 1 */
/* x2 <- line length offset */
/* x3 <- number of cache ways - 1 */
/* x4 <- number of cache sets - 1 */
/* x5 <- bit position of #ways */
loop_set:
mov x6, x3 /* x6 <- working copy of #ways */
loop_way:
lsl x7, x6, x5
orr x9, x12, x7 /* map way and level to cisw value */
lsl x7, x4, x2
orr x9, x9, x7 /* map set number to cisw value */
tbz w1, #0, 1f
dc isw, x9
b 2f
1: dc cisw, x9 /* clean & invalidate by set/way */
2: subs x6, x6, #1 /* decrement the way */
b.ge loop_way
subs x4, x4, #1 /* decrement the set */
b.ge loop_set
ret
/*
* void __asm_flush_dcache_all(int invalidate_only)
*
* x0: 0 clean & invalidate, 1 invalidate only
*
* flush or invalidate all data cache by SET/WAY.
*/
.globl __asm_dcache_all
__asm_dcache_all:
mov x1, x0
dsb sy
mrs x10, clidr_el1 /* read clidr_el1 */
lsr x11, x10, #24
and x11, x11, #0x7 /* x11 <- loc */
cbz x11, finished /* if loc is 0, exit */
mov x15, lr
mov x0, #0 /* start flush at cache level 0 */
/* x0 <- cache level */
/* x10 <- clidr_el1 */
/* x11 <- loc */
/* x15 <- return address */
loop_level:
lsl x12, x0, #1
add x12, x12, x0 /* x0 <- tripled cache level */
lsr x12, x10, x12
and x12, x12, #7 /* x12 <- cache type */
cmp x12, #2
b.lt skip /* skip if no cache or icache */
bl __asm_dcache_level /* x1 = 0 flush, 1 invalidate */
skip:
add x0, x0, #1 /* increment cache level */
cmp x11, x0
b.gt loop_level
mov x0, #0
msr csselr_el1, x0 /* restore csselr_el1 */
dsb sy
isb
mov lr, x15
finished:
ret
/*
 * void __asm_flush_dcache_all(void)
 * clean & invalidate the entire data cache by set/way (x0 = 0 selects
 * clean+invalidate in __asm_dcache_all)
 */
.globl __asm_flush_dcache_all
__asm_flush_dcache_all:
    mov x0, #0
    b __asm_dcache_all

/*
 * void __asm_invalidate_dcache_all(void)
 * invalidate (without cleaning) the entire data cache by set/way (x0 = 1)
 */
.globl __asm_invalidate_dcache_all
__asm_invalidate_dcache_all:
    mov x0, #0x1
    b __asm_dcache_all
/*
* void __asm_flush_dcache_range(start, end)
*
* clean & invalidate data cache in the range
*
* x0: start address
* x1: end address
*/
.globl __asm_flush_dcache_range
__asm_flush_dcache_range:
mrs x3, ctr_el0
lsr x3, x3, #16
and x3, x3, #0xf
mov x2, #4
lsl x2, x2, x3 /* cache line size */
/* x2 <- minimal cache line size in cache system */
sub x3, x2, #1
bic x0, x0, x3
1: dc civac, x0 /* clean & invalidate data or unified cache */
add x0, x0, x2
cmp x0, x1
b.lo 1b
dsb sy
ret
/* void __asm_invalidate_dcache_range(start, end)
*
* invalidate data cache in the range
*
* x0: start address
* x1: end address
*/
.globl __asm_invalidate_dcache_range
__asm_invalidate_dcache_range:
mrs x3, ctr_el0
lsr x3, x3, #16
and x3, x3, #0xf
mov x2, #4
lsl x2, x2, x3 /* cache line size */
/* x2 <- minimal cache line size in cache system */
sub x3, x2, #1
bic x0, x0, x3
1: dc ivac, x0 /* invalidate data or unified cache */
add x0, x0, x2
cmp x0, x1
b.lo 1b
dsb sy
ret
/* void __asm_invalidate_icache_range(start, end)
*
* invalidate icache in the range
*
* x0: start address
* x1: end address
*/
.globl __asm_invalidate_icache_range
__asm_invalidate_icache_range:
mrs x3, ctr_el0
and x3, x3, #0xf
mov x2, #4
lsl x2, x2, x3 /* cache line size */
/* x2 <- minimal cache line size in cache system */
sub x3, x2, #1
bic x0, x0, x3
1: ic ivau, x0 /* invalidate instruction or unified cache */
add x0, x0, x2
cmp x0, x1
b.lo 1b
dsb sy
ret
/*
* void __asm_invalidate_icache_all(void)
*
* invalidate all tlb entries.
*/
.globl __asm_invalidate_icache_all
__asm_invalidate_icache_all:
dsb sy
ic ialluis
isb sy
ret
.globl __asm_flush_l3_cache
__asm_flush_l3_cache:
mov x0, #0 /* return status as success */
ret

View File

@ -0,0 +1,79 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-03-29 quanzhao the first version
*/
#include <rthw.h>
#include <rtdef.h>
/* Cache maintenance primitives implemented in cache.S.
 * (Removed a duplicate declaration of __asm_invalidate_icache_all.) */
void __asm_invalidate_icache_all(void);
void __asm_flush_dcache_all(void);
void __asm_flush_dcache_range(rt_size_t start, rt_size_t end);
void __asm_invalidate_dcache_range(rt_size_t start, rt_size_t end);
void __asm_invalidate_icache_range(rt_size_t start, rt_size_t end);
void __asm_invalidate_dcache_all(void);
/* Cache line sizes are not reported on this port; 0 means "unknown".
 * NOTE(review): CTR_EL0 could provide the real value — confirm callers
 * tolerate 0 before relying on these. */
rt_inline rt_uint32_t rt_cpu_icache_line_size(void)
{
    return 0;
}

rt_inline rt_uint32_t rt_cpu_dcache_line_size(void)
{
    return 0;
}

/* Invalidate the instruction cache for [addr, addr + size). */
void rt_hw_cpu_icache_invalidate(void *addr, rt_size_t size)
{
    __asm_invalidate_icache_range((rt_size_t)addr, (rt_size_t)addr + size);
}

/* Invalidate (discard) the data cache for [addr, addr + size). */
void rt_hw_cpu_dcache_invalidate(void *addr, rt_size_t size)
{
    __asm_invalidate_dcache_range((rt_size_t)addr, (rt_size_t)addr + size);
}

/* Clean (write back) the data cache for [addr, addr + size).
 * NOTE(review): backed by the clean+invalidate routine, so lines are also
 * dropped — stronger than a pure clean. */
void rt_hw_cpu_dcache_clean(void *addr, rt_size_t size)
{
    __asm_flush_dcache_range((rt_size_t)addr, (rt_size_t)addr + size);
}

/* Clean and invalidate the data cache for [addr, addr + size). */
void rt_hw_cpu_dcache_clean_and_invalidate(void *addr, rt_size_t size)
{
    __asm_flush_dcache_range((rt_size_t)addr, (rt_size_t)addr + size);
}
/* Dispatch an i-cache maintenance request; only invalidation is supported. */
void rt_hw_cpu_icache_ops(int ops, void *addr, int size)
{
    if (ops != RT_HW_CACHE_INVALIDATE)
    {
        return;
    }

    rt_hw_cpu_icache_invalidate(addr, size);
}
/* Dispatch a d-cache maintenance request: flush (clean) or invalidate. */
void rt_hw_cpu_dcache_ops(int ops, void *addr, int size)
{
    switch (ops)
    {
    case RT_HW_CACHE_FLUSH:
        rt_hw_cpu_dcache_clean(addr, size);
        break;
    case RT_HW_CACHE_INVALIDATE:
        rt_hw_cpu_dcache_invalidate(addr, size);
        break;
    default:
        /* unknown operation: nothing to do */
        break;
    }
}
/* Cache status queries are not implemented on this port; always report 0.
 * NOTE(review): 0 presumably means "disabled/unknown" — confirm callers. */
rt_base_t rt_hw_cpu_icache_status(void)
{
    return 0;
}

rt_base_t rt_hw_cpu_dcache_status(void)
{
    return 0;
}

View File

@ -0,0 +1,243 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-09-15 Bernard first version
* 2019-07-28 zdzn add smp support
* 2023-02-21 GuEe-GUI mov cpu ofw init to setup
* 2024-04-29 Shell Add generic ticket spinlock using C11 atomic
*/
#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>
#include <cpu.h>
#define DBG_TAG "libcpu.aarch64.cpu"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#ifdef RT_USING_SMP
#define REPORT_ERR(retval) LOG_E("got error code %d in %s(), %s:%d", (retval), __func__, __FILE__, __LINE__)
#define CHECK_RETVAL(retval) if (retval) {REPORT_ERR(retval);}
#define cpuid_to_hwid(cpuid) \
((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? rt_cpu_mpidr_early[cpuid] : ID_ERROR)
#define set_hwid(cpuid, hwid) \
((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? (rt_cpu_mpidr_early[cpuid] = (hwid)) : ID_ERROR)
#define get_cpu_node(cpuid) \
((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? _cpu_node[cpuid] : NULL)
#define set_cpu_node(cpuid, node) \
((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? (_cpu_node[cpuid] = node) : NULL)
typedef rt_hw_spinlock_t arch_spinlock_t;
struct cpu_ops_t *cpu_ops_tbl[RT_CPUS_NR];
#ifdef RT_USING_SMART
// _id_to_mpidr is a table translate logical id to mpid, which is a 64-bit value
rt_uint64_t rt_cpu_mpidr_early[RT_CPUS_NR] rt_weak = {[0 ... RT_CPUS_NR - 1] = ID_ERROR};
#else
/* The more common mpidr_el1 table, redefine it in BSP if it is in other cases */
rt_weak rt_uint64_t rt_cpu_mpidr_early[] =
{
[0] = 0x80000000,
[1] = 0x80000001,
[2] = 0x80000002,
[3] = 0x80000003,
[4] = 0x80000004,
[5] = 0x80000005,
[6] = 0x80000006,
[7] = 0x80000007,
[RT_CPUS_NR] = 0
};
#endif /* RT_USING_SMART */
/* in support of C11 atomic */
#if __STDC_VERSION__ >= 201112L
#include <stdatomic.h>
/*
 * Ticket spinlock storage: the 32-bit lock word overlays an (owner, next)
 * pair of 16-bit tickets.
 * NOTE(review): the halves assume a little-endian layout (owner in the low
 * 16 bits of _value) — confirm for big-endian targets.
 */
union _spinlock
{
    _Atomic(rt_uint32_t) _value;
    struct
    {
        _Atomic(rt_uint16_t) owner;
        _Atomic(rt_uint16_t) next;
    } ticket;
};

/* Reset to the unlocked state: owner == next == 0. */
void rt_hw_spin_lock_init(rt_hw_spinlock_t *_lock)
{
    union _spinlock *lock = (void *)_lock;

    /**
     * just a dummy note that this is an atomic operation, though it alway is
     * even without usage of atomic API in arm64
     */
    atomic_store_explicit(&lock->_value, 0, memory_order_relaxed);
}

/*
 * Try to take the lock without waiting: succeed only when no ticket is
 * outstanding (owner == next) and the whole 32-bit lock word can be bumped
 * with one CAS. Returns RT_TRUE on success.
 */
rt_bool_t rt_hw_spin_trylock(rt_hw_spinlock_t *_lock)
{
    rt_bool_t rc;
    rt_uint32_t readonce;
    union _spinlock temp;
    union _spinlock *lock = (void *)_lock;

    readonce = atomic_load_explicit(&lock->_value, memory_order_acquire);
    temp._value = readonce;

    if (temp.ticket.owner != temp.ticket.next)
    {
        /* the lock is held or contended */
        rc = RT_FALSE;
    }
    else
    {
        temp.ticket.next += 1;
        /* the CAS fails harmlessly if the lock word changed underneath us */
        rc = atomic_compare_exchange_strong_explicit(
            &lock->_value, &readonce, temp._value,
            memory_order_acquire, memory_order_relaxed);
    }
    return rc;
}

/* Exclusive acquire-load of the owner halfword; arming the exclusive
 * monitor lets the subsequent wfe wake when the location is written. */
rt_inline rt_base_t _load_acq_exclusive(_Atomic(rt_uint16_t) *halfword)
{
    rt_uint32_t old;
    __asm__ volatile("ldaxrh %w0, [%1]"
                     : "=&r"(old)
                     : "r"(halfword)
                     : "memory");
    return old;
}

/* Set the local event flag so the first wfe falls straight through. */
rt_inline void _send_event_local(void)
{
    __asm__ volatile("sevl");
}

/* Low-power wait until an event (e.g. a write to a monitored location). */
rt_inline void _wait_for_event(void)
{
    __asm__ volatile("wfe" ::: "memory");
}

/*
 * Take a ticket (fetch-add on "next"), then wait — wfe-based, low power —
 * until "owner" advances to our ticket number.
 */
void rt_hw_spin_lock(rt_hw_spinlock_t *_lock)
{
    union _spinlock *lock = (void *)_lock;
    rt_uint16_t ticket =
        atomic_fetch_add_explicit(&lock->ticket.next, 1, memory_order_relaxed);

    if (atomic_load_explicit(&lock->ticket.owner, memory_order_acquire) !=
        ticket)
    {
        _send_event_local();
        do
        {
            _wait_for_event();
        }
        while (_load_acq_exclusive(&lock->ticket.owner) != ticket);
    }
}

/* Hand the lock to the next ticket holder (release ordering on "owner"). */
void rt_hw_spin_unlock(rt_hw_spinlock_t *_lock)
{
    union _spinlock *lock = (void *)_lock;
    atomic_fetch_add_explicit(&lock->ticket.owner, 1, memory_order_release);
}
#endif
/*
 * Publish the hardcoded boot description: copy each CPU's hardware id into
 * cpuid_to_hwid and its cpu_ops pointer into cpu_ops_tbl, clamping the
 * count to RT_CPUS_NR. Always returns 0.
 */
static int _cpus_init_data_hardcoded(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
{
    int idx;

    if (num_cpus > RT_CPUS_NR)
    {
        LOG_W("num_cpus (%d) greater than RT_CPUS_NR (%d)\n", num_cpus, RT_CPUS_NR);
        num_cpus = RT_CPUS_NR;
    }

    for (idx = 0; idx < num_cpus; idx++)
    {
        set_hwid(idx, cpu_hw_ids[idx]);
        cpu_ops_tbl[idx] = cpu_ops[idx];
    }

    return 0;
}
/** init cpu with hardcoded information or parsing from FDT */
static int _cpus_init(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
{
    int retval;

    /* first setup cpu_ops_tbl and cpuid_to_hwid */
    if (num_cpus > 0)
    {
        retval = _cpus_init_data_hardcoded(num_cpus, cpu_hw_ids, cpu_ops);
    }
    else
    {
        retval = -1;
    }

    if (retval)
        return retval;

    /* using cpuid_to_hwid and cpu_ops_tbl to call method_init and cpu_init,
     * assuming that cpuid 0 has already been initialized */
    for (int i = 1; i < RT_CPUS_NR; i++)
    {
        if (rt_cpu_mpidr_early[i] == ID_ERROR)
        {
            LOG_E("Failed to find hardware id of CPU %d", i);
            continue;
        }

        if (cpu_ops_tbl[i] && cpu_ops_tbl[i]->cpu_init)
        {
            retval = cpu_ops_tbl[i]->cpu_init(i, RT_NULL);
            CHECK_RETVAL(retval);
        }
        else
        {
            /* BUGFIX: pass the cpu index for "%d"; previously the 64-bit
             * hardware id (rt_cpu_mpidr_early[i]) was passed, which both
             * misreports the CPU number and mismatches the varargs width */
            LOG_E("Failed to find cpu_init for cpu %d with cpu_ops[%p], cpu_ops->cpu_init[%p]"
                , i, cpu_ops_tbl[i], cpu_ops_tbl[i] ? cpu_ops_tbl[i]->cpu_init : NULL);
        }
    }

    return 0;
}
/**
* @brief boot cpu with hardcoded data
*
* @param num_cpus number of cpus
* @param cpu_hw_ids each element represents a hwid of cpu[i]
* @param cpu_ops each element represents a pointer to cpu_ops of cpu[i]
* @return int 0 on success,
*/
int rt_hw_cpu_boot_secondary(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
{
    int retval;

    /* reject an obviously invalid boot description up front */
    if (num_cpus < 1 || !cpu_hw_ids || !cpu_ops)
    {
        return -1;
    }

    retval = _cpus_init(num_cpus, cpu_hw_ids, cpu_ops);
    CHECK_RETVAL(retval);

    return retval;
}
#endif /*RT_USING_SMP*/
/**
* @addtogroup ARM CPU
*/
/*@{*/
/* Architecture name reported to the kernel and shell. */
const char *rt_hw_cpu_arch(void)
{
    static const char arch_name[] = "aarch64";

    return arch_name;
}
/*@}*/

View File

@ -0,0 +1,338 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Date Author Notes
* 2018-10-06 ZhaoXiaowei the first version (cpu_gcc.S)
* 2021-05-18 Jesven the first version (context_gcc.S)
* 2024-01-06 Shell Fix barrier on irq_disable/enable
* 2024-01-18 Shell fix implicit dependency of cpuid management
* 2024-03-28 Shell Move cpu codes from context_gcc.S
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include "rtconfig.h"
#include "asm-generic.h"
#include "asm-fpu.h"
#include "armv8.h"
#ifdef RT_USING_SMP
#define rt_hw_interrupt_disable rt_hw_local_irq_disable
#define rt_hw_interrupt_enable rt_hw_local_irq_enable
#endif /* RT_USING_SMP */
.text
/**
* #ifdef RT_USING_OFW
* void rt_hw_cpu_id_set(long cpuid)
* #else
* void rt_hw_cpu_id_set(void)
* #endif
*/
.type rt_hw_cpu_id_set, @function
rt_hw_cpu_id_set:
#ifdef ARCH_USING_GENERIC_CPUID
.globl rt_hw_cpu_id_set
#else /* !ARCH_USING_GENERIC_CPUID */
.weak rt_hw_cpu_id_set
#endif /* ARCH_USING_GENERIC_CPUID */
#ifndef RT_USING_OFW
mrs x0, mpidr_el1 /* MPIDR_EL1: Multi-Processor Affinity Register */
#ifdef ARCH_ARM_CORTEX_A55
lsr x0, x0, #8
#endif /* ARCH_ARM_CORTEX_A55 */
and x0, x0, #15
#endif /* !RT_USING_OFW */
#ifdef ARCH_USING_HW_THREAD_SELF
msr tpidrro_el0, x0
#else /* !ARCH_USING_HW_THREAD_SELF */
msr tpidr_el1, x0
#endif /* ARCH_USING_HW_THREAD_SELF */
ret
/*
int rt_hw_cpu_id(void)
*/
.type rt_hw_cpu_id, @function
rt_hw_cpu_id:
#ifdef ARCH_USING_GENERIC_CPUID
.globl rt_hw_cpu_id
#else /* !ARCH_USING_GENERIC_CPUID */
.weak rt_hw_cpu_id
#endif /* ARCH_USING_GENERIC_CPUID */
#if RT_CPUS_NR > 1
#ifdef ARCH_USING_GENERIC_CPUID
mrs x0, tpidrro_el0
#else /* !ARCH_USING_GENERIC_CPUID */
mrs x0, tpidr_el1
#endif /* ARCH_USING_GENERIC_CPUID */
#else /* RT_CPUS_NR == 1 */
mov x0, xzr
#endif
ret
/*
void rt_hw_set_process_id(size_t id)
*/
.global rt_hw_set_process_id
rt_hw_set_process_id:
msr CONTEXTIDR_EL1, x0
ret
/*
*enable gtimer
*/
.globl rt_hw_gtimer_enable
rt_hw_gtimer_enable:
mov x0, #1
msr CNTP_CTL_EL0, x0
ret
/*
*set gtimer CNTP_TVAL_EL0 value
*/
.globl rt_hw_set_gtimer_val
rt_hw_set_gtimer_val:
msr CNTP_TVAL_EL0, x0
ret
/*
*get gtimer CNTP_TVAL_EL0 value
*/
.globl rt_hw_get_gtimer_val
rt_hw_get_gtimer_val:
mrs x0, CNTP_TVAL_EL0
ret
.globl rt_hw_get_cntpct_val
rt_hw_get_cntpct_val:
mrs x0, CNTPCT_EL0
ret
/*
*get gtimer frq value
*/
.globl rt_hw_get_gtimer_frq
rt_hw_get_gtimer_frq:
mrs x0, CNTFRQ_EL0
ret
.global rt_hw_interrupt_is_disabled
rt_hw_interrupt_is_disabled:
mrs x0, DAIF
tst x0, #0xc0
cset x0, NE
ret
/*
 * rt_base_t rt_hw_interrupt_disable();
 * returns the previous DAIF I/F mask bits (0xc0 means "already masked"),
 * then masks IRQ and FIQ. The fast path returns immediately when both
 * were already masked.
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    mrs x0, DAIF
    and x0, x0, #0xc0          /* keep only the I and F mask bits */
    cmp x0, #0xc0
    /* branch if bits not both set(zero) */
    bne 1f
    ret                        /* already fully masked: nothing to do */
1:
    msr DAIFSet, #3            /* mask IRQ + FIQ */
    dsb nsh
    isb
    ret
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * restores the I/F mask bits previously returned by
 * rt_hw_interrupt_disable; other DAIF bits are preserved.
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    and x0, x0, #0xc0
    cmp x0, #0xc0
    /* branch if one of the bits not set(zero) */
    bne 1f
    ret                        /* caller had interrupts masked: keep them */
1:
    isb
    dsb nsh
    and x0, x0, #0xc0
    mrs x1, DAIF
    bic x1, x1, #0xc0          /* clear I/F, then merge the saved bits */
    orr x0, x0, x1
    msr DAIF, x0
    ret
.globl rt_hw_get_current_el
rt_hw_get_current_el:
mrs x0, CurrentEL
cmp x0, 0xc
b.eq 3f
cmp x0, 0x8
b.eq 2f
cmp x0, 0x4
b.eq 1f
ldr x0, =0
b 0f
3:
ldr x0, =3
b 0f
2:
ldr x0, =2
b 0f
1:
ldr x0, =1
b 0f
0:
ret
.globl rt_hw_set_current_vbar
rt_hw_set_current_vbar:
mrs x1, CurrentEL
cmp x1, 0xc
b.eq 3f
cmp x1, 0x8
b.eq 2f
cmp x1, 0x4
b.eq 1f
b 0f
3:
msr VBAR_EL3,x0
b 0f
2:
msr VBAR_EL2,x0
b 0f
1:
msr VBAR_EL1,x0
b 0f
0:
ret
.globl rt_hw_set_elx_env
rt_hw_set_elx_env:
mrs x1, CurrentEL
cmp x1, 0xc
b.eq 3f
cmp x1, 0x8
b.eq 2f
cmp x1, 0x4
b.eq 1f
b 0f
3:
mrs x0, SCR_EL3
orr x0, x0, #0xf /* SCR_EL3.NS|IRQ|FIQ|EA */
msr SCR_EL3, x0
b 0f
2:
mrs x0, HCR_EL2
orr x0, x0, #0x38
msr HCR_EL2, x0
b 0f
1:
b 0f
0:
ret
.globl rt_cpu_vector_set_base
rt_cpu_vector_set_base:
msr VBAR_EL1, x0
ret
/**
 * unsigned long rt_hw_ffz(unsigned long x)
 * returns the 0-based index of the lowest clear bit: 63 - clz(~x).
 * NOTE(review): if x is all ones this computes 63 - 64 = -1 — callers are
 * expected to rule that case out first.
 */
.globl rt_hw_ffz
rt_hw_ffz:
    mvn x1, x0                 /* invert: lowest zero becomes lowest one */
    clz x0, x1
    mov x1, #0x3f
    sub x0, x1, x0
    ret

/* unsigned long rt_hw_clz(unsigned long x): 64-bit count-leading-zeros */
.globl rt_hw_clz
rt_hw_clz:
    clz x0, x0
    ret
/**
* Spinlock (fallback implementation)
*/
rt_hw_spin_lock_init:
.weak rt_hw_spin_lock_init
stlr wzr, [x0]
ret
rt_hw_spin_trylock:
.weak rt_hw_spin_trylock
sub sp, sp, #16
ldar w2, [x0]
add x1, sp, 8
stlr w2, [x1]
ldarh w1, [x1]
and w1, w1, 65535
add x3, sp, 10
ldarh w3, [x3]
cmp w1, w3, uxth
beq 1f
mov w0, 0
add sp, sp, 16
ret
1:
add x1, sp, 10
2:
ldaxrh w3, [x1]
add w3, w3, 1
stlxrh w4, w3, [x1]
cbnz w4, 2b
add x1, sp, 8
ldar w1, [x1]
3:
ldaxr w3, [x0]
cmp w3, w2
bne 4f
stxr w4, w1, [x0]
cbnz w4, 3b
4:
cset w0, eq
add sp, sp, 16
ret
rt_hw_spin_lock:
.weak rt_hw_spin_lock
add x1, x0, 2
1:
ldxrh w2, [x1]
add w3, w2, 1
stxrh w4, w3, [x1]
cbnz w4, 1b
and w2, w2, 65535
ldarh w1, [x0]
cmp w2, w1, uxth
beq 3f
sevl
2:
wfe
ldaxrh w1, [x0]
cmp w2, w1
bne 2b
3:
ret
rt_hw_spin_unlock:
.weak rt_hw_spin_unlock
ldxrh w1, [x0]
add w1, w1, 1
stlxrh w2, w1, [x0]
cbnz w2, rt_hw_spin_unlock
ret

View File

@ -0,0 +1,40 @@
/*
* Copyright (c) 2006-2019, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-21 GuEe-GUI replace with drivers/psci
*/
#include <rthw.h>
#include <rtthread.h>
#define DBG_TAG "cpu.aa64"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <cpu.h>
#include <cpuport.h>
#include <psci.h>
/* Power on a secondary core via PSCI CPU_ON, starting execution at "entry". */
static int psci_cpu_boot(rt_uint32_t cpuid, rt_uint64_t entry)
{
    return rt_psci_cpu_on(cpuid, entry);
}

/* Power down the calling core via PSCI CPU_OFF, requesting a core-level
 * powerdown state. */
static void psci_cpu_shutdown(void)
{
    rt_uint32_t state, state_id = PSCI_POWER_STATE_ID(0, 0, 0, PSCI_POWER_STATE_ID_POWERDOWN);

    state = PSCI_POWER_STATE(PSCI_POWER_STATE_LEVEL_CORES, PSCI_POWER_STATE_TYPE_STANDBY, state_id);

    rt_psci_cpu_off(state);
}

/* cpu_ops bound to the device tree enable-method = "psci"; PSCI needs no
 * per-cpu init hook, so .cpu_init is left NULL. */
struct cpu_ops_t cpu_psci_ops =
{
    .method = "psci",
    .cpu_boot = psci_cpu_boot,
    .cpu_shutdown = psci_cpu_shutdown,
};

View File

@ -0,0 +1,65 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-21 GuEe-GUI replace with ofw
*/
#include <rthw.h>
#include <rtthread.h>
#define DBG_TAG "cpu.aa64"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <cpu.h>
#include <cpuport.h>
#include <ioremap.h>
#include <drivers/core/dm.h>
#ifdef RT_USING_OFW
/* Per-cpu release mailbox address, parsed from the device tree's
 * "cpu-release-addr" property. */
static rt_uint64_t cpu_release_addr[RT_CPUS_NR];

/* Record where CPU "cpuid" spins waiting for its entry address. */
static int spin_table_cpu_init(rt_uint32_t cpuid, void *param)
{
    struct rt_ofw_node *cpu_np = param;

    rt_ofw_prop_read_u64(cpu_np, "cpu-release-addr", &cpu_release_addr[cpuid]);

    /* NOTE(review): "%p" receives a rt_uint64_t value rather than a pointer —
     * verify rt_kprintf accepts this on this port. */
    LOG_D("Using release address 0x%p for CPU %d", cpu_release_addr[cpuid], cpuid);

    return 0;
}

/* Wake CPU "cpuid": write its entry point into the release mailbox, publish
 * the store with a dsb, then sev so the spinning core re-reads the mailbox. */
static int spin_table_cpu_boot(rt_uint32_t cpuid, rt_uint64_t entry)
{
    void *cpu_release_vaddr;

    cpu_release_vaddr = rt_ioremap((void *)cpu_release_addr[cpuid], sizeof(cpu_release_addr[0]));

    if (!cpu_release_vaddr)
    {
        LOG_E("IO remap failing");
        return -1;
    }

    /* "rZ" lets the compiler use the zero register when entry == 0 */
    __asm__ volatile ("str %0, [%1]" ::"rZ"(entry), "r"(cpu_release_vaddr));
    rt_hw_barrier(dsb, sy);
    rt_hw_sev();

    rt_iounmap(cpu_release_vaddr);

    return 0;
}

/* cpu_ops bound to the device tree enable-method = "spin-table". */
struct cpu_ops_t cpu_spin_table_ops =
{
    .method = "spin-table",
    .cpu_init = spin_table_cpu_init,
    .cpu_boot = spin_table_cpu_boot,
};
#endif

View File

@ -0,0 +1,74 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-06-21 Zhangyan first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <board.h>
#ifdef RT_USING_CPU_FFS
/**
* This function finds the first bit set (beginning with the least significant bit)
* in value and return the index of that bit.
*
* Bits are numbered starting at 1 (the least significant bit). A return value of
* zero from any of these functions means that the argument was zero.
*
* @return return the index of the first bit set. If value is 0, then this function
* shall return 0.
*/
/* Find-first-set for a 32-bit value: 1-based bit index, 0 when value == 0. */
int __rt_ffs(int value)
{
#ifdef __GNUC__
    /* GCC/Clang provide a builtin with exactly this contract */
    return __builtin_ffs(value);
#else
    int result = value;

    __asm__ volatile (
        "rbit w1, %w0\n"             /* lowest set bit becomes highest */
        "cmp %w0, 0\n"
        "clz w1, w1\n"               /* 0-based index of that bit */
        "csinc %w0, wzr, w1, eq\n"   /* 0 for zero input, else index + 1 */
        : "=r"(result)
        : "0"(result)
    );
    return result;
#endif
}
/* Find-first-set for an unsigned long: 1-based bit index, 0 when value == 0. */
unsigned long __rt_ffsl(unsigned long value)
{
#ifdef __GNUC__
    return __builtin_ffsl(value);
#else
    if (!value)
    {
        return 0;
    }
    /* reverse bits so the lowest set bit becomes the highest */
    __asm__ volatile ("rbit %0, %0" : "+r" (value));
    /* BUGFIX: clz of the reversed word is the 0-based index of the lowest
     * set bit; ffs is 1-based, so add 1 to match __builtin_ffsl */
    return __rt_clz(value) + 1;
#endif
}
/* Count leading zero bits of an unsigned long.
 * NOTE: value must be nonzero — __builtin_clzl(0) is undefined. */
unsigned long __rt_clz(unsigned long value)
{
#ifdef __GNUC__
    /* BUGFIX: use the unsigned-long builtin; __builtin_clz operates on a
     * 32-bit int and returned results 32 too small (and truncated the high
     * bits) on LP64, disagreeing with the 64-bit "clz" fallback below */
    return __builtin_clzl(value);
#else
    unsigned long val;
    __asm__ volatile ("clz %0, %1"
        :"=r"(val)
        :"r"(value));
    return val;
#endif
}
#endif /* RT_USING_CPU_FFS */

View File

@ -0,0 +1,249 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-08 RT-Thread the first version
*/
#include "rtthread.h"
/**
 * Decode and print the cause of a data abort.
 *
 * @param far the faulting virtual address (read from FAR_EL1 by the caller)
 * @param iss the ISS field of ESR_EL1: bit[6] is WnR (write-not-read) and
 *            bits[5:0] are the DFSC fault status code decoded below
 */
static void data_abort(unsigned long far, unsigned long iss)
{
    rt_kprintf("fault addr = 0x%016lx\n", far);
    /* ISS.WnR: set when the abort was caused by a write access. */
    if (iss & 0x40)
    {
        rt_kprintf("abort caused by write instruction\n");
    }
    else
    {
        rt_kprintf("abort caused by read instruction\n");
    }
    /* ISS.DFSC (bits[5:0]): data fault status code. */
    switch (iss & 0x3f)
    {
    case 0b000000:
        rt_kprintf("Address size fault, zeroth level of translation or translation table base register\n");
        break;
    case 0b000001:
        rt_kprintf("Address size fault, first level\n");
        break;
    case 0b000010:
        rt_kprintf("Address size fault, second level\n");
        break;
    case 0b000011:
        rt_kprintf("Address size fault, third level\n");
        break;
    case 0b000100:
        rt_kprintf("Translation fault, zeroth level\n");
        break;
    case 0b000101:
        rt_kprintf("Translation fault, first level\n");
        break;
    case 0b000110:
        rt_kprintf("Translation fault, second level\n");
        break;
    case 0b000111:
        rt_kprintf("Translation fault, third level\n");
        break;
    case 0b001000:
        rt_kprintf("Access flag fault, zeroth level\n");
        break;
    case 0b001001:
        rt_kprintf("Access flag fault, first level\n");
        break;
    case 0b001010:
        rt_kprintf("Access flag fault, second level\n");
        break;
    case 0b001011:
        rt_kprintf("Access flag fault, third level\n");
        break;
    case 0b001100:
        rt_kprintf("Permission fault, zeroth level\n");
        break;
    case 0b001101:
        rt_kprintf("Permission fault, first level\n");
        break;
    case 0b001110:
        rt_kprintf("Permission fault, second level\n");
        break;
    case 0b001111:
        rt_kprintf("Permission fault, third level\n");
        break;
    case 0b010000:
        rt_kprintf("Synchronous external abort, not on translation table walk\n");
        break;
    case 0b011000:
        rt_kprintf("Synchronous parity or ECC error on memory access, not on translation table walk\n");
        break;
    case 0b010100:
        rt_kprintf("Synchronous external abort on translation table walk, zeroth level\n");
        break;
    case 0b010101:
        rt_kprintf("Synchronous external abort on translation table walk, first level\n");
        break;
    case 0b010110:
        rt_kprintf("Synchronous external abort on translation table walk, second level\n");
        break;
    case 0b010111:
        rt_kprintf("Synchronous external abort on translation table walk, third level\n");
        break;
    case 0b011100:
        rt_kprintf("Synchronous parity or ECC error on memory access on translation table walk, zeroth level\n");
        break;
    case 0b011101:
        rt_kprintf("Synchronous parity or ECC error on memory access on translation table walk, first level\n");
        break;
    case 0b011110:
        rt_kprintf("Synchronous parity or ECC error on memory access on translation table walk, second level\n");
        break;
    case 0b011111:
        rt_kprintf("Synchronous parity or ECC error on memory access on translation table walk, third level\n");
        break;
    case 0b100001:
        rt_kprintf("Alignment fault\n");
        break;
    case 0b110000:
        rt_kprintf("TLB conflict abort\n");
        break;
    case 0b110100:
        rt_kprintf("IMPLEMENTATION DEFINED fault (Lockdown fault)\n");
        break;
    case 0b110101:
        rt_kprintf("IMPLEMENTATION DEFINED fault (Unsupported Exclusive access fault)\n");
        break;
    case 0b111101:
        rt_kprintf("Section Domain Fault, used only for faults reported in the PAR_EL1\n");
        break;
    case 0b111110:
        rt_kprintf("Page Domain Fault, used only for faults reported in the PAR_EL1\n");
        break;
    default:
        /* Fixed typo in the diagnostic message ("unknow" -> "unknown"). */
        rt_kprintf("unknown abort\n");
        break;
    }
}
/**
 * Print a human-readable description of a synchronous exception.
 *
 * @param esr the saved exception syndrome register (ESR_EL1) value; the
 *            EC field (bits[31:26]) selects the message, the ISS field
 *            (bits[24:0], low 24 bits used here) is passed to data_abort()
 *            for data-abort classes
 * @param epc the exception return address (where the exception was taken)
 */
void print_exception(unsigned long esr, unsigned long epc)
{
    rt_uint8_t ec;
    rt_uint32_t iss;
    unsigned long fault_addr;
    rt_kprintf("\nexception info:\n");
    ec = (unsigned char)((esr >> 26) & 0x3fU);
    iss = (unsigned int)(esr & 0x00ffffffU);
    rt_kprintf("esr.EC :0x%02x\n", ec);
    rt_kprintf("esr.IL :0x%02x\n", (unsigned char)((esr >> 25) & 0x01U));
    rt_kprintf("esr.ISS:0x%08x\n", iss);
    rt_kprintf("epc :0x%016p\n", (void *)epc);
    switch (ec)
    {
    case 0x00:
        /* Fixed typo in the diagnostic message ("unknow" -> "unknown"). */
        rt_kprintf("Exceptions with an unknown reason\n");
        break;
    case 0x01:
        rt_kprintf("Exceptions from an WFI or WFE instruction\n");
        break;
    case 0x03:
        rt_kprintf("Exceptions from an MCR or MRC access to CP15 from AArch32\n");
        break;
    case 0x04:
        rt_kprintf("Exceptions from an MCRR or MRRC access to CP15 from AArch32\n");
        break;
    case 0x05:
        rt_kprintf("Exceptions from an MCR or MRC access to CP14 from AArch32\n");
        break;
    case 0x06:
        rt_kprintf("Exceptions from an LDC or STC access to CP14 from AArch32\n");
        break;
    case 0x07:
        rt_kprintf("Exceptions from Access to Advanced SIMD or floating-point registers\n");
        break;
    case 0x08:
        rt_kprintf("Exceptions from an MRC (or VMRS) access to CP10 from AArch32\n");
        break;
    case 0x0c:
        rt_kprintf("Exceptions from an MCRR or MRRC access to CP14 from AArch32\n");
        break;
    case 0x0e:
        /* Fixed typo in the diagnostic message ("ther" -> "the"). */
        rt_kprintf("Exceptions that occur because the value of PSTATE.IL is 1\n");
        break;
    case 0x11:
        rt_kprintf("SVC call from AArch32 state\n");
        break;
    case 0x15:
        rt_kprintf("SVC call from AArch64 state\n");
        break;
    case 0x20:
        rt_kprintf("Instruction abort from lower exception level\n");
        break;
    case 0x21:
        rt_kprintf("Instruction abort from current exception level\n");
        break;
    case 0x22:
        rt_kprintf("PC alignment fault\n");
        break;
    case 0x24:
        rt_kprintf("Data abort from a lower Exception level\n");
        /* FAR_EL1 holds the faulting virtual address for data aborts. */
        __asm__ volatile("mrs %0, far_el1":"=r"(fault_addr));
        data_abort(fault_addr, iss);
        break;
    case 0x25:
        rt_kprintf("Data abort\n");
        __asm__ volatile("mrs %0, far_el1":"=r"(fault_addr));
        data_abort(fault_addr, iss);
        break;
    default:
        rt_kprintf("Other error\n");
        break;
    }
}

View File

@ -0,0 +1,519 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-20 Bernard first version
* 2014-04-03 Grissiom many enhancements
* 2018-11-22 Jesven add rt_hw_ipi_send()
* add rt_hw_ipi_handler_install()
*/
#include <rthw.h>
#include <rtthread.h>
#if defined(BSP_USING_GIC) && defined(BSP_USING_GICV2)
#include "gic.h"
#include "cp15.h"
/* Software state describing one GICv2 controller instance. */
struct arm_gic
{
    rt_uint64_t offset; /* the first interrupt index in the vector table */
    rt_uint64_t dist_hw_base; /* the base address of the gic distributor */
    rt_uint64_t cpu_hw_base; /* the base address of the gic cpu interface */
};
/* 'ARM_GIC_MAX_NR' is the number of cores */
static struct arm_gic _gic_table[ARM_GIC_MAX_NR];
/** Macro to access the Generic Interrupt Controller Interface (GICC)
*/
#define GIC_CPU_CTRL(hw_base) __REG32((hw_base) + 0x00U)
#define GIC_CPU_PRIMASK(hw_base) __REG32((hw_base) + 0x04U)
#define GIC_CPU_BINPOINT(hw_base) __REG32((hw_base) + 0x08U)
#define GIC_CPU_INTACK(hw_base) __REG32((hw_base) + 0x0cU)
#define GIC_CPU_EOI(hw_base) __REG32((hw_base) + 0x10U)
#define GIC_CPU_RUNNINGPRI(hw_base) __REG32((hw_base) + 0x14U)
#define GIC_CPU_HIGHPRI(hw_base) __REG32((hw_base) + 0x18U)
#define GIC_CPU_IIDR(hw_base) __REG32((hw_base) + 0xFCU)
/** Macro to access the Generic Interrupt Controller Distributor (GICD)
*/
#define GIC_DIST_CTRL(hw_base) __REG32((hw_base) + 0x000U)
#define GIC_DIST_TYPE(hw_base) __REG32((hw_base) + 0x004U)
#define GIC_DIST_IGROUP(hw_base, n) __REG32((hw_base) + 0x080U + ((n)/32U) * 4U)
#define GIC_DIST_ENABLE_SET(hw_base, n) __REG32((hw_base) + 0x100U + ((n)/32U) * 4U)
#define GIC_DIST_ENABLE_CLEAR(hw_base, n) __REG32((hw_base) + 0x180U + ((n)/32U) * 4U)
#define GIC_DIST_PENDING_SET(hw_base, n) __REG32((hw_base) + 0x200U + ((n)/32U) * 4U)
#define GIC_DIST_PENDING_CLEAR(hw_base, n) __REG32((hw_base) + 0x280U + ((n)/32U) * 4U)
#define GIC_DIST_ACTIVE_SET(hw_base, n) __REG32((hw_base) + 0x300U + ((n)/32U) * 4U)
#define GIC_DIST_ACTIVE_CLEAR(hw_base, n) __REG32((hw_base) + 0x380U + ((n)/32U) * 4U)
#define GIC_DIST_PRI(hw_base, n) __REG32((hw_base) + 0x400U + ((n)/4U) * 4U)
#define GIC_DIST_TARGET(hw_base, n) __REG32((hw_base) + 0x800U + ((n)/4U) * 4U)
#define GIC_DIST_CONFIG(hw_base, n) __REG32((hw_base) + 0xc00U + ((n)/16U) * 4U)
#define GIC_DIST_SOFTINT(hw_base) __REG32((hw_base) + 0xf00U)
#define GIC_DIST_CPENDSGI(hw_base, n) __REG32((hw_base) + 0xf10U + ((n)/4U) * 4U)
#define GIC_DIST_SPENDSGI(hw_base, n) __REG32((hw_base) + 0xf20U + ((n)/4U) * 4U)
#define GIC_DIST_ICPIDR2(hw_base) __REG32((hw_base) + 0xfe8U)
static unsigned int _gic_max_irq;
/* Read GICC_IAR: acknowledge and return the highest-priority pending
 * interrupt, adjusted by this controller's irq offset. */
int arm_gic_get_active_irq(rt_uint64_t index)
{
    int irq;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = GIC_CPU_INTACK(_gic_table[index].cpu_hw_base);
    irq += _gic_table[index].offset;
    return irq;
}
/* Finish handling 'irq': clear its pending bit and write end-of-interrupt. */
void arm_gic_ack(rt_uint64_t index, int irq)
{
    rt_uint64_t mask = 1U << (irq % 32U);
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);
    GIC_DIST_PENDING_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
    GIC_CPU_EOI(_gic_table[index].cpu_hw_base) = irq;
}
/* Disable (mask) delivery of 'irq' by writing GICD_ICENABLERn. */
void arm_gic_mask(rt_uint64_t index, int irq)
{
    rt_uint64_t mask = 1U << (irq % 32U);
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);
    GIC_DIST_ENABLE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
}
/* Enable (unmask) delivery of 'irq' by writing GICD_ISENABLERn. */
void arm_gic_umask(rt_uint64_t index, int irq)
{
    rt_uint64_t mask = 1U << (irq % 32U);
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);
    GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base, irq) = mask;
}
/* Return 1 when 'irq' is pending, 0 otherwise; SGIs (INTID 0-15) are read
 * from GICD_SPENDSGIRn and collapsed to a single flag. */
rt_uint64_t arm_gic_get_pending_irq(rt_uint64_t index, int irq)
{
    rt_uint64_t pend;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);
    if (irq >= 16U)
    {
        pend = (GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32U)) & 0x1UL;
    }
    else
    {
        /* INTID 0-15 Software Generated Interrupt */
        pend = (GIC_DIST_SPENDSGI(_gic_table[index].dist_hw_base, irq) >> ((irq % 4U) * 8U)) & 0xFFUL;
        /* No CPU identification offered */
        if (pend != 0U)
        {
            pend = 1U;
        }
        else
        {
            pend = 0U;
        }
    }
    return (pend);
}
/* Mark 'irq' pending; SGIs are raised through GICD_SGIR targeting the
 * requesting CPU interface. */
void arm_gic_set_pending_irq(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);
    if (irq >= 16U)
    {
        GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) = 1U << (irq % 32U);
    }
    else
    {
        /* INTID 0-15 Software Generated Interrupt */
        /* Forward the interrupt to the CPU interface that requested it */
        GIC_DIST_SOFTINT(_gic_table[index].dist_hw_base) = (irq | 0x02000000U);
    }
}
/* Clear the pending state of 'irq' (GICD_ICPENDRn, or GICD_CPENDSGIRn for
 * SGIs). */
void arm_gic_clear_pending_irq(rt_uint64_t index, int irq)
{
    rt_uint64_t mask;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);
    if (irq >= 16U)
    {
        mask = 1U << (irq % 32U);
        GIC_DIST_PENDING_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
    }
    else
    {
        mask = 1U << ((irq % 4U) * 8U);
        GIC_DIST_CPENDSGI(_gic_table[index].dist_hw_base, irq) = mask;
    }
}
/* Program the trigger configuration of 'irq' in GICD_ICFGRn; each
 * interrupt owns a 2-bit field at offset (irq % 16) * 2. */
void arm_gic_set_configuration(rt_uint64_t index, int irq, uint32_t config)
{
    rt_uint64_t icfgr;
    rt_uint64_t shift;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);
    icfgr = GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq);
    shift = (irq % 16U) << 1U;
    icfgr &= (~(3U << shift));
    /* NOTE(review): config is written at shift + 1, i.e. only into
     * Int_config[1] (the edge/level bit); bit 0 of the field is left
     * clear. Confirm callers pass config as a 1-bit edge/level flag. */
    icfgr |= (config << (shift + 1));
    GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq) = icfgr;
}
/**
 * Read back the 2-bit trigger configuration of 'irq' from GICD_ICFGRn.
 *
 * @return the ICFGR register value shifted so the interrupt's own 2-bit
 *         field lands in bits[1:0].
 */
rt_uint64_t arm_gic_get_configuration(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);
    /* Each interrupt owns 2 bits in ICFGR, so the field offset is
     * (irq % 16) * 2, matching the shift used by
     * arm_gic_set_configuration(); the previous '>> 1U' halved the
     * offset and returned the wrong interrupt's field. */
    return (GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq) >> ((irq % 16U) << 1U));
}
/* Deactivate 'irq' by writing its bit in GICD_ICACTIVERn. */
void arm_gic_clear_active(rt_uint64_t index, int irq)
{
    rt_uint64_t mask = 1U << (irq % 32U);
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);
    GIC_DIST_ACTIVE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
}
/* Set up the cpu mask for the specific interrupt: read-modify-write the
 * interrupt's byte in GICD_ITARGETSRn. */
void arm_gic_set_cpu(rt_uint64_t index, int irq, unsigned int cpumask)
{
    rt_uint64_t old_tgt;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);
    old_tgt = GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq);
    old_tgt &= ~(0x0FFUL << ((irq % 4U)*8U));
    old_tgt |= cpumask << ((irq % 4U)*8U);
    GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq) = old_tgt;
}
/* Return the CPU target byte of 'irq' from GICD_ITARGETSRn. */
rt_uint64_t arm_gic_get_target_cpu(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);
    return (GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq) >> ((irq % 4U) * 8U)) & 0xFFUL;
}
/* Program the 8-bit priority byte of 'irq' in GICD_IPRIORITYRn. */
void arm_gic_set_priority(rt_uint64_t index, int irq, rt_uint64_t priority)
{
    rt_uint64_t mask;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);
    mask = GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq);
    mask &= ~(0xFFUL << ((irq % 4U) * 8U));
    mask |= ((priority & 0xFFUL) << ((irq % 4U) * 8U));
    GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq) = mask;
}
/* Return the 8-bit priority byte of 'irq' from GICD_IPRIORITYRn. */
rt_uint64_t arm_gic_get_priority(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);
    return (GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq) >> ((irq % 4U) * 8U)) & 0xFFUL;
}
/* Set the CPU interface priority mask (GICC_PMR): only interrupts with
 * higher priority (lower value) than this are signalled. */
void arm_gic_set_interface_prior_mask(rt_uint64_t index, rt_uint64_t priority)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    /* set priority mask */
    GIC_CPU_PRIMASK(_gic_table[index].cpu_hw_base) = priority & 0xFFUL;
}
/* Read the CPU interface priority mask (GICC_PMR). */
rt_uint64_t arm_gic_get_interface_prior_mask(rt_uint64_t index)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    return GIC_CPU_PRIMASK(_gic_table[index].cpu_hw_base);
}
/* Set the binary point register (GICC_BPR) controlling priority grouping.
 * NOTE(review): unlike its siblings this does not assert
 * index < ARM_GIC_MAX_NR. */
void arm_gic_set_binary_point(rt_uint64_t index, rt_uint64_t binary_point)
{
    GIC_CPU_BINPOINT(_gic_table[index].cpu_hw_base) = binary_point & 0x7U;
}
/* Read the binary point register (GICC_BPR). */
rt_uint64_t arm_gic_get_binary_point(rt_uint64_t index)
{
    return GIC_CPU_BINPOINT(_gic_table[index].cpu_hw_base);
}
/* Return bit0 = pending, bit1 = active for 'irq'. */
rt_uint64_t arm_gic_get_irq_status(rt_uint64_t index, int irq)
{
    rt_uint64_t pending;
    rt_uint64_t active;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);
    active = (GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32U)) & 0x1UL;
    pending = (GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32U)) & 0x1UL;
    return ((active << 1U) | pending);
}
/* Raise a software generated interrupt via GICD_SGIR: filter_list selects
 * the routing mode, target_list the CPU interfaces, irq the SGI number. */
void arm_gic_send_sgi(rt_uint64_t index, int irq, rt_uint64_t target_list, rt_uint64_t filter_list)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);
    GIC_DIST_SOFTINT(_gic_table[index].dist_hw_base) =
        ((filter_list & 0x3U) << 24U) | ((target_list & 0xFFUL) << 16U) | (irq & 0x0FUL);
}
/* Read GICC_HPPIR: the highest-priority pending interrupt (not acked). */
rt_uint64_t arm_gic_get_high_pending_irq(rt_uint64_t index)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    return GIC_CPU_HIGHPRI(_gic_table[index].cpu_hw_base);
}
/* Read the CPU interface identification register (GICC_IIDR). */
rt_uint64_t arm_gic_get_interface_id(rt_uint64_t index)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    return GIC_CPU_IIDR(_gic_table[index].cpu_hw_base);
}
/* Assign 'irq' to interrupt group 0 or 1 (GICD_IGROUPRn). */
void arm_gic_set_group(rt_uint64_t index, int irq, rt_uint64_t group)
{
    uint32_t igroupr;
    uint32_t shift;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    RT_ASSERT(group <= 1U);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);
    igroupr = GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq);
    shift = (irq % 32U);
    igroupr &= (~(1U << shift));
    igroupr |= ((group & 0x1U) << shift);
    GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq) = igroupr;
}
/* Return the group (0 or 1) of 'irq' from GICD_IGROUPRn. */
rt_uint64_t arm_gic_get_group(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);
    return (GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq) >> (irq % 32U)) & 0x1UL;
}
/* One-time initialization of the GICv2 distributor: probe the supported
 * interrupt count, route all SPIs to CPU0, set default priorities, disable
 * everything, put all interrupts in group 0, then enable forwarding.
 * 'irq_start' becomes the controller's vector-table offset. */
int arm_gic_dist_init(rt_uint64_t index, rt_uint64_t dist_base, int irq_start)
{
    unsigned int gic_type, i;
    rt_uint64_t cpumask = 1U << 0U;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    _gic_table[index].dist_hw_base = dist_base;
    _gic_table[index].offset = irq_start;
    /* Find out how many interrupts are supported. */
    gic_type = GIC_DIST_TYPE(dist_base);
    _gic_max_irq = ((gic_type & 0x1fU) + 1U) * 32U;
    /*
     * The GIC only supports up to 1020 interrupt sources.
     * Limit this to either the architected maximum, or the
     * platform maximum.
     */
    if (_gic_max_irq > 1020U)
    {
        _gic_max_irq = 1020U;
    }
    if (_gic_max_irq > ARM_GIC_NR_IRQS) /* the platform maximum interrupts */
    {
        _gic_max_irq = ARM_GIC_NR_IRQS;
    }
    /* Replicate the CPU0 mask into all four target bytes. */
    cpumask |= cpumask << 8U;
    cpumask |= cpumask << 16U;
    cpumask |= cpumask << 24U;
    /* Disable the distributor while reconfiguring it. */
    GIC_DIST_CTRL(dist_base) = 0x0U;
    /* Set all global interrupts to be level triggered, active low. */
    for (i = 32U; i < _gic_max_irq; i += 16U)
    {
        GIC_DIST_CONFIG(dist_base, i) = 0x0U;
    }
    /* Set all global interrupts to this CPU only. */
    for (i = 32U; i < _gic_max_irq; i += 4U)
    {
        GIC_DIST_TARGET(dist_base, i) = cpumask;
    }
    /* Set priority on all interrupts. */
    for (i = 0U; i < _gic_max_irq; i += 4U)
    {
        GIC_DIST_PRI(dist_base, i) = 0xa0a0a0a0U;
    }
    /* Disable all interrupts. */
    for (i = 0U; i < _gic_max_irq; i += 32U)
    {
        GIC_DIST_ENABLE_CLEAR(dist_base, i) = 0xffffffffU;
    }
    /* All interrupts defaults to IGROUP1(IRQ). */
    /*
    for (i = 0; i < _gic_max_irq; i += 32)
    {
        GIC_DIST_IGROUP(dist_base, i) = 0xffffffffU;
    }
    */
    for (i = 0U; i < _gic_max_irq; i += 32U)
    {
        GIC_DIST_IGROUP(dist_base, i) = 0U;
    }
    /* Enable group0 and group1 interrupt forwarding. */
    GIC_DIST_CTRL(dist_base) = 0x01U;
    return 0;
}
/* Per-CPU initialization of the GICv2 CPU interface: record the base
 * address (first caller wins), open the priority mask, set the binary
 * point, and enable interrupt signalling. */
int arm_gic_cpu_init(rt_uint64_t index, rt_uint64_t cpu_base)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    if (!_gic_table[index].cpu_hw_base)
    {
        _gic_table[index].cpu_hw_base = cpu_base;
    }
    cpu_base = _gic_table[index].cpu_hw_base;
    /* Accept all priorities below 0xf0. */
    GIC_CPU_PRIMASK(cpu_base) = 0xf0U;
    GIC_CPU_BINPOINT(cpu_base) = 0x7U;
    /* Enable CPU interrupt */
    GIC_CPU_CTRL(cpu_base) = 0x01U;
    return 0;
}
/* Print the GIC architecture version, max IRQ count and security
 * extension support, decoded from GICD_ICPIDR2 / GICD_TYPER. */
void arm_gic_dump_type(rt_uint64_t index)
{
    unsigned int gic_type;
    gic_type = GIC_DIST_TYPE(_gic_table[index].dist_hw_base);
    rt_kprintf("GICv%d on %p, max IRQs: %d, %s security extension(%08x)\n",
               (GIC_DIST_ICPIDR2(_gic_table[index].dist_hw_base) >> 4U) & 0xfUL,
               _gic_table[index].dist_hw_base,
               _gic_max_irq,
               gic_type & (1U << 10U) ? "has" : "no",
               gic_type);
}
/* Dump the enable/pending/active bitmaps of every interrupt word. */
void arm_gic_dump(rt_uint64_t index)
{
    unsigned int i, k;
    k = GIC_CPU_HIGHPRI(_gic_table[index].cpu_hw_base);
    rt_kprintf("--- high pending priority: %d(%08x)\n", k, k);
    rt_kprintf("--- hw mask ---\n");
    for (i = 0U; i < _gic_max_irq / 32U; i++)
    {
        rt_kprintf("0x%08x, ",
                   GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base,
                                       i * 32U));
    }
    rt_kprintf("\n--- hw pending ---\n");
    for (i = 0U; i < _gic_max_irq / 32U; i++)
    {
        rt_kprintf("0x%08x, ",
                   GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base,
                                        i * 32U));
    }
    rt_kprintf("\n--- hw active ---\n");
    for (i = 0U; i < _gic_max_irq / 32U; i++)
    {
        rt_kprintf("0x%08x, ",
                   GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base,
                                       i * 32U));
    }
    rt_kprintf("\n");
}
/* msh command wrapper: dump the state of controller 0. */
long gic_dump(void)
{
    arm_gic_dump_type(0);
    arm_gic_dump(0);
    return 0;
}
MSH_CMD_EXPORT(gic_dump, show gic status);
#endif /* defined(BSP_USING_GIC) && defined(BSP_USING_GICV2) */

View File

@ -0,0 +1,856 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-20 Bernard first version
* 2014-04-03 Grissiom many enhancements
* 2018-11-22 Jesven add rt_hw_ipi_send()
* add rt_hw_ipi_handler_install()
* 2022-03-08 GuEe-GUI add BSP bind SPI CPU self support
* add GICv3 AArch64 system register interface
* modify arm_gic_redist_init() args
* modify arm_gic_cpu_init() args
* modify arm_gic_send_affinity_sgi() args
* remove arm_gic_redist_address_set()
* remove arm_gic_cpu_interface_address_set()
* remove arm_gic_secondary_cpu_init()
* remove get_main_cpu_affval()
* remove arm_gic_cpumask_to_affval()
*/
#include <rthw.h>
#include <rtthread.h>
#if defined(BSP_USING_GIC) && defined(BSP_USING_GICV3)
#include <gicv3.h>
#include <cp15.h>
#include <board.h>
#ifndef ARM_SPI_BIND_CPU_ID
#define ARM_SPI_BIND_CPU_ID 0
#endif
#if !defined(RT_USING_SMP) && !defined(RT_USING_AMP)
#define RT_CPUS_NR 1
#else
extern rt_uint64_t rt_cpu_mpidr_early[];
#endif /* RT_USING_SMP */
/* 'ARM_GIC_MAX_NR' is the number of cores */
static struct arm_gic _gic_table[ARM_GIC_MAX_NR];
static unsigned int _gic_max_irq;
/* Read ICC_IAR1_EL1: acknowledge and return the highest-priority pending
 * interrupt id (INTID in bits[24:0]), adjusted by the controller offset. */
int arm_gic_get_active_irq(rt_uint64_t index)
{
    rt_base_t irq;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    GET_GICV3_REG(ICC_IAR1_EL1, irq);
    irq = (irq & 0x1ffffff) + _gic_table[index].offset;
    return irq;
}
/* Signal end-of-interrupt for 'irq' via ICC_EOIR1_EL1. */
void arm_gic_ack(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    RT_ASSERT(irq >= 0);
    /* Order all prior handler accesses before the EOI write. */
    __DSB();
    SET_GICV3_REG(ICC_EOIR1_EL1, (rt_base_t)irq);
}
/* Disable (mask) 'irq': SGIs/PPIs (< 32) live in this CPU's redistributor,
 * SPIs in the distributor. */
void arm_gic_mask(rt_uint64_t index, int irq)
{
    rt_uint64_t mask = 1 << (irq % 32);
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);
    if (irq < 32)
    {
        rt_int32_t cpu_id = rt_hw_cpu_id();
        GIC_RDISTSGI_ICENABLER0(_gic_table[index].redist_hw_base[cpu_id]) = mask;
    }
    else
    {
        GIC_DIST_ENABLE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
    }
}
/* Enable (unmask) 'irq'; same redistributor/distributor split as above. */
void arm_gic_umask(rt_uint64_t index, int irq)
{
    rt_uint64_t mask = 1 << (irq % 32);
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);
    if (irq < 32)
    {
        rt_int32_t cpu_id = rt_hw_cpu_id();
        GIC_RDISTSGI_ISENABLER0(_gic_table[index].redist_hw_base[cpu_id]) = mask;
    }
    else
    {
        GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base, irq) = mask;
    }
}
/* Return 1 when 'irq' is pending, 0 otherwise; SGIs (INTID 0-15) are read
 * from GICD_SPENDSGIRn and collapsed to a single flag. */
rt_uint64_t arm_gic_get_pending_irq(rt_uint64_t index, int irq)
{
    rt_uint64_t pend;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);
    if (irq >= 16)
    {
        pend = (GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32)) & 0x1;
    }
    else
    {
        /* INTID 0-15 Software Generated Interrupt */
        pend = (GIC_DIST_SPENDSGI(_gic_table[index].dist_hw_base, irq) >> ((irq % 4) * 8)) & 0xff;
        /* No CPU identification offered */
        if (pend != 0)
        {
            pend = 1;
        }
        else
        {
            pend = 0;
        }
    }
    return pend;
}
/* Mark 'irq' pending; SGIs are raised through GICD_SGIR targeting the
 * requesting CPU interface. */
void arm_gic_set_pending_irq(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);
    if (irq >= 16)
    {
        GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) = 1 << (irq % 32);
    }
    else
    {
        /* INTID 0-15 Software Generated Interrupt */
        /* Forward the interrupt to the CPU interface that requested it */
        GIC_DIST_SOFTINT(_gic_table[index].dist_hw_base) = (irq | 0x02000000);
    }
}
/* Clear the pending state of 'irq' (GICD_ICPENDRn, or GICD_CPENDSGIRn for
 * SGIs). */
void arm_gic_clear_pending_irq(rt_uint64_t index, int irq)
{
    rt_uint64_t mask;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);
    if (irq >= 16)
    {
        mask = 1 << (irq % 32);
        GIC_DIST_PENDING_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
    }
    else
    {
        mask = 1 << ((irq % 4) * 8);
        GIC_DIST_CPENDSGI(_gic_table[index].dist_hw_base, irq) = mask;
    }
}
/* Program the trigger configuration of 'irq' in GICD_ICFGRn; each
 * interrupt owns a 2-bit field at offset (irq % 16) * 2. */
void arm_gic_set_configuration(rt_uint64_t index, int irq, rt_uint32_t config)
{
    rt_uint64_t icfgr;
    rt_uint64_t shift;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);
    icfgr = GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq);
    shift = (irq % 16) << 1;
    icfgr &= (~(3 << shift));
    /* NOTE(review): config is written at shift + 1, i.e. only into
     * Int_config[1] (the edge/level bit); bit 0 of the field is left
     * clear. Confirm callers pass config as a 1-bit edge/level flag. */
    icfgr |= (config << (shift + 1));
    GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq) = icfgr;
}
/**
 * Read back the 2-bit trigger configuration of 'irq' from GICD_ICFGRn.
 *
 * @return the ICFGR register value shifted so the interrupt's own 2-bit
 *         field lands in bits[1:0].
 */
rt_uint64_t arm_gic_get_configuration(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);
    /* Each interrupt owns 2 bits in ICFGR, so the field offset is
     * (irq % 16) * 2, matching the shift used by
     * arm_gic_set_configuration(); the previous '>> 1' halved the offset
     * and returned the wrong interrupt's field. */
    return (GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq) >> ((irq % 16) << 1));
}
/* Deactivate 'irq' by writing its bit in GICD_ICACTIVERn. */
void arm_gic_clear_active(rt_uint64_t index, int irq)
{
    rt_uint64_t mask = 1 << (irq % 32);
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);
    GIC_DIST_ACTIVE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
}
/* Route SPI 'irq' to the PE identified by affinity value 'aff'
 * (GICD_IROUTERn, affinity routing mode); SPIs only, hence irq >= 32. */
void arm_gic_set_router_cpu(rt_uint64_t index, int irq, rt_uint64_t aff)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 32);
    /* keep aff3[39:32], aff2[23:16], aff1[15:8], aff0[7:0] */
    GIC_DIST_IROUTER(_gic_table[index].dist_hw_base, irq) = aff & 0xff00ffffffULL;
}
/* Read the affinity routing value of SPI 'irq' from GICD_IROUTERn. */
rt_uint64_t arm_gic_get_router_cpu(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 32);
    return GIC_DIST_IROUTER(_gic_table[index].dist_hw_base, irq);
}
/* Set up the cpu mask for the specific interrupt (legacy GICD_ITARGETSRn
 * byte, used when affinity routing is disabled). */
void arm_gic_set_cpu(rt_uint64_t index, int irq, unsigned int cpumask)
{
    rt_uint64_t old_tgt;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);
    old_tgt = GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq);
    old_tgt &= ~(0x0ff << ((irq % 4) * 8));
    old_tgt |= cpumask << ((irq % 4) * 8);
    GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq) = old_tgt;
}
/* Return the CPU target byte of 'irq' from GICD_ITARGETSRn. */
rt_uint64_t arm_gic_get_target_cpu(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);
    return (GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq) >> ((irq % 4) * 8)) & 0xff;
}
/* Program the 8-bit priority of 'irq': SGIs/PPIs (< 32) live in this CPU's
 * redistributor, SPIs in the distributor. */
void arm_gic_set_priority(rt_uint64_t index, int irq, rt_uint64_t priority)
{
    rt_uint64_t mask;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);
    if (irq < 32)
    {
        rt_int32_t cpu_id = rt_hw_cpu_id();
        mask = GIC_RDISTSGI_IPRIORITYR(_gic_table[index].redist_hw_base[cpu_id], irq);
        mask &= ~(0xffUL << ((irq % 4) * 8));
        mask |= ((priority & 0xff) << ((irq % 4) * 8));
        GIC_RDISTSGI_IPRIORITYR(_gic_table[index].redist_hw_base[cpu_id], irq) = mask;
    }
    else
    {
        mask = GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq);
        mask &= ~(0xff << ((irq % 4) * 8));
        mask |= ((priority & 0xff) << ((irq % 4) * 8));
        GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq) = mask;
    }
}
/* Return the 8-bit priority of 'irq'; same redistributor/distributor
 * split as arm_gic_set_priority(). */
rt_uint64_t arm_gic_get_priority(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);
    if (irq < 32)
    {
        rt_int32_t cpu_id = rt_hw_cpu_id();
        return (GIC_RDISTSGI_IPRIORITYR(_gic_table[index].redist_hw_base[cpu_id], irq) >> ((irq % 4) * 8)) & 0xff;
    }
    else
    {
        return (GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq) >> ((irq % 4) * 8)) & 0xff;
    }
}
/* Write ICC_SRE_EL1 to enable/disable the system-register interface. */
void arm_gic_set_system_register_enable_mask(rt_uint64_t index, rt_uint64_t value)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    value &= 0xff;
    /* set priority mask */
    SET_GICV3_REG(ICC_SRE_EL1, value);
    /* ensure the SRE change takes effect before later sysreg accesses */
    __ISB();
}
/* Read ICC_SRE_EL1. */
rt_uint64_t arm_gic_get_system_register_enable_mask(rt_uint64_t index)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    rt_uint64_t value;
    GET_GICV3_REG(ICC_SRE_EL1, value);
    return value;
}
/* Set the CPU interface priority mask (ICC_PMR_EL1). */
void arm_gic_set_interface_prior_mask(rt_uint64_t index, rt_uint64_t priority)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    priority &= 0xff;
    /* set priority mask */
    SET_GICV3_REG(ICC_PMR_EL1, priority);
}
/* Read the CPU interface priority mask (ICC_PMR_EL1). */
rt_uint64_t arm_gic_get_interface_prior_mask(rt_uint64_t index)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    rt_uint64_t priority;
    GET_GICV3_REG(ICC_PMR_EL1, priority);
    return priority;
}
/* Set the group-1 binary point register (ICC_BPR1_EL1). */
void arm_gic_set_binary_point(rt_uint64_t index, rt_uint64_t binary_point)
{
    RT_UNUSED(index);
    binary_point &= 0x7;
    SET_GICV3_REG(ICC_BPR1_EL1, binary_point);
}
/* Read the group-1 binary point register (ICC_BPR1_EL1). */
rt_uint64_t arm_gic_get_binary_point(rt_uint64_t index)
{
    rt_uint64_t binary_point;
    RT_UNUSED(index);
    GET_GICV3_REG(ICC_BPR1_EL1, binary_point);
    return binary_point;
}
/* Return bit0 = pending, bit1 = active for 'irq'. */
rt_uint64_t arm_gic_get_irq_status(rt_uint64_t index, int irq)
{
    rt_uint64_t pending, active;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);
    active = (GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32)) & 0x1;
    pending = (GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32)) & 0x1;
    return ((active << 1) | pending);
}
#if defined(RT_USING_SMP) || defined(RT_USING_AMP)
/* One entry per distinct Aff3.Aff2.Aff1 affinity group: which CPUs belong
 * to it (cpu_mask, indexed by logical CPU id) and the per-send Aff0
 * target list accumulated before writing ICC_SGI1R_EL1. */
struct gicv3_sgi_aff
{
    rt_uint64_t aff;
    rt_uint32_t cpu_mask[(RT_CPUS_NR + 31) >> 5];
    rt_uint16_t target_list;
};
/* Lazily built affinity table (see gicv3_sgi_init()). */
static struct gicv3_sgi_aff sgi_aff_table[RT_CPUS_NR];
static rt_uint64_t sgi_aff_table_num;
/* Record that the CPU with logical index 'cpu_index' belongs to affinity
 * group 'aff', creating a new table entry when this affinity value has
 * not been seen before. */
static void sgi_aff_add_table(rt_uint64_t aff, rt_uint64_t cpu_index)
{
    rt_uint64_t slot;
    /* Look for an existing entry with the same affinity value. */
    for (slot = 0; slot < sgi_aff_table_num; slot++)
    {
        if (sgi_aff_table[slot].aff == aff)
        {
            break;
        }
    }
    if (slot == sgi_aff_table_num)
    {
        /* First time we see this affinity: open a fresh slot. */
        sgi_aff_table[slot].aff = aff;
        sgi_aff_table_num++;
    }
    sgi_aff_table[slot].cpu_mask[cpu_index >> 5] |= (1 << (cpu_index & 0x1F));
}
/* Build the affinity table from each CPU's MPIDR: pack Aff1/Aff2/Aff3 and
 * the RS (range selector, Aff0[7:4]) into the ICC_SGI1R_EL1 layout.
 * Returns the number of 32-bit words needed for a CPU mask. */
static rt_uint64_t gicv3_sgi_init(void)
{
    rt_uint64_t i, icc_sgi1r_value;
    for (i = 0; i < RT_CPUS_NR; i++)
    {
        icc_sgi1r_value = (rt_uint64_t)((rt_cpu_mpidr_early[i] >> 8) & 0xFF) << 16;
        icc_sgi1r_value |= (rt_uint64_t)((rt_cpu_mpidr_early[i] >> 16) & 0xFF) << 32;
        icc_sgi1r_value |= (rt_uint64_t)((rt_cpu_mpidr_early[i] >> 32) & 0xFF) << 48;
        icc_sgi1r_value |= (rt_uint64_t)((rt_cpu_mpidr_early[i] >> 4) & 0xF) << 44;
        sgi_aff_add_table(icc_sgi1r_value, i);
    }
    return (RT_CPUS_NR + 31) >> 5;
}
/* Fire SGI 'int_id' at every affinity group whose target_list was filled
 * by gicv3_sgi_target_list_set(); target_list is consumed (reset to 0). */
rt_inline void gicv3_sgi_send(rt_uint64_t int_id)
{
    rt_uint64_t i;
    for (i = 0; i < sgi_aff_table_num; i++)
    {
        if (sgi_aff_table[i].target_list)
        {
            __DSB();
            /* Interrupts routed to the PEs specified by Aff3.Aff2.Aff1.<target list>. */
            SET_GICV3_REG(ICC_SGI1R_EL1, sgi_aff_table[i].aff | int_id | sgi_aff_table[i].target_list);
            __ISB();
            sgi_aff_table[i].target_list = 0;
        }
    }
}
/* Translate one word of the caller's CPU mask ('array' selects which
 * 32-CPU word) into per-affinity-group Aff0 target-list bits, consuming
 * cpu_mask bit by bit. */
rt_inline void gicv3_sgi_target_list_set(rt_uint64_t array, rt_uint32_t cpu_mask)
{
    rt_uint64_t i, value;
    for (i = 0; i < sgi_aff_table_num; i++)
    {
        if (sgi_aff_table[i].cpu_mask[array] & cpu_mask)
        {
            while (cpu_mask)
            {
                /* lowest set bit = next selected logical CPU index */
                value = __builtin_ctzl(cpu_mask);
                cpu_mask &= ~(1 << value);
                /* Aff0[3:0] of that CPU's MPIDR selects its target-list bit. */
                sgi_aff_table[i].target_list |= 1 << (rt_cpu_mpidr_early[(array << 5) | value] & 0xF);
            }
        }
    }
}
/* Send SGI 'irq' either to the CPUs selected by 'cpu_masks' (routing mode
 * GICV3_ROUTED_TO_SPEC) or to all PEs except self (IRM=1 broadcast).
 * The affinity table is built lazily on the first targeted send. */
void arm_gic_send_affinity_sgi(rt_uint64_t index, int irq, rt_uint32_t cpu_masks[], rt_uint64_t routing_mode)
{
    rt_uint64_t i;
    rt_uint64_t int_id = (irq & 0xf) << 24;
    static rt_uint64_t masks_nrs = 0;
    if (routing_mode == GICV3_ROUTED_TO_SPEC)
    {
        if (!masks_nrs)
        {
            masks_nrs = gicv3_sgi_init();
        }
        for (i = 0; i < masks_nrs; i++)
        {
            if (cpu_masks[i] == 0)
            {
                continue;
            }
            gicv3_sgi_target_list_set(i, cpu_masks[i]);
        }
        gicv3_sgi_send(int_id);
    }
    else
    {
        __DSB();
        /* Interrupts routed to all PEs in the system, excluding "self". */
        SET_GICV3_REG(ICC_SGI1R_EL1, (0x10000000000ULL) | int_id);
        __ISB();
    }
}
#endif /* defined(RT_USING_SMP) || defined(RT_USING_AMP) */
/* Read ICC_HPPIR1_EL1: the highest-priority pending interrupt (not acked). */
rt_uint64_t arm_gic_get_high_pending_irq(rt_uint64_t index)
{
    rt_uint64_t irq;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    RT_UNUSED(index);
    GET_GICV3_REG(ICC_HPPIR1_EL1, irq);
    return irq;
}
/* Read this CPU's GICC_IIDR (legacy memory-mapped interface), with local
 * interrupts disabled so the cpu id stays stable. */
rt_uint64_t arm_gic_get_interface_id(rt_uint64_t index)
{
    rt_uint64_t ret = 0;
    rt_base_t level;
    int cpuid;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    level = rt_hw_local_irq_disable();
    cpuid = rt_hw_cpu_id();
    if (_gic_table[index].cpu_hw_base[cpuid] != RT_NULL)
    {
        ret = GIC_CPU_IIDR(_gic_table[index].cpu_hw_base[cpuid]);
    }
    rt_hw_local_irq_enable(level);
    return ret;
}
/* Assign 'irq' to interrupt group 0 or 1 (GICD_IGROUPRn). */
void arm_gic_set_group(rt_uint64_t index, int irq, rt_uint64_t group)
{
    rt_uint64_t igroupr;
    rt_uint64_t shift;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    RT_ASSERT(group <= 1);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);
    igroupr = GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq);
    shift = (irq % 32);
    igroupr &= (~(1U << shift));
    igroupr |= ((group & 0x1U) << shift);
    GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq) = igroupr;
}
/* Return the group (0 or 1) of 'irq' from GICD_IGROUPRn. */
rt_uint64_t arm_gic_get_group(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);
    return (GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq) >> (irq % 32)) & 0x1UL;
}
/* Busy-wait until the register-write-pending (RWP) bit clears: on the
 * redistributor CTLR for SGIs/PPIs (irq < 32), on the distributor CTLR
 * otherwise. Returns 0 when the write has taken effect. */
static int arm_gicv3_wait_rwp(rt_uint64_t index, rt_uint64_t irq)
{
    rt_uint64_t rwp_bit;
    rt_uint64_t base;
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    if (irq < 32)
    {
        rt_int32_t cpu_id = rt_hw_cpu_id();
        base = _gic_table[index].redist_hw_base[cpu_id];
        rwp_bit = GICR_CTLR_RWP;
    }
    else
    {
        base = _gic_table[index].dist_hw_base;
        rwp_bit = GICD_CTLR_RWP;
    }
    while (HWREG32(base) & rwp_bit)
    {
    }
    return 0;
}
/* One-time initialization of the GICv3 distributor: probe the supported
 * interrupt count, route all SPIs to the boot/bind CPU via affinity
 * routing, set default priorities, disable everything, put everything in
 * Non-secure Group 1, then enable ARE_NS + Group 1 forwarding.
 * Skipped entirely on AMP slave cores (the master owns the distributor). */
int arm_gic_dist_init(rt_uint64_t index, rt_uint64_t dist_base, int irq_start)
{
    int i;
    unsigned int gic_type;
    rt_uint64_t main_cpu_affinity_val;
    RT_UNUSED(i);
    RT_UNUSED(main_cpu_affinity_val);
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    _gic_table[index].dist_hw_base = dist_base;
    _gic_table[index].offset = irq_start;
    /* Find out how many interrupts are supported. */
    gic_type = GIC_DIST_TYPE(dist_base);
    _gic_max_irq = ((gic_type & 0x1f) + 1) * 32;
    /*
     * The GIC only supports up to 1020 interrupt sources.
     * Limit this to either the architected maximum, or the
     * platform maximum.
     */
    if (_gic_max_irq > 1020)
    {
        _gic_max_irq = 1020;
    }
    if (_gic_max_irq > ARM_GIC_NR_IRQS) /* the platform maximum interrupts */
    {
        _gic_max_irq = ARM_GIC_NR_IRQS;
    }
#ifndef RT_AMP_SLAVE
    /* Disable the distributor while reconfiguring it. */
    GIC_DIST_CTRL(dist_base) = 0;
    /* Wait for register write pending */
    arm_gicv3_wait_rwp(0, 32);
    /* Set all global interrupts to be level triggered, active low. */
    for (i = 32; i < _gic_max_irq; i += 16)
    {
        GIC_DIST_CONFIG(dist_base, i) = 0;
    }
    arm_gicv3_wait_rwp(0, 32);
#ifdef RT_USING_SMP
    main_cpu_affinity_val = rt_cpu_mpidr_early[ARM_SPI_BIND_CPU_ID];
#else
    /* Single-core build: take this CPU's own affinity from MPIDR_EL1. */
    __asm__ volatile ("mrs %0, mpidr_el1":"=r"(main_cpu_affinity_val));
#endif
    /* aff3[39:32], aff2[23:16], aff1[15:8], aff0[7:0] */
    main_cpu_affinity_val &= 0xff00ffffffULL;
    /* Set all global interrupts to this CPU only. */
    for (i = 32; i < _gic_max_irq; i++)
    {
        GIC_DIST_IROUTER(dist_base, i) = main_cpu_affinity_val | (GICV3_ROUTED_TO_SPEC << 31);
    }
    arm_gicv3_wait_rwp(0, 32);
    /* Set priority on spi interrupts. */
    for (i = 32; i < _gic_max_irq; i += 4)
    {
        GIC_DIST_PRI(dist_base, i) = 0xa0a0a0a0;
    }
    arm_gicv3_wait_rwp(0, 32);
    /* Disable all interrupts. */
    for (i = 0; i < _gic_max_irq; i += 32)
    {
        GIC_DIST_PENDING_CLEAR(dist_base, i) = 0xffffffff;
        GIC_DIST_ENABLE_CLEAR(dist_base, i) = 0xffffffff;
    }
    arm_gicv3_wait_rwp(0, 32);
    /* All interrupts defaults to IGROUP1(IRQ). */
    for (i = 0; i < _gic_max_irq; i += 32)
    {
        GIC_DIST_IGROUP(dist_base, i) = 0xffffffff;
    }
    arm_gicv3_wait_rwp(0, 32);
    /*
     * The Distributor control register (GICD_CTLR) must be configured to enable the interrupt groups and to set the routing mode.
     * Enable Affinity routing (ARE bits) The ARE bits in GICD_CTLR control whether affinity routing is enabled.
     * If affinity routing is not enabled, GICv3 can be configured for legacy operation.
     * Whether affinity routing is enabled or not can be controlled separately for Secure and Non-secure state.
     * Enables GICD_CTLR contains separate enable bits for Group 0, Secure Group 1 and Non-secure Group 1:
     * GICD_CTLR.EnableGrp1S enables distribution of Secure Group 1 interrupts.
     * GICD_CTLR.EnableGrp1NS enables distribution of Non-secure Group 1 interrupts.
     * GICD_CTLR.EnableGrp0 enables distribution of Group 0 interrupts.
     */
    GIC_DIST_CTRL(dist_base) = GICD_CTLR_ARE_NS | GICD_CTLR_ENGRP1NS;
#endif /* RT_AMP_SLAVE */
    return 0;
}
/**
 * Initialize the calling CPU's redistributor (GICR) frame: wake it up,
 * disable/clear all SGIs and PPIs, put them in Non-secure Group 1 and set
 * default priorities.
 *
 * @param index       GIC instance index (must be < ARM_GIC_MAX_NR)
 * @param redist_base redistributor base address (only used by the first
 *                    caller; later CPUs derive their frame from it)
 *
 * @return 0 on success
 *
 * Fix: arm_gicv3_wait_rwp() now uses 'index' instead of a hard-coded
 * instance 0.
 */
int arm_gic_redist_init(rt_uint64_t index, rt_uint64_t redist_base)
{
    int i;
    int cpu_id = rt_hw_cpu_id();
    static int master_cpu_id = -1;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    if (master_cpu_id < 0)
    {
        /* First caller becomes the reference CPU; flush the cache line so
         * that secondary cores starting with caches off see the value. */
        master_cpu_id = 0;
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, &master_cpu_id, sizeof(master_cpu_id));
    }

    if (!_gic_table[index].redist_hw_base[master_cpu_id])
    {
        _gic_table[index].redist_hw_base[master_cpu_id] = redist_base;
    }
    redist_base = _gic_table[index].redist_hw_base[master_cpu_id];

    /* Each redistributor occupies two adjacent 64 KiB frames
     * (RD_base + SGI_base), hence a stride of 2 << 16 = 128 KiB per CPU. */
    redist_base += cpu_id * (2 << 16);
    _gic_table[index].redist_hw_base[cpu_id] = redist_base;

    /* Wake the redistributor: clear GICR_WAKER.ProcessorSleep (bit 1) and
     * wait until ChildrenAsleep (bit 2) reads 0. */
    GIC_RDIST_WAKER(redist_base) &= ~(1 << 1);
    while (GIC_RDIST_WAKER(redist_base) & (1 << 2))
    {
    }

    /* Disable all sgi and ppi interrupt */
    GIC_RDISTSGI_ICENABLER0(redist_base) = 0xffffffff;
    arm_gicv3_wait_rwp(index, 0);

    /* Clear all interrupt pending */
    GIC_RDISTSGI_ICPENDR0(redist_base) = 0xffffffff;

    /* the corresponding interrupt is Group 1 or Non-secure Group 1. */
    GIC_RDISTSGI_IGROUPR0(redist_base, 0) = 0xffffffff;
    GIC_RDISTSGI_IGRPMODR0(redist_base, 0) = 0xffffffff;

    /* Configure default priorities for SGI 0:15 and PPI 16:31. */
    for (i = 0; i < 32; i += 4)
    {
        GIC_RDISTSGI_IPRIORITYR(redist_base, i) = 0xa0a0a0a0U;
    }

    /* Trigger level for PPI interrupts */
    GIC_RDISTSGI_ICFGR1(redist_base) = 0;

    return 0;
}
/**
 * Initialize the GIC CPU interface (system-register based, ICC_*) for the
 * calling CPU: enable the system-register interface, unmask all priorities
 * and enable Group 1 interrupt delivery.
 *
 * @param index    GIC instance index (must be < ARM_GIC_MAX_NR)
 * @param cpu_base CPU interface base address (recorded only; register access
 *                 goes through ICC_* system registers)
 *
 * @return 0 on success
 *
 * Fix: arm_gic_set_binary_point() now targets 'index' instead of a
 * hard-coded instance 0.
 */
int arm_gic_cpu_init(rt_uint64_t index, rt_uint64_t cpu_base)
{
    rt_uint64_t value;
    int cpu_id = rt_hw_cpu_id();

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    _gic_table[index].cpu_hw_base[cpu_id] = cpu_base;

    /* Enable the system-register interface (SRE bit). */
    value = arm_gic_get_system_register_enable_mask(index);
    value |= (1 << 0);
    arm_gic_set_system_register_enable_mask(index, value);

    SET_GICV3_REG(ICC_CTLR_EL1, 0l);

    /* Unmask every priority level on this CPU interface. */
    arm_gic_set_interface_prior_mask(index, 0xff);

    /* Enable group1 interrupt */
    value = 1;
    SET_GICV3_REG(ICC_IGRPEN1_EL1, value);

    arm_gic_set_binary_point(index, 0);

    /* CBPR = 1: ICC_BPR0_EL1 determines the preemption group for both
     * Group 0 and Group 1 interrupts. */
    value = 1;
    value |= 1 << 18; /* Targeted SGIs with affinity level 0 values of 0 - 255 are supported. */
    SET_GICV3_REG(ICC_CTLR_EL1, value);

    return 0;
}
/**
 * Print the distributor's identification: product generation (from
 * GICD_IIDR.ProductID), variant/revision ("rXpY"), IRQ capacity and whether
 * the security extension is implemented (GICD_TYPER bit 10).
 *
 * Fixes: GICD_IIDR is now read once instead of twice, and the variant/
 * revision field uses an 8-bit mask — the old 0xf mask dropped the Variant
 * bits [19:16] so "r%d" always printed 0.
 */
void arm_gic_dump_type(rt_uint64_t index)
{
    unsigned int gic_type;
    unsigned int gic_version;
    unsigned int gic_rp;
    unsigned int gic_iidr;

    gic_iidr = GIC_DIST_IIDR(_gic_table[index].dist_hw_base);
    gic_version = (gic_iidr >> 24) & 0xfUL;
    /* Variant[19:16] : Revision[15:12] packed into one byte */
    gic_rp = (gic_iidr >> 12) & 0xffUL;
    gic_type = GIC_DIST_TYPE(_gic_table[index].dist_hw_base);
    rt_kprintf("GICv3-%d r%dp%d on %p, max IRQs: %d, %s security extension(%08x)\n",
               (gic_version == 0) ? 500 : (gic_version == 2) ? 600 : 0,
               (gic_rp >> 4) & 0xF,
               gic_rp & 0xF,
               _gic_table[index].dist_hw_base,
               _gic_max_irq,
               gic_type & (1U << 10U) ? "has" : "no",
               gic_type);
}
/**
 * Dump the distributor state of instance 'index': highest pending IRQ and
 * the enable/pending/active bitmaps, 32 IRQs per printed word.
 *
 * Fix: the highest-pending query now uses 'index' instead of a hard-coded
 * instance 0, consistent with the _gic_table[index] accesses below.
 */
void arm_gic_dump(rt_uint64_t index)
{
    int i;
    unsigned int val;

    val = arm_gic_get_high_pending_irq(index);
    rt_kprintf("--- high pending priority: %d(%08x)\n", val, val);
    rt_kprintf("--- hw mask ---\n");
    for (i = 0; i < _gic_max_irq / 32; ++i)
    {
        rt_kprintf("0x%08x, ", GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base, i * 32));
    }
    rt_kprintf("\b\b\n--- hw pending ---\n");
    for (i = 0; i < _gic_max_irq / 32; ++i)
    {
        rt_kprintf("0x%08x, ", GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, i * 32));
    }
    rt_kprintf("\b\b\n--- hw active ---\n");
    for (i = 0; i < _gic_max_irq / 32; ++i)
    {
        rt_kprintf("0x%08x, ", GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base, i * 32));
    }
    rt_kprintf("\b\b\n");
}
/* Print the routing (IROUTER affinity) of every SPI on GIC instance 0. */
static void arm_gic_bind_dump(void)
{
#ifdef BSP_USING_GICV3
    int irq;

    for (irq = 32; irq < _gic_max_irq; irq++)
    {
        rt_kprintf("irq(%d) -> 0x%X\n", irq, arm_gic_get_router_cpu(0, irq));
    }
#endif /* BSP_USING_GICV3 */
}
/* Expose the raw GIC descriptor table (e.g. for AMP peers). */
rt_uint64_t *arm_gic_get_gic_table_addr(void)
{
    return (rt_uint64_t *)_gic_table;
}
/*
 * Dump the SGI/PPI enable, pending and active bitmaps from the calling
 * CPU's redistributor SGI frame (SGIs/PPIs are banked per CPU, so the
 * output is specific to the core running this command).
 */
static void arm_gic_sgi_dump(rt_uint64_t index)
{
    rt_int32_t cpu_id = rt_hw_cpu_id();

    rt_kprintf("redist_hw_base = 0x%X\n", _gic_table[index].redist_hw_base[cpu_id]);
    rt_kprintf("--- sgi mask ---\n");
    rt_kprintf("0x%08x\n", GIC_RDISTSGI_ISENABLER0(_gic_table[index].redist_hw_base[cpu_id]));
    rt_kprintf("--- sgi pending ---\n");
    rt_kprintf("0x%08x\n", GIC_RDISTSGI_ISPENDR0(_gic_table[index].redist_hw_base[cpu_id]));
    rt_kprintf("--- sgi active ---\n");
    rt_kprintf("0x%08x\n", GIC_RDISTSGI_ISACTIVER0(_gic_table[index].redist_hw_base[cpu_id]));
}
/*
 * msh shell command: print the full state (type, distributor bitmaps, SPI
 * routing, per-CPU SGI/PPI state) of GIC instance 0.
 */
long gic_dump(void)
{
    arm_gic_dump_type(0);
    arm_gic_dump(0);
    arm_gic_bind_dump();
    arm_gic_sgi_dump(0);
    return 0;
}
MSH_CMD_EXPORT(gic_dump, show gic status);
#endif /* defined(BSP_USING_GIC) && defined(BSP_USING_GICV3) */

View File

@ -0,0 +1,55 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-12-20 GuEe-GUI first version
*/
#include <rtthread.h>
#include <rthw.h>
#include <gtimer.h>
#include <cpuport.h>
#ifdef RT_USING_KTIME
#include <ktime.h>
#endif
#define EL1_PHY_TIMER_IRQ_NUM 30
static volatile rt_uint64_t timer_step;
/*
 * Tick timer ISR: re-arm the generic timer with the per-tick interval and
 * advance the kernel tick.  'vector' and 'parameter' are unused.
 */
static void rt_hw_timer_isr(int vector, void *parameter)
{
    rt_hw_set_gtimer_val(timer_step);
    rt_tick_increase();
}
/*
 * Install the tick ISR on the EL1 physical timer PPI, compute the counter
 * reload value for one OS tick (timer frequency / RT_TICK_PER_SECOND) and
 * start the timer on the calling core.
 */
void rt_hw_gtimer_init(void)
{
    rt_hw_interrupt_install(EL1_PHY_TIMER_IRQ_NUM, rt_hw_timer_isr, RT_NULL, "tick");
    rt_hw_isb();
    timer_step = rt_hw_get_gtimer_frq();
    rt_hw_dsb();
    timer_step /= RT_TICK_PER_SECOND;
    rt_hw_gtimer_local_enable();
}
/*
 * (Re)start the tick timer on the calling core: program the first interval,
 * unmask its PPI, then enable the timer.  Assumes rt_hw_gtimer_init() has
 * already computed timer_step.
 */
void rt_hw_gtimer_local_enable(void)
{
    rt_hw_gtimer_disable();
    rt_hw_set_gtimer_val(timer_step);
    rt_hw_interrupt_umask(EL1_PHY_TIMER_IRQ_NUM);
#ifdef RT_USING_KTIME
    rt_ktime_cputimer_init();
#endif
    rt_hw_gtimer_enable();
}
/* Stop the tick timer on the calling core and mask its PPI. */
void rt_hw_gtimer_local_disable(void)
{
    rt_hw_gtimer_disable();
    rt_hw_interrupt_mask(EL1_PHY_TIMER_IRQ_NUM);
}

View File

@ -0,0 +1,16 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-02-24 GuEe-GUI first version
*/
#include <hypercall.h>
/*
 * Ask the hypervisor to map [paddr, paddr + size) into this guest's stage-2
 * translation.  'paddr' is truncated down to a 4 KiB page boundary before
 * being passed as the range start.
 *
 * NOTE(review): hypercall number 120 and the flag word
 * (1 << 0) | (1 << 1) | (1 << 4) follow the host hypervisor's private ABI
 * (presumably read/write + attribute bits) - confirm against the host side.
 * If 'size' is not page aligned, the tail of the last page is not covered
 * by the requested range - verify callers pass aligned sizes.
 */
rt_err_t rt_hv_stage2_map(unsigned long paddr, unsigned long size)
{
    return rt_hw_hypercall(120, paddr & (~4095), (paddr & (~4095)) + size, (1 << 0) | (1 << 1) | (1 << 4), 0, 0, 0, 0);
}

View File

@ -0,0 +1,187 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-09-15 Bernard first version
*/
#ifndef __ARMV8_H__
#define __ARMV8_H__
#include <rtconfig.h>
#ifdef ARCH_USING_HW_THREAD_SELF
#define ARM64_THREAD_REG tpidr_el1
#endif /* ARCH_USING_HW_THREAD_SELF */
#ifdef __ASSEMBLY__
/*********************
* CONTEXT_OFFSET *
*********************/
#define CONTEXT_OFFSET_ELR_EL1 0x0
#define CONTEXT_OFFSET_SPSR_EL1 0x8
#define CONTEXT_OFFSET_SP_EL0 0x10
#define CONTEXT_OFFSET_X30 0x18
#define CONTEXT_OFFSET_FPCR 0x20
#define CONTEXT_OFFSET_FPSR 0x28
#define CONTEXT_OFFSET_X28 0x30
#define CONTEXT_OFFSET_X29 0x38
#define CONTEXT_OFFSET_X26 0x40
#define CONTEXT_OFFSET_X27 0x48
#define CONTEXT_OFFSET_X24 0x50
#define CONTEXT_OFFSET_X25 0x58
#define CONTEXT_OFFSET_X22 0x60
#define CONTEXT_OFFSET_X23 0x68
#define CONTEXT_OFFSET_X20 0x70
#define CONTEXT_OFFSET_X21 0x78
#define CONTEXT_OFFSET_X18 0x80
#define CONTEXT_OFFSET_X19 0x88
#define CONTEXT_OFFSET_X16 0x90
#define CONTEXT_OFFSET_X17 0x98
#define CONTEXT_OFFSET_X14 0xa0
#define CONTEXT_OFFSET_X15 0xa8
#define CONTEXT_OFFSET_X12 0xb0
#define CONTEXT_OFFSET_X13 0xb8
#define CONTEXT_OFFSET_X10 0xc0
#define CONTEXT_OFFSET_X11 0xc8
#define CONTEXT_OFFSET_X8 0xd0
#define CONTEXT_OFFSET_X9 0xd8
#define CONTEXT_OFFSET_X6 0xe0
#define CONTEXT_OFFSET_X7 0xe8
#define CONTEXT_OFFSET_X4 0xf0
#define CONTEXT_OFFSET_X5 0xf8
#define CONTEXT_OFFSET_X2 0x100
#define CONTEXT_OFFSET_X3 0x108
#define CONTEXT_OFFSET_X0 0x110
#define CONTEXT_OFFSET_X1 0x118
#define CONTEXT_OFFSET_Q31 0x120
#define CONTEXT_OFFSET_Q30 0x130
#define CONTEXT_OFFSET_Q29 0x140
#define CONTEXT_OFFSET_Q28 0x150
#define CONTEXT_OFFSET_Q27 0x160
#define CONTEXT_OFFSET_Q26 0x170
#define CONTEXT_OFFSET_Q25 0x180
#define CONTEXT_OFFSET_Q24 0x190
#define CONTEXT_OFFSET_Q23 0x1a0
#define CONTEXT_OFFSET_Q22 0x1b0
#define CONTEXT_OFFSET_Q21 0x1c0
#define CONTEXT_OFFSET_Q20 0x1d0
#define CONTEXT_OFFSET_Q19 0x1e0
#define CONTEXT_OFFSET_Q18 0x1f0
#define CONTEXT_OFFSET_Q17 0x200
#define CONTEXT_OFFSET_Q16 0x210
#define CONTEXT_OFFSET_Q15 0x220
#define CONTEXT_OFFSET_Q14 0x230
#define CONTEXT_OFFSET_Q13 0x240
#define CONTEXT_OFFSET_Q12 0x250
#define CONTEXT_OFFSET_Q11 0x260
#define CONTEXT_OFFSET_Q10 0x270
#define CONTEXT_OFFSET_Q9 0x280
#define CONTEXT_OFFSET_Q8 0x290
#define CONTEXT_OFFSET_Q7 0x2a0
#define CONTEXT_OFFSET_Q6 0x2b0
#define CONTEXT_OFFSET_Q5 0x2c0
#define CONTEXT_OFFSET_Q4 0x2d0
#define CONTEXT_OFFSET_Q3 0x2e0
#define CONTEXT_OFFSET_Q2 0x2f0
#define CONTEXT_OFFSET_Q1 0x300
#define CONTEXT_OFFSET_Q0 0x310
#define CONTEXT_FPU_SIZE (32 * 16)
#define CONTEXT_SIZE (0x120 + CONTEXT_FPU_SIZE)
#else /* !__ASSEMBLY__ */
#include <rttypes.h>
typedef struct { rt_uint64_t value[2]; } rt_uint128_t;
/*
 * Exception / context-switch frame as laid out in memory by the vector and
 * context code.  Field order mirrors the CONTEXT_OFFSET_* constants above:
 * special registers first, then x28..x0 stored pairwise in descending
 * register order, then the 32 x 128-bit SIMD registers.
 * (Note: despite the old "without VFP registers" wording, the frame does
 * include the fpu[32] area — CONTEXT_SIZE covers it.)
 */
struct rt_hw_exp_stack
{
    rt_uint64_t pc;       /* saved ELR_EL1: resume address */
    rt_uint64_t cpsr;     /* saved SPSR_EL1: processor state */
    rt_uint64_t sp_el0;   /* user stack pointer */
    rt_uint64_t x30;      /* link register */
    rt_uint64_t fpcr;     /* FP control register */
    rt_uint64_t fpsr;     /* FP status register */
    rt_uint64_t x28;
    rt_uint64_t x29;      /* frame pointer */
    rt_uint64_t x26;
    rt_uint64_t x27;
    rt_uint64_t x24;
    rt_uint64_t x25;
    rt_uint64_t x22;
    rt_uint64_t x23;
    rt_uint64_t x20;
    rt_uint64_t x21;
    rt_uint64_t x18;
    rt_uint64_t x19;
    rt_uint64_t x16;
    rt_uint64_t x17;
    rt_uint64_t x14;
    rt_uint64_t x15;
    rt_uint64_t x12;
    rt_uint64_t x13;
    rt_uint64_t x10;
    rt_uint64_t x11;
    rt_uint64_t x8;
    rt_uint64_t x9;
    rt_uint64_t x6;
    rt_uint64_t x7;
    rt_uint64_t x4;
    rt_uint64_t x5;
    rt_uint64_t x2;
    rt_uint64_t x3;
    rt_uint64_t x0;
    rt_uint64_t x1;
    /* per CONTEXT_OFFSET_Q*: fpu[0] holds q31, fpu[31] holds q0 */
    rt_uint128_t fpu[32];
};
void rt_hw_show_register(struct rt_hw_exp_stack *regs);
#define SP_ELx ((unsigned long)0x01)
#define SP_EL0 ((unsigned long)0x00)
#define PSTATE_EL1 ((unsigned long)0x04)
#define PSTATE_EL2 ((unsigned long)0x08)
#define PSTATE_EL3 ((unsigned long)0x0c)
rt_ubase_t rt_hw_get_current_el(void);
void rt_hw_set_elx_env(void);
void rt_hw_set_current_vbar(rt_ubase_t addr);
/* ESR:generic */
#define ARM64_ABORT_WNR(esr) ((esr) & 0x40)
#define ARM64_ESR_EXTRACT_EC(esr) ((((esr) >> 26) & 0x3fU))
#define ARM64_ESR_EXTRACT_FSC(esr) ((esr) & 0x3f)
/* ESR:EC */
#define ARM64_EC_INST_ABORT_FROM_LO_EXCEPTION (0b100000)
#define ARM64_EC_INST_ABORT_WITHOUT_A_CHANGE (0b100001)
#define ARM64_EC_DATA_ABORT_FROM_LO_EXCEPTION (0b100100)
#define ARM64_EC_DATA_ABORT_WITHOUT_A_CHANGE (0b100101)
/* ESR:FSC */
#define ARM64_FSC_TRANSLATION_FAULT_LEVEL_0 (0b000100)
#define ARM64_FSC_TRANSLATION_FAULT_LEVEL_1 (0b000101)
#define ARM64_FSC_TRANSLATION_FAULT_LEVEL_2 (0b000110)
#define ARM64_FSC_TRANSLATION_FAULT_LEVEL_3 (0b000111)
#define ARM64_FSC_PERMISSION_FAULT_LEVEL_0 (0b001100)
#define ARM64_FSC_PERMISSION_FAULT_LEVEL_1 (0b001101)
#define ARM64_FSC_PERMISSION_FAULT_LEVEL_2 (0b001110)
#define ARM64_FSC_PERMISSION_FAULT_LEVEL_3 (0b001111)
#define ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_0 (0b001000)
#define ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_1 (0b001001)
#define ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_2 (0b001010)
#define ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_3 (0b001011)
#endif /* __ASSEMBLY__ */
#endif

View File

@ -0,0 +1,83 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven the first version
* 2023-07-13 GuEe-GUI append Q16 ~ Q31
*/
#ifndef __ARM64_ASM_FPU_H__
#define __ARM64_ASM_FPU_H__
.macro SAVE_FPU, reg
str q0, [\reg, #-0x10]!
str q1, [\reg, #-0x10]!
str q2, [\reg, #-0x10]!
str q3, [\reg, #-0x10]!
str q4, [\reg, #-0x10]!
str q5, [\reg, #-0x10]!
str q6, [\reg, #-0x10]!
str q7, [\reg, #-0x10]!
str q8, [\reg, #-0x10]!
str q9, [\reg, #-0x10]!
str q10, [\reg, #-0x10]!
str q11, [\reg, #-0x10]!
str q12, [\reg, #-0x10]!
str q13, [\reg, #-0x10]!
str q14, [\reg, #-0x10]!
str q15, [\reg, #-0x10]!
str q16, [\reg, #-0x10]!
str q17, [\reg, #-0x10]!
str q18, [\reg, #-0x10]!
str q19, [\reg, #-0x10]!
str q20, [\reg, #-0x10]!
str q21, [\reg, #-0x10]!
str q22, [\reg, #-0x10]!
str q23, [\reg, #-0x10]!
str q24, [\reg, #-0x10]!
str q25, [\reg, #-0x10]!
str q26, [\reg, #-0x10]!
str q27, [\reg, #-0x10]!
str q28, [\reg, #-0x10]!
str q29, [\reg, #-0x10]!
str q30, [\reg, #-0x10]!
str q31, [\reg, #-0x10]!
.endm
.macro RESTORE_FPU, reg
ldr q31, [\reg], #0x10
ldr q30, [\reg], #0x10
ldr q29, [\reg], #0x10
ldr q28, [\reg], #0x10
ldr q27, [\reg], #0x10
ldr q26, [\reg], #0x10
ldr q25, [\reg], #0x10
ldr q24, [\reg], #0x10
ldr q23, [\reg], #0x10
ldr q22, [\reg], #0x10
ldr q21, [\reg], #0x10
ldr q20, [\reg], #0x10
ldr q19, [\reg], #0x10
ldr q18, [\reg], #0x10
ldr q17, [\reg], #0x10
ldr q16, [\reg], #0x10
ldr q15, [\reg], #0x10
ldr q14, [\reg], #0x10
ldr q13, [\reg], #0x10
ldr q12, [\reg], #0x10
ldr q11, [\reg], #0x10
ldr q10, [\reg], #0x10
ldr q9, [\reg], #0x10
ldr q8, [\reg], #0x10
ldr q7, [\reg], #0x10
ldr q6, [\reg], #0x10
ldr q5, [\reg], #0x10
ldr q4, [\reg], #0x10
ldr q3, [\reg], #0x10
ldr q2, [\reg], #0x10
ldr q1, [\reg], #0x10
ldr q0, [\reg], #0x10
.endm
#endif /* __ARM64_ASM_FPU_H__ */

View File

@ -0,0 +1,45 @@
/*
* Copyright (c) 2006-2023 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-03-12 WangXiaoyao the first version
*/
#ifndef __ASM_GENERIC_H__
#define __ASM_GENERIC_H__
/* use to mark a start point where every task start from */
#define START_POINT(funcname) \
.global funcname; \
.type funcname, %function; \
funcname: \
.cfi_sections .debug_frame, .eh_frame; \
.cfi_startproc; \
.cfi_undefined x30
#define START_POINT_END(name) \
.cfi_endproc; \
.size name, .-name;
#define TRACE_SYMBOL(name)
.macro NEVER_RETURN
#ifdef RT_USING_DEBUG
b .
#endif /* RT_USING_DEBUG */
.endm
/*
 * Load the current thread pointer into \dst.
 * With ARCH_USING_HW_THREAD_SELF it reads tpidr_el1 directly; otherwise it
 * calls rt_thread_self (clobbering x0 and the link register).
 *
 * Fix: the copy into the destination was written as 'mov dst, x0' — the
 * macro argument was missing its '\' prefix, so 'dst' was taken literally
 * instead of being substituted.
 */
.macro GET_THREAD_SELF, dst:req
#ifdef ARCH_USING_HW_THREAD_SELF
    mrs x0, tpidr_el1
#else /* !ARCH_USING_HW_THREAD_SELF */
    bl rt_thread_self
#endif /* ARCH_USING_HW_THREAD_SELF */
    .if \dst != x0
        mov \dst, x0
    .endif
.endm
#endif /* __ASM_GENERIC_H__ */

View File

@ -0,0 +1,39 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-12-18 RT-Thread the first version
*/
#ifndef __CACHE_H__
#define __CACHE_H__
#include <rtdef.h>
void __asm_invalidate_icache_all(void);
void rt_hw_dcache_flush_all(void);
void rt_hw_dcache_invalidate_all(void);
void rt_hw_dcache_flush_range(unsigned long start_addr, unsigned long size);
void rt_hw_cpu_dcache_clean(void *addr, unsigned long size);
void rt_hw_cpu_dcache_invalidate(void *start_addr, unsigned long size);
/* Invalidate the entire instruction cache for the inner-shareable domain. */
static inline void rt_hw_icache_invalidate_all(void)
{
    /* wait for previous modification to complete */
    __asm__ volatile ("dsb ishst");
    /* ic ialluis: invalidate all I-caches, inner shareable */
    __asm__ volatile ("ic ialluis");
    /* wait for ic to retire */
    __asm__ volatile ("dsb nsh");
    /* flush instruction pipeline */
    __asm__ volatile ("isb");
}
void rt_hw_cpu_icache_invalidate(void *addr, rt_size_t size);
void rt_hw_cpu_dcache_clean_and_invalidate(void *addr, rt_size_t size);
#endif /* __CACHE_H__ */

View File

@ -0,0 +1,71 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
*/
#ifndef __ARM64_INC_CONTEXT_H__
#define __ARM64_INC_CONTEXT_H__
#include "armv8.h"
.macro SAVE_CONTEXT_SWITCH, tmpx, tmp2x
/* Save the entire context. */
SAVE_FPU sp
stp x19, x20, [sp, #-0x10]!
stp x21, x22, [sp, #-0x10]!
stp x23, x24, [sp, #-0x10]!
stp x25, x26, [sp, #-0x10]!
stp x27, x28, [sp, #-0x10]!
mrs \tmpx, sp_el0
stp x29, \tmpx, [sp, #-0x10]!
mrs \tmpx, fpcr
mrs \tmp2x, fpsr
stp \tmpx, \tmp2x, [sp, #-0x10]!
mov \tmpx, #((3 << 6) | 0x5) /* el1h, disable interrupt */
stp x30, \tmpx, [sp, #-0x10]!
.endm
.macro SAVE_CONTEXT_SWITCH_FAST
/* Save the entire context. */
add sp, sp, #-1 * CONTEXT_FPU_SIZE
add sp, sp, #-7 * 16
mov x19, #((3 << 6) | 0x4 | 0x1) /* el1h, disable interrupt */
stp lr, x19, [sp, #-0x10]!
.endm
.macro _RESTORE_CONTEXT_SWITCH
ldp x30, x19, [sp], #0x10 /* SPSR and ELR. */
msr elr_el1, x30
msr spsr_el1, x19
/* restore NEON */
ldp x19, x20, [sp], #0x10
msr fpcr, x19
msr fpsr, x20
ldp x29, x19, [sp], #0x10
msr sp_el0, x19
ldp x27, x28, [sp], #0x10
ldp x25, x26, [sp], #0x10
ldp x23, x24, [sp], #0x10
ldp x21, x22, [sp], #0x10
ldp x19, x20, [sp], #0x10
RESTORE_FPU sp
eret
.endm
#endif /* __ARM64_INC_CONTEXT_H__ */

View File

@ -0,0 +1,65 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-09-15 Bernard first version
*/
#ifndef __CP15_H__
#define __CP15_H__
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline
#endif
#define __WFI() __asm__ volatile ("wfi":::"memory")
#define __WFE() __asm__ volatile ("wfe":::"memory")
#define __SEV() __asm__ volatile ("sev")
/**
  \brief   Instruction Synchronization Barrier
  \details Flushes the processor pipeline so that all following instructions
           are re-fetched after any prior context-changing operations.
 */
__STATIC_FORCEINLINE void __ISB(void)
{
    __asm__ volatile ("isb 0xF":::"memory");
}
/**
\brief Data Synchronization Barrier
\details Acts as a special kind of Data Memory Barrier.
It completes when all explicit memory accesses before this instruction complete.
*/
__STATIC_FORCEINLINE void __DSB(void)
{
__asm__ volatile ("dsb 0xF":::"memory");
}
/**
\brief Data Memory Barrier
\details Ensures the apparent order of the explicit memory operations before
and after the instruction, without ensuring their completion.
*/
__STATIC_FORCEINLINE void __DMB(void)
{
__asm__ volatile ("dmb 0xF":::"memory");
}
unsigned long rt_cpu_get_smp_id(void);
void rt_cpu_mmu_disable(void);
void rt_cpu_mmu_enable(void);
void rt_cpu_tlb_set(volatile unsigned long*);
void rt_cpu_dcache_clean_flush(void);
void rt_cpu_icache_flush(void);
void rt_cpu_vector_set_base(rt_ubase_t addr);
void rt_hw_mmu_init(void);
void rt_hw_vector_init(void);
void set_timer_counter(unsigned int counter);
void set_timer_control(unsigned int control);
#endif

View File

@ -0,0 +1,92 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef __RT_HW_CPU_H__
#define __RT_HW_CPU_H__
#include <rtdef.h>
#include <cpuport.h>
#include <mm_aspace.h>
#ifdef RT_USING_OFW
#include <drivers/ofw.h>
#endif
#define ID_ERROR __INT64_MAX__
#define MPIDR_AFFINITY_MASK 0x000000ff00ffffffUL
/*
 * Per-enable-method CPU bring-up operations (e.g. selected by the device
 * tree "enable-method" string).
 */
struct cpu_ops_t
{
    const char *method;                                /* enable-method name this ops set matches */
    int (*cpu_init)(rt_uint32_t id, void *param);      /* prepare CPU 'id' for boot */
    int (*cpu_boot)(rt_uint32_t id, rt_uint64_t entry); /* release CPU 'id' to 'entry' */
    void (*cpu_shutdown)(void);                        /* power down the calling CPU */
};
/* Build an "s3_<op1>_<crn>_<crm>_<op2>" system-register token for mrs/msr. */
#define sysreg_32(op1, crn, crm, op2) s3_##op1 ##_##crn ##_##crm ##_##op2
#define sysreg_64(op1, crn, crm, op2) sysreg_32(op1, crn, crm, op2)

/* MPIDR_EL1 affinity-field helpers.
 * (Fix: removed a redundant second definition of MPIDR_AFFINITY_MASK that
 * duplicated the identical one earlier in this header.) */
#define MPIDR_LEVEL_BITS_SHIFT 3
#define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT)
#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_LEVEL_SHIFT(level) (((1 << (level)) >> 1) << MPIDR_LEVEL_BITS_SHIFT)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) (((mpidr) >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)
/* GIC registers */
#define ICC_IAR0_SYS sysreg_64(0, c12, c8, 0)
#define ICC_IAR1_SYS sysreg_64(0, c12, c12, 0)
#define ICC_EOIR0_SYS sysreg_64(0, c12, c8, 1)
#define ICC_EOIR1_SYS sysreg_64(0, c12, c12, 1)
#define ICC_HPPIR0_SYS sysreg_64(0, c12, c8, 2)
#define ICC_HPPIR1_SYS sysreg_64(0, c12, c12, 2)
#define ICC_BPR0_SYS sysreg_64(0, c12, c8, 3)
#define ICC_BPR1_SYS sysreg_64(0, c12, c12, 3)
#define ICC_DIR_SYS sysreg_64(0, c12, c11, 1)
#define ICC_PMR_SYS sysreg_64(0, c4, c6, 0)
#define ICC_RPR_SYS sysreg_64(0, c12, c11, 3)
#define ICC_CTLR_SYS sysreg_64(0, c12, c12, 4)
#define ICC_SRE_SYS sysreg_64(0, c12, c12, 5)
#define ICC_IGRPEN0_SYS sysreg_64(0, c12, c12, 6)
#define ICC_IGRPEN1_SYS sysreg_64(0, c12, c12, 7)
#define ICC_SGI0R_SYS sysreg_64(0, c12, c11, 7)
#define ICC_SGI1R_SYS sysreg_64(0, c12, c11, 5)
#define ICC_ASGI1R_SYS sysreg_64(0, c12, c11, 6)
/* Arch timer registers */
#define CNTP_CTL CNTP_CTL_EL0 /* EL1 Physical Timer */
#define CNTHP_CTL CNTHP_CTL_EL2 /* EL2 Non-secure Physical Timer */
#define CNTHPS_CTL CNTHPS_CTL_EL2 /* EL2 Secure Physical Timer */
#define CNTPS_CTL CNTPS_CTL_EL1 /* EL3 Physical Timer */
#define CNTV_CTL CNTV_CTL_EL0 /* EL1 Virtual Timer */
#define CNTHV_CTL CNTHV_CTL_EL2 /* EL2 Non-secure Virtual Timer */
#define CNTHVS_CTL CNTHVS_CTL_EL2 /* EL2 Secure Virtual Timer */
#define CNTP_CVAL CNTP_CVAL_EL0
#define CNTHP_CVAL CNTHP_CVAL_EL2
#define CNTHPS_CVAL CNTHPS_CVAL_EL2
#define CNTPS_CVAL CNTPS_CVAL_EL1
#define CNTV_CVAL CNTV_CVAL_EL0
#define CNTHV_CVAL CNTHV_CVAL_EL2
#define CNTHVS_CVAL CNTHVS_CVAL_EL2
#define CNTP_TVAL CNTP_TVAL_EL0
#define CNTHP_TVAL CNTHP_TVAL_EL2
#define CNTHPS_TVAL CNTHPS_TVAL_EL2
#define CNTPS_TVAL CNTPS_TVAL_EL1
#define CNTV_TVAL CNTV_TVAL_EL0
#define CNTHV_TVAL CNTHV_TVAL_EL2
#define CNTHVS_TVAL CNTHVS_TVAL_EL2
#define CNTPCT CNTPCT_EL0
#define CNTVCT CNTVCT_EL0
#define CNTFRQ CNTFRQ_EL0
extern rt_uint64_t rt_cpu_mpidr_table[];
#endif /* __RT_HW_CPU_H__ */

View File

@ -0,0 +1,21 @@
#ifndef __CPU_OPS_COMMON_H__
#define __CPU_OPS_COMMON_H__
#include <rthw.h>
#include <rtthread.h>
#include <mmu.h>
#include "entry_point.h"
/*
 * Translate the secondary-CPU entry point (_secondary_cpu_entry) from its
 * kernel virtual address to a physical address for the boot protocol.
 * Returns 0 when the translation fails.
 *
 * NOTE(review): LOG_E is used here but this header does not set up a
 * DBG_TAG / include the log header - presumably provided by the including
 * .c file; verify.
 */
static inline rt_uint64_t get_secondary_entry_pa(void)
{
    rt_uint64_t secondary_entry_pa = (rt_uint64_t)rt_kmem_v2p(_secondary_cpu_entry);

    if (!secondary_entry_pa)
    {
        LOG_E("Failed to translate 'secondary_entry_pa' to physical address");
        return 0;
    }

    return secondary_entry_pa;
}
#endif /* __CPU_OPS_COMMON_H__ */

View File

@ -0,0 +1,72 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-10-25 Shell Move ffs to cpuport, add general implementation
* by inline assembly
* 2024-01-18 Shell support rt_hw_thread_self to improve overall performance
*/
#ifndef CPUPORT_H__
#define CPUPORT_H__
#include <armv8.h>
#include <rtcompiler.h>
#include <rttypes.h>
#ifdef RT_USING_SMP
/**
* Spinlock
*/
typedef struct
{
rt_uint32_t value;
} rt_hw_spinlock_t;
#endif /* RT_USING_SMP */
#define rt_hw_barrier(cmd, ...) \
__asm__ volatile (RT_STRINGIFY(cmd) " "RT_STRINGIFY(__VA_ARGS__):::"memory")
#define rt_hw_isb() rt_hw_barrier(isb)
#define rt_hw_dmb() rt_hw_barrier(dmb, ish)
#define rt_hw_wmb() rt_hw_barrier(dmb, ishst)
#define rt_hw_rmb() rt_hw_barrier(dmb, ishld)
#define rt_hw_dsb() rt_hw_barrier(dsb, ish)
#define rt_hw_wfi() rt_hw_barrier(wfi)
#define rt_hw_wfe() rt_hw_barrier(wfe)
#define rt_hw_sev() rt_hw_barrier(sev)
#define rt_hw_cpu_relax() rt_hw_barrier(yield)
#define rt_hw_sysreg_write(sysreg, val) \
__asm__ volatile ("msr "RT_STRINGIFY(sysreg)", %0"::"r"((rt_uint64_t)(val)))
#define rt_hw_sysreg_read(sysreg, val) \
__asm__ volatile ("mrs %0, "RT_STRINGIFY(sysreg)"":"=r"((val)))
void _thread_start(void);
#ifdef ARCH_USING_HW_THREAD_SELF
/* Read the current thread pointer cached in ARM64_THREAD_REG (tpidr_el1),
 * avoiding a call into the scheduler. */
rt_inline struct rt_thread *rt_hw_thread_self(void)
{
    struct rt_thread *thread;
    __asm__ volatile ("mrs %0, " RT_STRINGIFY(ARM64_THREAD_REG) :"=r"(thread));
    return thread;
}
/* Publish 'thread' as the current thread by writing ARM64_THREAD_REG. */
rt_inline void rt_hw_thread_set_self(struct rt_thread *thread)
{
    __asm__ volatile ("msr " RT_STRINGIFY(ARM64_THREAD_REG) ", %0"::"r"(thread));
}
#endif /* ARCH_USING_HW_THREAD_SELF */
#endif /*CPUPORT_H__*/

View File

@ -0,0 +1,13 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef __ENTRY_POINT_H__
#define __ENTRY_POINT_H__
extern void _secondary_cpu_entry(void);
#endif /* __ENTRY_POINT_H__ */

View File

@ -0,0 +1,62 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-20 Bernard first version
*/
#ifndef __GIC_H__
#define __GIC_H__
#include <rthw.h>
#include <board.h>
int arm_gic_get_active_irq(rt_uint64_t index);
void arm_gic_ack(rt_uint64_t index, int irq);
void arm_gic_mask(rt_uint64_t index, int irq);
void arm_gic_umask(rt_uint64_t index, int irq);
rt_uint64_t arm_gic_get_pending_irq(rt_uint64_t index, int irq);
void arm_gic_set_pending_irq(rt_uint64_t index, int irq);
void arm_gic_clear_pending_irq(rt_uint64_t index, int irq);
void arm_gic_set_configuration(rt_uint64_t index, int irq, uint32_t config);
rt_uint64_t arm_gic_get_configuration(rt_uint64_t index, int irq);
void arm_gic_clear_active(rt_uint64_t index, int irq);
void arm_gic_set_cpu(rt_uint64_t index, int irq, unsigned int cpumask);
rt_uint64_t arm_gic_get_target_cpu(rt_uint64_t index, int irq);
void arm_gic_set_priority(rt_uint64_t index, int irq, rt_uint64_t priority);
rt_uint64_t arm_gic_get_priority(rt_uint64_t index, int irq);
void arm_gic_set_interface_prior_mask(rt_uint64_t index, rt_uint64_t priority);
rt_uint64_t arm_gic_get_interface_prior_mask(rt_uint64_t index);
void arm_gic_set_binary_point(rt_uint64_t index, rt_uint64_t binary_point);
rt_uint64_t arm_gic_get_binary_point(rt_uint64_t index);
rt_uint64_t arm_gic_get_irq_status(rt_uint64_t index, int irq);
void arm_gic_send_sgi(rt_uint64_t index, int irq, rt_uint64_t target_list, rt_uint64_t filter_list);
rt_uint64_t arm_gic_get_high_pending_irq(rt_uint64_t index);
rt_uint64_t arm_gic_get_interface_id(rt_uint64_t index);
void arm_gic_set_group(rt_uint64_t index, int irq, rt_uint64_t group);
rt_uint64_t arm_gic_get_group(rt_uint64_t index, int irq);
int arm_gic_dist_init(rt_uint64_t index, rt_uint64_t dist_base, int irq_start);
int arm_gic_cpu_init(rt_uint64_t index, rt_uint64_t cpu_base);
void arm_gic_dump_type(rt_uint64_t index);
void arm_gic_dump(rt_uint64_t index);
#endif

View File

@ -0,0 +1,198 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-20 Bernard first version
* 2014-04-03 Grissiom many enhancements
* 2018-11-22 Jesven add rt_hw_ipi_send()
* add rt_hw_ipi_handler_install()
*/
#ifndef __GICV3_H__
#define __GICV3_H__
#include <rtdef.h>
#if defined(BSP_USING_GIC) && defined(BSP_USING_GICV3)
#ifndef ARM_GIC_CPU_NUM
#define ARM_GIC_CPU_NUM RT_CPUS_NR
#endif
#define GICV3_ROUTED_TO_ALL 1UL
#define GICV3_ROUTED_TO_SPEC 0UL
#define GET_GICV3_REG(reg, out) __asm__ volatile ("mrs %0, " reg:"=r"(out)::"memory");
#define SET_GICV3_REG(reg, in) __asm__ volatile ("msr " reg ", %0"::"r"(in):"memory");
/* AArch64 System register interface to GICv3 */
#define ICC_IAR0_EL1 "S3_0_C12_C8_0"
#define ICC_IAR1_EL1 "S3_0_C12_C12_0"
#define ICC_EOIR0_EL1 "S3_0_C12_C8_1"
#define ICC_EOIR1_EL1 "S3_0_C12_C12_1"
#define ICC_HPPIR0_EL1 "S3_0_C12_C8_2"
#define ICC_HPPIR1_EL1 "S3_0_C12_C12_2"
#define ICC_BPR0_EL1 "S3_0_C12_C8_3"
#define ICC_BPR1_EL1 "S3_0_C12_C12_3"
#define ICC_DIR_EL1 "S3_0_C12_C11_1"
#define ICC_PMR_EL1 "S3_0_C4_C6_0"
#define ICC_RPR_EL1 "S3_0_C12_C11_3"
#define ICC_CTLR_EL1 "S3_0_C12_C12_4"
#define ICC_CTLR_EL3 "S3_6_C12_C12_4"
#define ICC_SRE_EL1 "S3_0_C12_C12_5"
#define ICC_SRE_EL2 "S3_4_C12_C9_5"
#define ICC_SRE_EL3 "S3_6_C12_C12_5"
#define ICC_IGRPEN0_EL1 "S3_0_C12_C12_6"
#define ICC_IGRPEN1_EL1 "S3_0_C12_C12_7"
#define ICC_IGRPEN1_EL3 "S3_6_C12_C12_7"
#define ICC_SGI0R_EL1 "S3_0_C12_C11_7"
#define ICC_SGI1R_EL1 "S3_0_C12_C11_5"
#define ICC_ASGI1R_EL1 "S3_0_C12_C11_6"
/* Macro to access the Distributor Control Register (GICD_CTLR) */
#define GICD_CTLR_RWP (1U << 31)
#define GICD_CTLR_E1NWF (1U << 7)
#define GICD_CTLR_DS (1U << 6)
#define GICD_CTLR_ARE_NS (1U << 5)
#define GICD_CTLR_ARE_S (1U << 4)
#define GICD_CTLR_ENGRP1S (1U << 2)
#define GICD_CTLR_ENGRP1NS (1U << 1)
#define GICD_CTLR_ENGRP0 (1U << 0)
/* Macro to access the Redistributor Control Register (GICR_CTLR) */
#define GICR_CTLR_UWP (1U << 31)
#define GICR_CTLR_DPG1S (1U << 26)
#define GICR_CTLR_DPG1NS (1U << 25)
#define GICR_CTLR_DPG0 (1U << 24)
#define GICR_CTLR_RWP (1U << 3)
#define GICR_CTLR_IR (1U << 2)
#define GICR_CTLR_CES (1U << 1)
#define GICR_CTLR_EnableLPI (1U << 0)
/* Macro to access the Generic Interrupt Controller Interface (GICC) */
#define GIC_CPU_CTRL(hw_base) HWREG32((hw_base) + 0x00U)
#define GIC_CPU_PRIMASK(hw_base) HWREG32((hw_base) + 0x04U)
#define GIC_CPU_BINPOINT(hw_base) HWREG32((hw_base) + 0x08U)
#define GIC_CPU_INTACK(hw_base) HWREG32((hw_base) + 0x0cU)
#define GIC_CPU_EOI(hw_base) HWREG32((hw_base) + 0x10U)
#define GIC_CPU_RUNNINGPRI(hw_base) HWREG32((hw_base) + 0x14U)
#define GIC_CPU_HIGHPRI(hw_base) HWREG32((hw_base) + 0x18U)
#define GIC_CPU_IIDR(hw_base) HWREG32((hw_base) + 0xFCU)
/* Macro to access the Generic Interrupt Controller Distributor (GICD) */
#define GIC_DIST_CTRL(hw_base) HWREG32((hw_base) + 0x000U)
#define GIC_DIST_TYPE(hw_base) HWREG32((hw_base) + 0x004U)
#define GIC_DIST_IIDR(hw_base) HWREG32((hw_base) + 0x008U)
#define GIC_DIST_IGROUP(hw_base, n) HWREG32((hw_base) + 0x080U + ((n) / 32U) * 4U)
#define GIC_DIST_ENABLE_SET(hw_base, n) HWREG32((hw_base) + 0x100U + ((n) / 32U) * 4U)
#define GIC_DIST_ENABLE_CLEAR(hw_base, n) HWREG32((hw_base) + 0x180U + ((n) / 32U) * 4U)
#define GIC_DIST_PENDING_SET(hw_base, n) HWREG32((hw_base) + 0x200U + ((n) / 32U) * 4U)
#define GIC_DIST_PENDING_CLEAR(hw_base, n) HWREG32((hw_base) + 0x280U + ((n) / 32U) * 4U)
#define GIC_DIST_ACTIVE_SET(hw_base, n) HWREG32((hw_base) + 0x300U + ((n) / 32U) * 4U)
#define GIC_DIST_ACTIVE_CLEAR(hw_base, n) HWREG32((hw_base) + 0x380U + ((n) / 32U) * 4U)
#define GIC_DIST_PRI(hw_base, n) HWREG32((hw_base) + 0x400U + ((n) / 4U) * 4U)
#define GIC_DIST_TARGET(hw_base, n) HWREG32((hw_base) + 0x800U + ((n) / 4U) * 4U)
#define GIC_DIST_CONFIG(hw_base, n) HWREG32((hw_base) + 0xc00U + ((n) / 16U) * 4U)
#define GIC_DIST_SOFTINT(hw_base) HWREG32((hw_base) + 0xf00U)
#define GIC_DIST_CPENDSGI(hw_base, n) HWREG32((hw_base) + 0xf10U + ((n) / 4U) * 4U)
#define GIC_DIST_SPENDSGI(hw_base, n) HWREG32((hw_base) + 0xf20U + ((n) / 4U) * 4U)
#define GIC_DIST_ICPIDR2(hw_base) HWREG32((hw_base) + 0xfe8U)
#define GIC_DIST_IROUTER(hw_base, n) HWREG64((hw_base) + 0x6000U + (n) * 8U)
/* SGI base address is at 64K offset from Redistributor base address */
#define GIC_RSGI_OFFSET 0x10000
/* Macro to access the Generic Interrupt Controller Redistributor (GICR) */
#define GIC_RDIST_CTRL(hw_base) HWREG32((hw_base) + 0x000U)
#define GIC_RDIST_IIDR(hw_base) HWREG32((hw_base) + 0x004U)
#define GIC_RDIST_TYPER(hw_base) HWREG64((hw_base) + 0x008U)
#define GIC_RDIST_TSTATUSR(hw_base) HWREG32((hw_base) + 0x010U)
#define GIC_RDIST_WAKER(hw_base) HWREG32((hw_base) + 0x014U)
#define GIC_RDIST_SETLPIR(hw_base) HWREG32((hw_base) + 0x040U)
#define GIC_RDIST_CLRLPIR(hw_base) HWREG32((hw_base) + 0x048U)
#define GIC_RDIST_PROPBASER(hw_base) HWREG32((hw_base) + 0x070U)
#define GIC_RDIST_PENDBASER(hw_base) HWREG32((hw_base) + 0x078U)
#define GIC_RDIST_INVLPIR(hw_base) HWREG32((hw_base) + 0x0A0U)
#define GIC_RDIST_INVALLR(hw_base) HWREG32((hw_base) + 0x0B0U)
#define GIC_RDIST_SYNCR(hw_base) HWREG32((hw_base) + 0x0C0U)
#define GIC_RDISTSGI_IGROUPR0(hw_base, n) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x080U + (n) * 4U)
#define GIC_RDISTSGI_ISENABLER0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x100U)
#define GIC_RDISTSGI_ICENABLER0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x180U)
#define GIC_RDISTSGI_ISPENDR0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x200U)
#define GIC_RDISTSGI_ICPENDR0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x280U)
#define GIC_RDISTSGI_ISACTIVER0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x300U)
#define GIC_RDISTSGI_ICACTIVER0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x380U)
#define GIC_RDISTSGI_IPRIORITYR(hw_base, n) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x400U + ((n) / 4U) * 4U)
#define GIC_RDISTSGI_ICFGR0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0xC00U)
#define GIC_RDISTSGI_ICFGR1(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0xC04U)
#define GIC_RDISTSGI_IGRPMODR0(hw_base, n) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0xD00U + (n) * 4)
#define GIC_RDISTSGI_NSACR(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0xE00U)
/* Per-controller bookkeeping; one instance per GIC in the driver's table. */
struct arm_gic
{
    rt_uint64_t offset;                          /* the first interrupt index in the vector table */
    rt_uint64_t redist_hw_base[ARM_GIC_CPU_NUM]; /* per-CPU base address of the GIC redistributor */
    rt_uint64_t dist_hw_base;                    /* the base address of the GIC distributor */
    rt_uint64_t cpu_hw_base[ARM_GIC_CPU_NUM];    /* per-CPU base address of the GIC CPU interface */
};
int arm_gic_get_active_irq(rt_uint64_t index);
void arm_gic_ack(rt_uint64_t index, int irq);
void arm_gic_mask(rt_uint64_t index, int irq);
void arm_gic_umask(rt_uint64_t index, int irq);
rt_uint64_t arm_gic_get_pending_irq(rt_uint64_t index, int irq);
void arm_gic_set_pending_irq(rt_uint64_t index, int irq);
void arm_gic_clear_pending_irq(rt_uint64_t index, int irq);
void arm_gic_set_configuration(rt_uint64_t index, int irq, rt_uint32_t config);
rt_uint64_t arm_gic_get_configuration(rt_uint64_t index, int irq);
void arm_gic_clear_active(rt_uint64_t index, int irq);
void arm_gic_set_router_cpu(rt_uint64_t index, int irq, rt_uint64_t aff);
void arm_gic_set_cpu(rt_uint64_t index, int irq, unsigned int cpumask);
rt_uint64_t arm_gic_get_target_cpu(rt_uint64_t index, int irq);
void arm_gic_set_priority(rt_uint64_t index, int irq, rt_uint64_t priority);
rt_uint64_t arm_gic_get_priority(rt_uint64_t index, int irq);
void arm_gic_set_interface_prior_mask(rt_uint64_t index, rt_uint64_t priority);
rt_uint64_t arm_gic_get_interface_prior_mask(rt_uint64_t index);
void arm_gic_set_binary_point(rt_uint64_t index, rt_uint64_t binary_point);
rt_uint64_t arm_gic_get_binary_point(rt_uint64_t index);
rt_uint64_t arm_gic_get_irq_status(rt_uint64_t index, int irq);
#if defined(RT_USING_SMP) || defined(RT_USING_AMP)
void arm_gic_send_affinity_sgi(rt_uint64_t index, int irq, rt_uint32_t cpu_masks[], rt_uint64_t routing_mode);
#endif
rt_uint64_t arm_gic_get_high_pending_irq(rt_uint64_t index);
rt_uint64_t arm_gic_get_interface_id(rt_uint64_t index);
void arm_gic_set_group(rt_uint64_t index, int irq, rt_uint64_t group);
rt_uint64_t arm_gic_get_group(rt_uint64_t index, int irq);
int arm_gic_redist_address_set(rt_uint64_t index, rt_uint64_t redist_addr, int cpu_id);
int arm_gic_cpu_interface_address_set(rt_uint64_t index, rt_uint64_t interface_addr, int cpu_id);
int arm_gic_dist_init(rt_uint64_t index, rt_uint64_t dist_base, int irq_start);
int arm_gic_redist_init(rt_uint64_t index, rt_uint64_t redist_base);
int arm_gic_cpu_init(rt_uint64_t index, rt_uint64_t cpu_base);
rt_uint64_t *arm_gic_get_gic_table_addr(void);
void arm_gic_dump_type(rt_uint64_t index);
void arm_gic_dump(rt_uint64_t index);
#endif /* defined(BSP_USING_GIC) && defined(BSP_USING_GICV3) */
#endif

View File

@ -0,0 +1,33 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-12-20 GuEe-GUI first version
*/
#ifndef __GTIMER_H__
#define __GTIMER_H__
#include <rtdef.h>
void rt_hw_gtimer_init(void);
void rt_hw_gtimer_local_enable(void);
void rt_hw_gtimer_local_disable(void);
void rt_hw_gtimer_enable();
/* Stop the EL0 physical timer by writing zero to CNTP_CTL_EL0
 * (clears the ENABLE bit). */
rt_inline void rt_hw_gtimer_disable(void)
{
    __asm__ volatile ("msr CNTP_CTL_EL0, xzr":::"memory");
}
void rt_hw_set_gtimer_val(rt_uint64_t value);
rt_uint64_t rt_hw_get_gtimer_val();
rt_uint64_t rt_hw_get_cntpct_val();
rt_uint64_t rt_hw_get_gtimer_frq();
rt_uint64_t rt_hw_set_gtimer_frq(rt_uint64_t value);
#endif /* __GTIMER_H__ */

View File

@ -0,0 +1,27 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-02-24 GuEe-GUI first version
*/
#ifndef __HYPERCALL_H__
#define __HYPERCALL_H__
#include <rtdef.h>
/**
 * Issue a hypervisor call (HVC #0) with an SMCCC-style register layout.
 *
 * Arguments are marshalled into w0/x1-x6/w7 and the result is read back
 * from x0. The original code declared no asm operand constraints at all,
 * so the parameters were never loaded into the calling registers and the
 * returned x0 value was unrelated to the hvc — this binds every operand
 * explicitly.
 *
 * @param w0 function identifier (x0)
 * @param x1..x6 call arguments
 * @param w7 final argument / client id (x7)
 * @return the low 32 bits of x0 after the call
 */
rt_inline rt_uint32_t rt_hw_hypercall(rt_uint32_t w0, rt_uint64_t x1, rt_uint64_t x2,
        rt_uint64_t x3, rt_uint64_t x4, rt_uint64_t x5, rt_uint64_t x6, rt_uint32_t w7)
{
    /* pin each argument to the register the hypercall ABI expects */
    register rt_uint64_t r0 __asm__ ("x0") = w0;
    register rt_uint64_t r1 __asm__ ("x1") = x1;
    register rt_uint64_t r2 __asm__ ("x2") = x2;
    register rt_uint64_t r3 __asm__ ("x3") = x3;
    register rt_uint64_t r4 __asm__ ("x4") = x4;
    register rt_uint64_t r5 __asm__ ("x5") = x5;
    register rt_uint64_t r6 __asm__ ("x6") = x6;
    register rt_uint64_t r7 __asm__ ("x7") = w7;

    __asm__ volatile ("hvc #0"
                      : "+r" (r0)
                      : "r" (r1), "r" (r2), "r" (r3), "r" (r4),
                        "r" (r5), "r" (r6), "r" (r7)
                      : "memory");

    return (rt_uint32_t)r0;
}
rt_err_t rt_hv_stage2_map(unsigned long paddr, unsigned long size);
#endif

View File

@ -0,0 +1,60 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-06 Bernard first version
*/
#ifndef __INTERRUPT_H__
#define __INTERRUPT_H__
#include <rthw.h>
#include <board.h>
#define INT_IRQ 0x00
#define INT_FIQ 0x01
#define IRQ_MODE_TRIG_LEVEL (0x00) /* Trigger: level triggered interrupt */
#define IRQ_MODE_TRIG_EDGE (0x01) /* Trigger: edge triggered interrupt */
#define IRQ_MODE_MASK (0x01)
void rt_hw_vector_init(void);
void rt_hw_interrupt_init(void);
void rt_hw_interrupt_mask(int vector);
void rt_hw_interrupt_umask(int vector);
int rt_hw_interrupt_get_irq(void);
void rt_hw_interrupt_ack(int vector);
void rt_hw_interrupt_set_target_cpus(int vector, unsigned long cpu_mask);
unsigned int rt_hw_interrupt_get_target_cpus(int vector);
void rt_hw_interrupt_set_triger_mode(int vector, unsigned int mode);
unsigned int rt_hw_interrupt_get_triger_mode(int vector);
void rt_hw_interrupt_set_pending(int vector);
unsigned int rt_hw_interrupt_get_pending(int vector);
void rt_hw_interrupt_clear_pending(int vector);
void rt_hw_interrupt_set_priority(int vector, unsigned int priority);
unsigned int rt_hw_interrupt_get_priority(int vector);
void rt_hw_interrupt_set_priority_mask(unsigned int priority);
unsigned int rt_hw_interrupt_get_priority_mask(void);
int rt_hw_interrupt_set_prior_group_bits(unsigned int bits);
unsigned int rt_hw_interrupt_get_prior_group_bits(void);
rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
void *param, const char *name);
#if defined(RT_USING_SMP) || defined(RT_USING_AMP)
void rt_hw_ipi_handler_install(int ipi_vector, rt_isr_handler_t ipi_isr_handler);
#endif
#endif

View File

@ -0,0 +1,218 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-12 RT-Thread the first version
* 2023-08-15 Shell Support more mapping attribution
*/
#ifndef __MMU_H_
#define __MMU_H_
#ifndef __ASSEMBLY__
#include <rtthread.h>
#include <mm_aspace.h>
/* normal memory wra mapping type */
#define NORMAL_MEM 0
/* normal nocache memory mapping type */
#define NORMAL_NOCACHE_MEM 1
/* device mapping type */
#define DEVICE_MEM 2
struct mem_desc
{
unsigned long vaddr_start;
unsigned long vaddr_end;
unsigned long paddr_start;
unsigned long attr;
struct rt_varea varea;
};
#endif /* !__ASSEMBLY__ */
#define RT_HW_MMU_PROT_READ 1
#define RT_HW_MMU_PROT_WRITE 2
#define RT_HW_MMU_PROT_EXECUTE 4
#define RT_HW_MMU_PROT_KERNEL 8
#define RT_HW_MMU_PROT_USER 16
#define RT_HW_MMU_PROT_CACHE 32
#define MMU_ASID_SHIFT 48
#define MMU_NG_SHIFT 11 /* not global bit */
#define MMU_AF_SHIFT 10
#define MMU_SHARED_SHIFT 8
#define MMU_AP_SHIFT 6
#define MMU_MA_SHIFT 2
#define MMU_AP_MASK (0x3 << MMU_AP_SHIFT)
/* we dont support feat detecting for now, so 8-bit is used to fallback */
#define MMU_SUPPORTED_ASID_BITS 8
#define MMU_AP_KAUN 0UL /* kernel r/w, user none */
#define MMU_AP_KAUA 1UL /* kernel r/w, user r/w */
#define MMU_AP_KRUN 2UL /* kernel r, user none */
#define MMU_AP_KRUR 3UL /* kernel r, user r */
#define MMU_ATTR_AF (1ul << MMU_AF_SHIFT) /* the access flag */
#define MMU_ATTR_DBM (1ul << 51) /* the dirty bit modifier */
#define MMU_MAP_CUSTOM(ap, mtype, nglobal) \
((0x1UL << MMU_AF_SHIFT) | (0x2UL << MMU_SHARED_SHIFT) | \
((ap) << MMU_AP_SHIFT) | ((mtype) << MMU_MA_SHIFT)) | \
((rt_ubase_t)(nglobal) << MMU_NG_SHIFT)
#define MMU_MAP_K_ROCB MMU_MAP_CUSTOM(MMU_AP_KRUN, NORMAL_MEM, 0)
#define MMU_MAP_K_RO MMU_MAP_CUSTOM(MMU_AP_KRUN, NORMAL_NOCACHE_MEM, 0)
#define MMU_MAP_K_RWCB MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM, 0)
#define MMU_MAP_K_RW MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_NOCACHE_MEM, 0)
#define MMU_MAP_K_DEVICE MMU_MAP_CUSTOM(MMU_AP_KAUN, DEVICE_MEM, 0)
#define MMU_MAP_U_ROCB MMU_MAP_CUSTOM(MMU_AP_KRUR, NORMAL_MEM, 1)
#define MMU_MAP_U_RO MMU_MAP_CUSTOM(MMU_AP_KRUR, NORMAL_NOCACHE_MEM, 1)
#define MMU_MAP_U_RWCB MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_MEM, 1)
#define MMU_MAP_U_RW MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_NOCACHE_MEM, 1)
#define MMU_MAP_U_DEVICE MMU_MAP_CUSTOM(MMU_AP_KAUA, DEVICE_MEM, 1)
#define MMU_MAP_TRACE(attr) ((attr) & ~(MMU_ATTR_AF | MMU_ATTR_DBM))
#define ARCH_SECTION_SHIFT 21
#define ARCH_SECTION_SIZE (1 << ARCH_SECTION_SHIFT)
#define ARCH_SECTION_MASK (ARCH_SECTION_SIZE - 1)
#define ARCH_PAGE_SHIFT 12
#define ARCH_PAGE_SIZE (1 << ARCH_PAGE_SHIFT)
#define ARCH_PAGE_MASK (ARCH_PAGE_SIZE - 1)
#define ARCH_PAGE_TBL_SHIFT 12
#define ARCH_PAGE_TBL_SIZE (1 << ARCH_PAGE_TBL_SHIFT)
#define ARCH_PAGE_TBL_MASK (ARCH_PAGE_TBL_SIZE - 1)
#define ARCH_VADDR_WIDTH 48
#define ARCH_ADDRESS_WIDTH_BITS 64
#define MMU_MAP_ERROR_VANOTALIGN -1
#define MMU_MAP_ERROR_PANOTALIGN -2
#define MMU_MAP_ERROR_NOPAGE -3
#define MMU_MAP_ERROR_CONFLICT -4
#define ARCH_MAP_FAILED ((void *)0x1ffffffffffff)
#define ARCH_EARLY_MAP_SIZE (0x40000000)
/* this is big enough for even 16TB first-time mapping */
#define ARCH_PAGE_INIT_THRESHOLD (0x10000000)
#ifndef __ASSEMBLY__
struct rt_aspace;
void rt_hw_mmu_ktbl_set(unsigned long tbl);
void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1,
unsigned long size, unsigned long pv_off);
void rt_hw_mmu_setup(struct rt_aspace *aspace, struct mem_desc *mdesc,
int desc_nr);
int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, size_t size, size_t *vtable, size_t pv_off);
void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr,
size_t size, size_t attr);
void rt_hw_mmu_unmap(struct rt_aspace *aspace, void *v_addr, size_t size);
void rt_hw_aspace_switch(struct rt_aspace *aspace);
void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr);
void rt_hw_mmu_kernel_map_init(struct rt_aspace *aspace, rt_size_t vaddr_start,
rt_size_t size);
void *rt_hw_mmu_pgtbl_create(void);
void rt_hw_mmu_pgtbl_delete(void *pgtbl);
void *rt_hw_mmu_tbl_get(void);
/**
 * @brief Translate a kernel virtual address to its physical address
 *        via the hardware AT (address-translation) instruction.
 *
 * Performs a stage-1, EL1, write-access lookup ("at s1e1w") and reads
 * the result from PAR_EL1.
 *
 * @param v_addr kernel virtual address to translate
 * @return the physical address, or ARCH_MAP_FAILED if the lookup faulted
 */
static inline void *rt_hw_mmu_kernel_v2p(void *v_addr)
{
    rt_ubase_t par;
    void *paddr;
    /* ask the MMU to walk the tables; the result lands in PAR_EL1 */
    __asm__ volatile("at s1e1w, %0"::"r"(v_addr):"memory");
    __asm__ volatile("mrs %0, par_el1":"=r"(par)::"memory");

    if (par & 0x1)
    {
        /* PAR_EL1.F set: the translation aborted */
        paddr = ARCH_MAP_FAILED;
    }
    else
    {
        /* physical page number lives in PAR_EL1[47:12];
         * merge the in-page offset back in */
#define MMU_ADDRESS_MASK 0x0000fffffffff000UL
        par &= MMU_ADDRESS_MASK;
        par |= (rt_ubase_t)v_addr & ARCH_PAGE_MASK;
        paddr = (void *)par;
    }
    return paddr;
}
/**
 * @brief Add permission from attribution
 *
 * @param attr architecture specified mmu attribution
 * @param prot protect that will be added
 * @return size_t returned attribution
 */
rt_inline size_t rt_hw_mmu_attr_add_perm(size_t attr, rt_base_t prot)
{
    switch (prot)
    {
    /* grant user write access: AP field -> kernel r/w + user r/w
     * (the original comment said "remove", which is wrong here) */
    case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
        attr = (attr & ~MMU_AP_MASK) | (MMU_AP_KAUA << MMU_AP_SHIFT);
        break;
    default:
        /* no other permission combination is supported yet */
        RT_ASSERT(0);
    }
    return attr;
}
/**
 * @brief Remove permission from attribution
 *
 * @param attr architecture specified mmu attribution
 * @param prot protect that will be removed
 * @return size_t returned attribution
 */
rt_inline size_t rt_hw_mmu_attr_rm_perm(size_t attr, rt_base_t prot)
{
    if (prot == (RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER))
    {
        /* user-writable entry (AP[0] set) -> also set AP[1],
         * turning the mapping read-only */
        if (attr & 0x40)
        {
            attr |= 0x80;
        }
    }
    else
    {
        /* no other permission combination is supported yet */
        RT_ASSERT(0);
    }

    return attr;
}
/**
 * @brief Test permission from attribution
 *
 * @param attr architecture specified mmu attribution
 * @param prot protect that will be tested
 * @return rt_bool_t RT_TRUE if the prot is allowed, otherwise RT_FALSE
 */
rt_inline rt_bool_t rt_hw_mmu_attr_test_perm(size_t attr, rt_base_t prot)
{
    /* initialized so an unsupported 'prot' cannot return an
     * indeterminate value when RT_ASSERT is compiled out (was UB) */
    rt_bool_t rc = RT_FALSE;

    switch (prot)
    {
    /* test write permission for user */
    case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
        if ((attr & MMU_AP_MASK) == (MMU_AP_KAUA << MMU_AP_SHIFT))
            rc = RT_TRUE;
        break;
    default:
        RT_ASSERT(0);
    }
    return rc;
}
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
enum rt_mmu_cntl cmd);
#endif /* !__ASSEMBLY__ */
#endif

View File

@ -0,0 +1,151 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-09-09 GuEe-GUI The first version
*/
#ifndef __PSCI_H__
#define __PSCI_H__
#include <rtdef.h>
/*
* Non-Confidential PSCI 1.0 release (30 January 2015), and errata fix for PSCI 0.2, unsupport PSCI 0.1
*/
/* PSCI 0.2 interface */
#define PSCI_0_2_FN_BASE 0x84000000
#define PSCI_0_2_FN(n) (PSCI_0_2_FN_BASE + (n))
#define PSCI_0_2_FN_END 0x8400001F
#define PSCI_0_2_FN64_BASE 0xC4000000
#define PSCI_0_2_FN64(n) (PSCI_0_2_FN64_BASE + (n))
#define PSCI_0_2_FN64_END 0xC400001F
#define PSCI_0_2_FN_PSCI_VERSION PSCI_0_2_FN(0)
#define PSCI_0_2_FN_CPU_SUSPEND PSCI_0_2_FN(1)
#define PSCI_0_2_FN_CPU_OFF PSCI_0_2_FN(2)
#define PSCI_0_2_FN_CPU_ON PSCI_0_2_FN(3)
#define PSCI_0_2_FN_AFFINITY_INFO PSCI_0_2_FN(4)
#define PSCI_0_2_FN_MIGRATE PSCI_0_2_FN(5)
#define PSCI_0_2_FN_MIGRATE_INFO_TYPE PSCI_0_2_FN(6)
#define PSCI_0_2_FN_MIGRATE_INFO_UP_CPU PSCI_0_2_FN(7)
#define PSCI_0_2_FN_SYSTEM_OFF PSCI_0_2_FN(8)
#define PSCI_0_2_FN_SYSTEM_RESET PSCI_0_2_FN(9)
#define PSCI_0_2_FN64_CPU_SUSPEND PSCI_0_2_FN64(1)
#define PSCI_0_2_FN64_CPU_ON PSCI_0_2_FN64(3)
#define PSCI_0_2_FN64_AFFINITY_INFO PSCI_0_2_FN64(4)
#define PSCI_0_2_FN64_MIGRATE PSCI_0_2_FN64(5)
#define PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU PSCI_0_2_FN64(7)
/* PSCI 1.0 interface */
#define PSCI_1_0_FN_PSCI_FEATURES PSCI_0_2_FN(10)
#define PSCI_1_0_FN_CPU_FREEZE PSCI_0_2_FN(11)
#define PSCI_1_0_FN_CPU_DEFAULT_SUSPEND PSCI_0_2_FN(12)
#define PSCI_1_0_FN_NODE_HW_STATE PSCI_0_2_FN(13)
#define PSCI_1_0_FN_SYSTEM_SUSPEND PSCI_0_2_FN(14)
#define PSCI_1_0_FN_SET_SUSPEND_MODE PSCI_0_2_FN(15)
#define PSCI_1_0_FN_STAT_RESIDENCY PSCI_0_2_FN(16)
#define PSCI_1_0_FN_STAT_COUNT PSCI_0_2_FN(17)
#define PSCI_1_1_FN_SYSTEM_RESET2 PSCI_0_2_FN(18)
#define PSCI_1_0_FN64_CPU_DEFAULT_SUSPEND PSCI_0_2_FN64(12)
#define PSCI_1_0_FN64_NODE_HW_STATE PSCI_0_2_FN64(13)
#define PSCI_1_0_FN64_SYSTEM_SUSPEND PSCI_0_2_FN64(14)
#define PSCI_1_0_FN64_STAT_RESIDENCY PSCI_0_2_FN64(16)
#define PSCI_1_0_FN64_STAT_COUNT PSCI_0_2_FN64(17)
#define PSCI_1_1_FN64_SYSTEM_RESET2 PSCI_0_2_FN64(18)
/* PSCI version decoding (independent of PSCI version) */
#define PSCI_VERSION_MAJOR_SHIFT 16
#define PSCI_VERSION_MINOR_MASK ((1U << PSCI_VERSION_MAJOR_SHIFT) - 1)
#define PSCI_VERSION_MAJOR_MASK ~PSCI_VERSION_MINOR_MASK
#define PSCI_VERSION_MAJOR(version) (((version) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
#define PSCI_VERSION_MINOR(version) ((version) & PSCI_VERSION_MINOR_MASK)
#define PSCI_VERSION(major, min) ((((major) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \
((min) & PSCI_VERSION_MINOR_MASK))
/* PSCI affinity level state returned by AFFINITY_INFO */
#define PSCI_AFFINITY_LEVEL_ON 0
#define PSCI_AFFINITY_LEVEL_OFF 1
#define PSCI_AFFINITY_LEVEL_ON_PENDING 2
/*
* PSCI power state
* power_level:
* Level 0: cores
* Level 1: clusters
* Level 2: system
* state_type:
* value 0: standby or retention state
* value 1: powerdown state(entry and context_id is valid)
* state_id:
* StateID
*/
#define PSCI_POWER_STATE_LEVEL_CORES 0
#define PSCI_POWER_STATE_LEVEL_CLUSTERS 1
#define PSCI_POWER_STATE_LEVEL_SYSTEM 2
#define PSCI_POWER_STATE_TYPE_STANDBY 0
#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
#define PSCI_POWER_LEVEL_SHIFT 24
#define PSCI_POWER_STATE_TYPE_SHIFT 16
#define PSCI_POWER_STATE_ID_SHIFT 0
#define PSCI_POWER_STATE(power_level, state_type, state_id) \
( \
((power_level) << PSCI_POWER_LEVEL_SHIFT) | \
((state_type) << PSCI_POWER_STATE_TYPE_SHIFT) | \
((state_id) << PSCI_POWER_STATE_ID_SHIFT) \
)
#define PSCI_POWER_LEVEL_VAL(state) (((state) >> PSCI_POWER_LEVEL_SHIFT) & 0x3)
#define PSCI_POWER_STATE_TYPE_VAL(state) (((state) >> PSCI_POWER_STATE_TYPE_SHIFT) & 0x1)
#define PSCI_POWER_STATE_ID_VAL(state) (((state) >> PSCI_POWER_STATE_ID_SHIFT) & 0xffff)
/*
* For system, cluster, core
* 0: run
* 1: standby(only core)
* 2: retention
* 3: powerdown
*/
#define PSCI_POWER_STATE_ID_RUN 0
#define PSCI_POWER_STATE_ID_STANDBY 1
#define PSCI_POWER_STATE_ID_RETENTION 2
#define PSCI_POWER_STATE_ID_POWERDOWN 3
#define PSCI_POWER_STATE_ID(state_id_power_level, system, cluster, core) \
( \
((state_id_power_level) << 12) | \
((system) << 8) | \
((cluster) << 4) | \
(core) \
)
#define PSCI_RET_SUCCESS 0
#define PSCI_RET_NOT_SUPPORTED (-1)
#define PSCI_RET_INVALID_PARAMETERS (-2)
#define PSCI_RET_DENIED (-3)
#define PSCI_RET_ALREADY_ON (-4)
#define PSCI_RET_ON_PENDING (-5)
#define PSCI_RET_INTERNAL_FAILURE (-6)
#define PSCI_RET_NOT_PRESENT (-7)
#define PSCI_RET_DISABLED (-8)
#define PSCI_RET_INVALID_ADDRESS (-9)
void psci_system_off(void);
void psci_system_reboot(void);
rt_uint32_t rt_psci_get_version(void);
rt_uint32_t rt_psci_cpu_on(int cpuid, rt_ubase_t entry_point);
rt_uint32_t rt_psci_cpu_off(rt_uint32_t state);
rt_uint32_t rt_psci_cpu_suspend(rt_uint32_t power_state, rt_ubase_t entry_point);
rt_uint32_t rt_psci_migrate(int cpuid);
rt_uint32_t rt_psci_get_affinity_info(rt_ubase_t target_affinity, rt_ubase_t lowest_affinity_level);
rt_uint32_t rt_psci_migrate_info_type(void);
#endif /* __PSCI_H__ */

View File

@ -0,0 +1,22 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-21 GuEe-GUI first version
*/
#ifndef __SETUP_H__
#define __SETUP_H__
#include <rtdef.h>
#include <mm_aspace.h>
#ifdef RT_USING_OFW
#include <drivers/ofw_fdt.h>
#endif
void rt_hw_common_setup(void);
#endif /* __SETUP_H__ */

View File

@ -0,0 +1,45 @@
/*
* Copyright (c) 2006-2019, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef __SMCCC_H__
#define __SMCCC_H__
/**
* result from SMC/HVC call
* ARM DEN0028E chapter 5,
*/
typedef struct arm_smccc_res_t
{
unsigned long a0;
// reserved for ARM SMC and HVC Fast Call services
unsigned long a1;
unsigned long a2;
unsigned long a3;
} arm_smccc_res_t;
/**
* quirk is a structure contains vendor specified information,
* it just a placeholder currently
*/
struct arm_smccc_quirk_t
{
};
/* smccc version 0.2 */
void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
unsigned long a3, unsigned long a4, unsigned long a5,
unsigned long a6, unsigned long a7, struct arm_smccc_res_t *res,
struct arm_smccc_quirk_t *quirk);
void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
unsigned long a3, unsigned long a4, unsigned long a5,
unsigned long a6, unsigned long a7, struct arm_smccc_res_t *res,
struct arm_smccc_quirk_t *quirk);
#endif /* __SMCCC_H__ */

View File

@ -0,0 +1,91 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-28 WangXiaoyao the first version
*/
#ifndef __TLB_H__
#define __TLB_H__
#include <rtthread.h>
#include <stddef.h>
#include <stdint.h>
#include "mm_aspace.h"
#include "mmu.h"
#define TLBI_ARG(addr, asid) \
({ \
rt_ubase_t arg = (rt_ubase_t)(addr) >> ARCH_PAGE_SHIFT; \
arg &= (1ull << 44) - 1; \
arg |= (rt_ubase_t)(asid) << MMU_ASID_SHIFT; \
(void *)arg; \
})
/* Invalidate all stage-1 EL1 TLB entries on every core of the
 * Inner Shareable domain (broadcast VMALLE1IS). */
static inline void rt_hw_tlb_invalidate_all(void)
{
    __asm__ volatile(
        // ensure updates to pte completed
        "dsb ishst\n"
        "tlbi vmalle1is\n"
        "dsb ish\n"
        // after tlb in new context, refresh inst
        "isb\n" ::
        : "memory");
}
/* Invalidate all stage-1 EL1 TLB entries on the calling core only. */
static inline void rt_hw_tlb_invalidate_all_local(void)
{
    __asm__ volatile(
        // ensure updates to pte completed
        "dsb nshst\n"
        /* local-only invalidate: use the non-broadcast VMALLE1 op.
         * The previous VMALLE1IS broadcast to the Inner Shareable
         * domain, which contradicts the non-shareable (nsh) barriers
         * used here and wastes cross-core invalidations. */
        "tlbi vmalle1\n"
        "dsb nsh\n"
        // after tlb in new context, refresh inst
        "isb\n" ::
        : "memory");
}
/* Invalidate the TLB entries of one address space: by hardware ASID when
 * ARCH_USING_ASID is enabled, otherwise fall back to a full invalidate. */
static inline void rt_hw_tlb_invalidate_aspace(rt_aspace_t aspace)
{
#ifdef ARCH_USING_ASID
    /* NOTE(review): ASIDE1IS is an Inner-Shareable broadcast op, yet the
     * barriers are non-shareable (nshst/nsh) — confirm intended scope. */
    __asm__ volatile(
        // ensure updates to pte completed
        "dsb nshst\n"
        "tlbi aside1is, %0\n"
        "dsb nsh\n"
        // after tlb in new context, refresh inst
        "isb\n" ::"r"(TLBI_ARG(0ul, aspace->asid))
        : "memory");
#else
    rt_hw_tlb_invalidate_all();
#endif
}
/* Invalidate the TLB entry for one page across the Inner Shareable
 * domain (VAAE1IS matches any ASID); 'aspace' is unused here. */
static inline void rt_hw_tlb_invalidate_page(rt_aspace_t aspace, void *start)
{
    /* convert the address to TLBI operand form (page number, ASID 0) */
    start = TLBI_ARG(start, 0);
    __asm__ volatile(
        "dsb ishst\n"
        "tlbi vaae1is, %0\n"
        "dsb ish\n"
        "isb\n" ::"r"(start)
        : "memory");
}
/* Invalidate a virtual range: a single-page op when the range fits in
 * one page, otherwise the whole address space (coarse but safe).
 * 'stride' is accepted for API symmetry but not used by this port. */
static inline void rt_hw_tlb_invalidate_range(rt_aspace_t aspace, void *start,
                                              size_t size, size_t stride)
{
    if (size <= ARCH_PAGE_SIZE)
    {
        rt_hw_tlb_invalidate_page(aspace, start);
    }
    else
    {
        rt_hw_tlb_invalidate_aspace(aspace);
    }
}
#endif /* __TLB_H__ */

View File

@ -0,0 +1,128 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
* 2024-04-08 Shell Optimizing exception switch between u-space/kernel,
*/
#ifndef __ARM64_INC_VECTOR_H__
#define __ARM64_INC_VECTOR_H__
#include "asm-generic.h"
#include <rtconfig.h>
#include <asm-fpu.h>
#include <armv8.h>
/* Build a full exception frame on the current stack.
 * Push order: FPU state, x0-x29 (in pairs), fpcr/fpsr, sp_el0 + x30,
 * and elr_el1/spsr_el1 last (top of frame).
 * The layout must mirror RESTORE_IRQ_CONTEXT_NO_SPEL0. */
.macro SAVE_IRQ_CONTEXT
    /* Save the entire context. */
    SAVE_FPU sp
    stp     x0, x1, [sp, #-0x10]!
    stp     x2, x3, [sp, #-0x10]!
    stp     x4, x5, [sp, #-0x10]!
    stp     x6, x7, [sp, #-0x10]!
    stp     x8, x9, [sp, #-0x10]!
    stp     x10, x11, [sp, #-0x10]!
    stp     x12, x13, [sp, #-0x10]!
    stp     x14, x15, [sp, #-0x10]!
    stp     x16, x17, [sp, #-0x10]!
    stp     x18, x19, [sp, #-0x10]!
    stp     x20, x21, [sp, #-0x10]!
    stp     x22, x23, [sp, #-0x10]!
    stp     x24, x25, [sp, #-0x10]!
    stp     x26, x27, [sp, #-0x10]!
    stp     x28, x29, [sp, #-0x10]!

    /* x28/x29 already saved above, so they are free as scratch here */
    mrs     x28, fpcr
    mrs     x29, fpsr
    stp     x28, x29, [sp, #-0x10]!

    mrs     x29, sp_el0
    stp     x29, x30, [sp, #-0x10]!

    mrs     x3, spsr_el1
    mrs     x2, elr_el1
    stp     x2, x3, [sp, #-0x10]!
.endm
#ifdef RT_USING_SMP
#include "../mp/context_gcc.h"
#else
#include "../up/context_gcc.h"
#endif
/* Pop the frame built by SAVE_IRQ_CONTEXT.
 * The saved sp_el0 slot is popped (into x29) but NOT written back to
 * the sp_el0 register — hence the _NO_SPEL0 suffix. */
.macro RESTORE_IRQ_CONTEXT_NO_SPEL0
    ldp     x2, x3, [sp], #0x10
    msr     elr_el1, x2
    msr     spsr_el1, x3

    /* saved sp_el0 lands in x29 here and is discarded;
     * x29 proper is reloaded two pops below */
    ldp     x29, x30, [sp], #0x10

    ldp     x28, x29, [sp], #0x10
    msr     fpcr, x28
    msr     fpsr, x29

    ldp     x28, x29, [sp], #0x10
    ldp     x26, x27, [sp], #0x10
    ldp     x24, x25, [sp], #0x10
    ldp     x22, x23, [sp], #0x10
    ldp     x20, x21, [sp], #0x10
    ldp     x18, x19, [sp], #0x10
    ldp     x16, x17, [sp], #0x10
    ldp     x14, x15, [sp], #0x10
    ldp     x12, x13, [sp], #0x10
    ldp     x10, x11, [sp], #0x10
    ldp     x8, x9, [sp], #0x10
    ldp     x6, x7, [sp], #0x10
    ldp     x4, x5, [sp], #0x10
    ldp     x2, x3, [sp], #0x10
    ldp     x0, x1, [sp], #0x10
    RESTORE_FPU sp
.endm
/* If the saved SPSR_EL1 mode bits are all zero (EL0t — exception taken
 * from user space), branch to arch_ret_to_user; otherwise fall through
 * to the plain kernel return path. Only built for RT_USING_SMART.
 *   \eframex: pointer to the saved exception frame
 *   \tmpx:    scratch register */
.macro EXCEPTION_SWITCH, eframex, tmpx
#ifdef RT_USING_SMART
    /**
     * test the spsr for execution level 0
     * That is { PSTATE.[NZCV] := SPSR_EL1 & M.EL0t }
     */
    ldr     \tmpx, [\eframex, #CONTEXT_OFFSET_SPSR_EL1]
    and     \tmpx, \tmpx, 0x1f
    cbz     \tmpx, 1f
    b       2f
1:
    b       arch_ret_to_user
2:
#endif /* RT_USING_SMART */
.endm
/* When the exception came from EL0, call lwp_uthread_ctx_save with the
 * exception frame pointer in x0 (RT_USING_SMART only).
 * NOTE(review): this masks SPSR with 0xf while EXCEPTION_SWITCH uses
 * 0x1f; both treat EL0t (all-zero) identically — confirm intended. */
.macro SAVE_USER_CTX, eframex, tmpx
#ifdef RT_USING_SMART
    mrs     \tmpx, spsr_el1
    and     \tmpx, \tmpx, 0xf
    cbz     \tmpx, 1f
    b       2f
1:
    mov     x0, \eframex
    bl      lwp_uthread_ctx_save
2:
#endif /* RT_USING_SMART */
.endm
/* Counterpart of SAVE_USER_CTX: when the saved SPSR says the exception
 * came from EL0, call lwp_uthread_ctx_restore (RT_USING_SMART only).
 *   \eframex: pointer to the saved exception frame
 *   \tmpx:    scratch register */
.macro RESTORE_USER_CTX, eframex, tmpx
#ifdef RT_USING_SMART
    ldr     \tmpx, [\eframex, #CONTEXT_OFFSET_SPSR_EL1]
    and     \tmpx, \tmpx, 0x1f
    cbz     \tmpx, 1f
    b       2f
1:
    bl      lwp_uthread_ctx_restore
2:
#endif /* RT_USING_SMART */
.endm
#endif /* __ARM64_INC_VECTOR_H__ */

View File

@ -0,0 +1,451 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-06 Bernard first version
* 2018-11-22 Jesven add smp support
*/
#include <rthw.h>
#include <rtthread.h>
#include "interrupt.h"
#include "gic.h"
#include "gicv3.h"
#include "ioremap.h"
/* exception and interrupt handler table */
struct rt_irq_desc isr_table[MAX_HANDLERS];
#ifndef RT_CPUS_NR
#define RT_CPUS_NR 1
#endif
const unsigned int VECTOR_BASE = 0x00;
extern void rt_cpu_vector_set_base(void *addr);
extern void *system_vectors;
#ifdef RT_USING_SMP
#define rt_interrupt_nest rt_cpu_self()->irq_nest
#else
extern volatile rt_atomic_t rt_interrupt_nest;
#endif
#ifdef SOC_BCM283x
/* Fallback ISR installed for every vector on BCM283x builds:
 * just report the unexpected interrupt. */
static void default_isr_handler(int vector, void *param)
{
#ifdef RT_USING_SMP
    rt_kprintf("cpu %d unhandled irq: %d\n", rt_hw_cpu_id(),vector);
#else
    rt_kprintf("unhandled irq: %d\n",vector);
#endif
}
#endif
/* Point the CPU's vector base register at the system exception table. */
void rt_hw_vector_init(void)
{
    rt_cpu_vector_set_base(&system_vectors);
}
/**
 * This function will initialize hardware interrupt.
 *
 * BCM283x: masks every source, installs default_isr_handler for all
 * vectors and resets the interrupt-nest counter.
 * GIC builds: sets up the vector table, clears the ISR table and
 * initializes the distributor / CPU interface (and the redistributor
 * on GICv3). With RT_USING_SMART or RT_USING_OFW the controller
 * registers are first remapped into the kernel address space.
 */
void rt_hw_interrupt_init(void)
{
#ifdef SOC_BCM283x
    rt_uint32_t index;

    /* initialize vector table */
    rt_hw_vector_init();

    /* initialize exceptions table */
    rt_memset(isr_table, 0x00, sizeof(isr_table));

    /* mask all of interrupts */
    IRQ_DISABLE_BASIC = 0x000000ff;
    IRQ_DISABLE1 = 0xffffffff;
    IRQ_DISABLE2 = 0xffffffff;

    for (index = 0; index < MAX_HANDLERS; index ++)
    {
        isr_table[index].handler = default_isr_handler;
        isr_table[index].param = RT_NULL;
#ifdef RT_USING_INTERRUPT_INFO
        rt_strncpy(isr_table[index].name, "unknown", RT_NAME_MAX);
        isr_table[index].counter = 0;
#endif
    }

    /* init interrupt nest, and context in thread sp */
    rt_atomic_store(&rt_interrupt_nest, 0);
#else
    rt_uint64_t gic_cpu_base;
    rt_uint64_t gic_dist_base;
#ifdef BSP_USING_GICV3
    rt_uint64_t gic_rdist_base;
#endif
    rt_uint64_t gic_irq_start;

    /* initialize vector table */
    rt_hw_vector_init();

    /* initialize exceptions table */
    rt_memset(isr_table, 0x00, sizeof(isr_table));

    /* initialize ARM GIC */
#if defined(RT_USING_SMART) || defined(RT_USING_OFW)
    /* MMU is active: remap the controller's MMIO before touching it */
    gic_dist_base = (rt_uint64_t)rt_ioremap((void*)platform_get_gic_dist_base(), 0x40000);
    gic_cpu_base = (rt_uint64_t)rt_ioremap((void*)platform_get_gic_cpu_base(), 0x1000);
#ifdef BSP_USING_GICV3
    /* each redistributor occupies two 64 KiB frames per CPU */
    gic_rdist_base = (rt_uint64_t)rt_ioremap((void*)platform_get_gic_redist_base(),
                                             ARM_GIC_CPU_NUM * (2 << 16));
#endif
#else
    gic_dist_base = platform_get_gic_dist_base();
    gic_cpu_base = platform_get_gic_cpu_base();
#ifdef BSP_USING_GICV3
    gic_rdist_base = platform_get_gic_redist_base();
#endif
#endif
    gic_irq_start = GIC_IRQ_START;

    arm_gic_dist_init(0, gic_dist_base, gic_irq_start);
    arm_gic_cpu_init(0, gic_cpu_base);
#ifdef BSP_USING_GICV3
    arm_gic_redist_init(0, gic_rdist_base);
#endif
#endif
}
/**
 * This function will mask an interrupt.
 *
 * BCM283x splits vectors over three banks: 0-31 (IRQ1), 32-63 (IRQ2)
 * and 64+ (basic); GIC builds simply disable the ID in the distributor.
 *
 * @param vector the interrupt number
 */
void rt_hw_interrupt_mask(int vector)
{
#ifdef SOC_BCM283x
    if (vector < 32)
    {
        IRQ_DISABLE1 = (1UL << vector);
    }
    else if (vector < 64)
    {
        vector = vector % 32;
        IRQ_DISABLE2 = (1UL << vector);
    }
    else
    {
        vector = vector - 64;
        IRQ_DISABLE_BASIC = (1UL << vector);
    }
#else
    arm_gic_mask(0, vector);
#endif
}
/**
 * This function will un-mask an interrupt.
 *
 * Mirror of rt_hw_interrupt_mask: same bank split on BCM283x,
 * distributor enable on GIC builds.
 *
 * @param vector the interrupt number
 */
void rt_hw_interrupt_umask(int vector)
{
#ifdef SOC_BCM283x
    if (vector < 32)
    {
        IRQ_ENABLE1 = (1UL << vector);
    }
    else if (vector < 64)
    {
        vector = vector % 32;
        IRQ_ENABLE2 = (1UL << vector);
    }
    else
    {
        vector = vector - 64;
        IRQ_ENABLE_BASIC = (1UL << vector);
    }
#else
    arm_gic_umask(0, vector);
#endif
}
/**
 * This function returns the active interrupt number.
 *
 * @return the active interrupt id from the GIC; always 0 on BCM283x
 *         builds (not implemented there)
 */
int rt_hw_interrupt_get_irq(void)
{
#ifndef SOC_BCM283x
    return arm_gic_get_active_irq(0);
#else
    return 0;
#endif
}
/**
 * This function acknowledges (ends) the interrupt on the GIC.
 * No-op on BCM283x builds.
 *
 * @param vector the interrupt number
 */
void rt_hw_interrupt_ack(int vector)
{
#ifndef SOC_BCM283x
    arm_gic_ack(0, vector);
#endif
}
#ifndef SOC_BCM283x
/**
 * This function sets the interrupt's target CPUs.
 *
 * @param vector   the interrupt number
 * @param cpu_mask GICv2: one bit per target core;
 *                 GICv3: an affinity routing value (MPIDR-style)
 */
void rt_hw_interrupt_set_target_cpus(int vector, unsigned long cpu_mask)
{
#ifdef BSP_USING_GIC
#ifdef BSP_USING_GICV3
    arm_gic_set_router_cpu(0, vector, cpu_mask);
#else
    arm_gic_set_cpu(0, vector, (unsigned int) cpu_mask);
#endif
#endif
}
/**
 * This function gets the interrupt's target CPUs.
 *
 * @param vector the interrupt number
 * @return target CPU mask, one bit per core
 */
unsigned int rt_hw_interrupt_get_target_cpus(int vector)
{
    return arm_gic_get_target_cpu(0, vector);
}
/**
 * Set the interrupt trigger mode.
 *
 * @param vector the interrupt number
 * @param mode   IRQ_MODE_TRIG_LEVEL (0) or IRQ_MODE_TRIG_EDGE (1);
 *               only the low bit (IRQ_MODE_MASK) is used
 */
void rt_hw_interrupt_set_triger_mode(int vector, unsigned int mode)
{
    arm_gic_set_configuration(0, vector, mode & IRQ_MODE_MASK);
}
/**
 * Get the interrupt trigger mode.
 *
 * @param vector the interrupt number
 * @return 0: level triggered, 1: edge triggered
 */
unsigned int rt_hw_interrupt_get_triger_mode(int vector)
{
    return arm_gic_get_configuration(0, vector);
}
/**
 * Set the pending flag of an interrupt.
 * @param vector the interrupt number
 */
void rt_hw_interrupt_set_pending(int vector)
{
    arm_gic_set_pending_irq(0, vector);
}
/**
 * Get the pending flag of an interrupt.
 * @param vector the interrupt number
 * @return 0: not pending; 1: pending
 */
unsigned int rt_hw_interrupt_get_pending(int vector)
{
    return arm_gic_get_pending_irq(0, vector);
}
/**
 * Clear the pending flag of an interrupt.
 * @param vector the interrupt number
 */
void rt_hw_interrupt_clear_pending(int vector)
{
    arm_gic_clear_pending_irq(0, vector);
}
/**
 * Set the priority of an interrupt.
 * @param vector   the interrupt number
 * @param priority the priority value to program into the GIC
 */
void rt_hw_interrupt_set_priority(int vector, unsigned int priority)
{
    arm_gic_set_priority(0, vector, priority);
}
/**
 * Get the priority of an interrupt.
 * @param vector the interrupt number
 * @return the programmed priority value
 */
unsigned int rt_hw_interrupt_get_priority(int vector)
{
    return arm_gic_get_priority(0, vector);
}
/**
 * Set the CPU interface priority masking threshold.
 * @param priority the priority masking threshold
 */
void rt_hw_interrupt_set_priority_mask(unsigned int priority)
{
    arm_gic_set_interface_prior_mask(0, priority);
}
/**
 * Get the CPU interface priority masking threshold.
 * @return the current priority masking threshold
 */
unsigned int rt_hw_interrupt_get_priority_mask(void)
{
    return arm_gic_get_interface_prior_mask(0);
}
/**
* This function set priority grouping field split point.
* @param bits: priority grouping field split point
* @return 0: success; -1: failed
*/
int rt_hw_interrupt_set_prior_group_bits(unsigned int bits)
{
    /* the GIC binary point register encodes 'bits' group bits as 7 - bits */
    if (bits >= 8)
    {
        return -1;
    }

    arm_gic_set_binary_point(0, 7 - bits);

    return 0;
}
/**
* This function get priority grouping field split point.
* @param none
* @return priority grouping field split point
*/
unsigned int rt_hw_interrupt_get_prior_group_bits(void)
{
    /* the binary point register stores 7 - group_bits */
    return 7 - (arm_gic_get_binary_point(0) & 0x07);
}
#endif /* SOC_BCM283x */
/**
 * This function will install an interrupt service routine for a vector.
 *
 * On GIC builds the vector is additionally routed to the calling CPU:
 * by MPIDR affinity on GICv3, by target-CPU bit mask on GICv2.
 *
 * @param vector  the interrupt number
 * @param handler the ISR to install (ignored when RT_NULL)
 * @param param   argument passed to the ISR on dispatch
 * @param name    ISR name (stored only with RT_USING_INTERRUPT_INFO)
 * @return the previously installed handler, or RT_NULL when vector
 *         is out of range
 */
rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
        void *param, const char *name)
{
    rt_isr_handler_t old_handler = RT_NULL;

    if (vector < MAX_HANDLERS)
    {
        old_handler = isr_table[vector].handler;

        if (handler != RT_NULL)
        {
#ifdef RT_USING_INTERRUPT_INFO
            rt_strncpy(isr_table[vector].name, name, RT_NAME_MAX);
#endif /* RT_USING_INTERRUPT_INFO */
            isr_table[vector].handler = handler;
            isr_table[vector].param = param;
        }
    }

#ifdef BSP_USING_GIC
    /* NOTE(review): SPIs start at ID 32, but this condition routes only
     * vector > 32, skipping SPI 32 — confirm whether '>= 32' was meant. */
    if (vector > 32)
    {
#ifdef BSP_USING_GICV3
        rt_uint64_t cpu_affinity_val;

        __asm__ volatile ("mrs %0, mpidr_el1":"=r"(cpu_affinity_val));
        rt_hw_interrupt_set_target_cpus(vector, cpu_affinity_val);
#else
        rt_hw_interrupt_set_target_cpus(vector, 1 << rt_hw_cpu_id());
#endif /* BSP_USING_GICV3 */
    }
#endif

    return old_handler;
}
#if defined(RT_USING_SMP) || defined(RT_USING_AMP)
/**
 * Send a software generated interrupt (SGI) to the CPUs in cpu_mask.
 * @param ipi_vector the SGI number
 * @param cpu_mask bitmask of target CPUs (bit n selects CPU n)
 */
void rt_hw_ipi_send(int ipi_vector, unsigned int cpu_mask)
{
#ifdef BSP_USING_GICV2
    arm_gic_send_sgi(0, ipi_vector, cpu_mask, 0);
#elif defined(BSP_USING_GICV3)
    /* GICv3 takes an array of 32-bit masks, one word per 32 CPUs */
    rt_uint32_t gicv3_cpu_mask[(RT_CPUS_NR + 31) >> 5];
    gicv3_cpu_mask[0] = cpu_mask;
    arm_gic_send_affinity_sgi(0, ipi_vector, gicv3_cpu_mask, GICV3_ROUTED_TO_SPEC);
#endif
}
/**
 * Install the service routine for an inter-processor interrupt.
 */
void rt_hw_ipi_handler_install(int ipi_vector, rt_isr_handler_t ipi_isr_handler)
{
    /* note: ipi_vector maybe different with irq_vector */
    rt_hw_interrupt_install(ipi_vector, ipi_isr_handler, 0, "IPI_HANDLER");
}
#endif
#if defined(FINSH_USING_MSH) && defined(RT_USING_INTERRUPT_INFO)
/**
 * Shell command: list all installed interrupt service routines, with the
 * per-CPU dispatch counters when RT_USING_SMP is enabled.
 * @return 0 always
 */
int list_isr(void)
{
    int idx;
    rt_kprintf("%-*.*s nr handler param counter ", RT_NAME_MAX, RT_NAME_MAX, "irq");
#ifdef RT_USING_SMP
    for (int i = 0; i < RT_CPUS_NR; i++)
    {
        rt_kprintf(" cpu%2d ", i);
    }
#endif
    rt_kprintf("\n");
    for (int i = 0; i < RT_NAME_MAX; i++)
    {
        rt_kprintf("-");
    }
    rt_kprintf(" ---- ------------------ ------------------ ----------------");
#ifdef RT_USING_SMP
    for (int i = 0; i < RT_CPUS_NR; i++)
    {
        rt_kprintf(" -------");
    }
#endif
    rt_kprintf("\n");
    for (idx = 0; idx < MAX_HANDLERS; idx++)
    {
        if (isr_table[idx].handler != RT_NULL)
        {
            /* fix: was "%*.s" (precision 0), which printed an empty name
             * column; "%-*.*s" matches the header row's format */
            rt_kprintf("%-*.*s %4d %p %p %16d", RT_NAME_MAX, RT_NAME_MAX,
                       isr_table[idx].name, idx, isr_table[idx].handler,
                       isr_table[idx].param, isr_table[idx].counter);
#ifdef RT_USING_SMP
            for (int i = 0; i < RT_CPUS_NR; i++)
                rt_kprintf(" %7d", isr_table[idx].cpu_counter[i]);
#endif
            rt_kprintf("\n");
        }
    }
    return 0;
}
#include "finsh.h"
MSH_CMD_EXPORT(list_isr, list isr)
#endif

View File

@ -0,0 +1,953 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-01-10 bernard porting to AM1808
* 2021-11-28 GuEe-GUI first version
* 2022-12-10 WangXiaoyao porting to MM
* 2024-07-08 Shell added support for ASID
*/
#define DBG_TAG "hw.mmu"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <rthw.h>
#include <rtthread.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#define __MMU_INTERNAL
#include "mm_aspace.h"
#include "mm_page.h"
#include "mmu.h"
#include "tlb.h"
#include "ioremap.h"
#ifdef RT_USING_SMART
#include <lwp_mm.h>
#endif
#define TCR_CONFIG_TBI0 rt_hw_mmu_config_tbi(0)
#define TCR_CONFIG_TBI1 rt_hw_mmu_config_tbi(1)
#define MMU_LEVEL_MASK 0x1ffUL
#define MMU_LEVEL_SHIFT 9
#define MMU_ADDRESS_BITS 39
#define MMU_ADDRESS_MASK 0x0000fffffffff000UL
#define MMU_ATTRIB_MASK 0xfff0000000000ffcUL
#define MMU_TYPE_MASK 3UL
#define MMU_TYPE_USED 1UL
#define MMU_TYPE_BLOCK 1UL
#define MMU_TYPE_TABLE 3UL
#define MMU_TYPE_PAGE 3UL
#define MMU_TBL_BLOCK_2M_LEVEL 2
#define MMU_TBL_PAGE_4k_LEVEL 3
#define MMU_TBL_LEVEL_NR 4
/* restrict virtual address on usage of RT_NULL */
#ifndef KERNEL_VADDR_START
#define KERNEL_VADDR_START 0x1000
#endif
/* top-level kernel translation table: 512 descriptors = one 4 KiB page,
 * aligned as the MMU requires */
volatile unsigned long MMUTable[512] __attribute__((aligned(4 * 1024)));
/* one record per walk level: the table entry followed ('pos') and the
 * kernel virtual address of the next-level table page ('page') */
struct mmu_level_info
{
    unsigned long *pos;
    void *page;
};
/**
 * Tear down the 4 KiB mapping of v_addr in lv0_tbl and release any
 * intermediate table pages whose last reference this mapping held.
 * The walk stops early at a hole or at a block (2M) mapping, which this
 * helper does not dismantle.
 * ("kenrel" is a historical typo kept because all callers use this name.)
 */
static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
{
    int level;
    unsigned long va = (unsigned long)v_addr;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    struct mmu_level_info level_info[4];
    int ref;
    int level_shift = MMU_ADDRESS_BITS;
    unsigned long *pos;
    rt_memset(level_info, 0, sizeof level_info);
    /* walk down, remembering the entry and table page at each level */
    for (level = 0; level < MMU_TBL_LEVEL_NR; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        page = cur_lv_tbl[off];
        if (!(page & MMU_TYPE_USED))
        {
            break;
        }
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            break;
        }
        /* next table entry in current level */
        level_info[level].pos = cur_lv_tbl + off;
        /* descriptors hold physical addresses; convert to a kernel va */
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_info[level].page = cur_lv_tbl;
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* clear the leaf (4K) PTE, if the walk reached it */
    level = MMU_TBL_PAGE_4k_LEVEL;
    pos = level_info[level].pos;
    if (pos)
    {
        *pos = (unsigned long)RT_NULL;
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, pos, sizeof(void *));
    }
    level--;
    /* walk back up: drop one reference per table page, clearing the parent
     * entry first when ours is the final reference */
    while (level >= 0)
    {
        pos = level_info[level].pos;
        if (pos)
        {
            void *cur_page = level_info[level].page;
            ref = rt_page_ref_get(cur_page, 0);
            if (ref == 1)
            {
                *pos = (unsigned long)RT_NULL;
                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, pos, sizeof(void *));
            }
            rt_pages_free(cur_page, 0);
        }
        else
        {
            break;
        }
        level--;
    }
    return;
}
/**
 * Install a 4 KiB mapping va -> pa with the given attributes, allocating
 * intermediate table pages on demand. Each traversed table gains one page
 * reference per mapping so the unmap path can free empty tables.
 * @return 0 on success or an MMU_MAP_ERROR_* code; on failure the
 *         partially-built levels are rolled back via _kenrel_unmap_4K
 */
static int _kernel_map_4K(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr)
{
    int ret = 0;
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    rt_ubase_t va = (rt_ubase_t)vaddr;
    rt_ubase_t pa = (rt_ubase_t)paddr;
    int level_shift = MMU_ADDRESS_BITS;
    if (va & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    for (level = 0; level < MMU_TBL_PAGE_4k_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            /* no next-level table yet: allocate, zero and link one */
            page = (unsigned long)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
            if (!page)
            {
                ret = MMU_MAP_ERROR_NOPAGE;
                goto err;
            }
            rt_memset((void *)page, 0, ARCH_PAGE_SIZE);
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
        }
        else
        {
            /* table exists: take one more reference for this mapping */
            page = cur_lv_tbl[off];
            page &= MMU_ADDRESS_MASK;
            /* page to va */
            page -= PV_OFFSET;
            rt_page_ref_inc((void *)page, 0);
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* is block! error! */
            ret = MMU_MAP_ERROR_CONFLICT;
            goto err;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now is level page */
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_PAGE); /* page */
    off = (va >> ARCH_PAGE_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa; /* page */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    return ret;
err:
    _kenrel_unmap_4K(lv0_tbl, (void *)va);
    return ret;
}
/**
 * Install a 2 MiB block mapping va -> pa with the given attributes.
 * Same structure as _kernel_map_4K but stops one level earlier and writes
 * a block descriptor instead of a page descriptor.
 * @return 0 on success or an MMU_MAP_ERROR_* code; on failure the
 *         partially-built levels are rolled back via _kenrel_unmap_4K
 */
static int _kernel_map_2M(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr)
{
    int ret = 0;
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    unsigned long va = (unsigned long)vaddr;
    unsigned long pa = (unsigned long)paddr;
    int level_shift = MMU_ADDRESS_BITS;
    if (va & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            /* no next-level table yet: allocate, zero and link one */
            page = (unsigned long)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
            if (!page)
            {
                ret = MMU_MAP_ERROR_NOPAGE;
                goto err;
            }
            rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
        }
        else
        {
            /* table exists: take one more reference for this mapping */
            page = cur_lv_tbl[off];
            page &= MMU_ADDRESS_MASK;
            /* page to va */
            page -= PV_OFFSET;
            rt_page_ref_inc((void *)page, 0);
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* is block! error! */
            ret = MMU_MAP_ERROR_CONFLICT;
            goto err;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now is level page */
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_BLOCK); /* block */
    off = (va >> ARCH_SECTION_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa;
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    return ret;
err:
    _kenrel_unmap_4K(lv0_tbl, (void *)va);
    return ret;
}
/**
 * Map [v_addr, v_addr + size) to [p_addr, ...) in 'aspace', using 2M block
 * mappings whenever alignment and remaining size permit, 4K pages otherwise.
 * size must be page aligned.
 * @return v_addr on success, NULL on failure (mapping is undone first).
 *
 * NOTE(review): the undo loop steps by the stride in use at failure time,
 * and _kenrel_unmap_4K does not dismantle 2M block entries, so a mixed
 * 4K/2M run may not be fully rolled back -- confirm.
 */
void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
                    size_t attr)
{
    int ret = -1;
    void *unmap_va = v_addr;
    size_t remaining_sz = size;
    size_t stride;
    int (*mapper)(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr);
    RT_ASSERT(!(size & ARCH_PAGE_MASK));
    while (remaining_sz)
    {
        if (((rt_ubase_t)v_addr & ARCH_SECTION_MASK) || (remaining_sz < ARCH_SECTION_SIZE))
        {
            /* legacy 4k mapping */
            stride = ARCH_PAGE_SIZE;
            mapper = _kernel_map_4K;
        }
        else
        {
            /* 2m huge page */
            stride = ARCH_SECTION_SIZE;
            mapper = _kernel_map_2M;
        }
        /* check aliasing */
#ifdef RT_DEBUGGING_ALIASING
#define _ALIAS_OFFSET(addr) ((long)(addr) & (RT_PAGE_AFFINITY_BLOCK_SIZE - 1))
        if (rt_page_is_member((rt_base_t)p_addr) && _ALIAS_OFFSET(v_addr) != _ALIAS_OFFSET(p_addr))
        {
            LOG_W("Possibly aliasing on va(0x%lx) to pa(0x%lx)", v_addr, p_addr);
            rt_backtrace();
            RT_ASSERT(0);
        }
#endif /* RT_DEBUGGING_ALIASING */
        MM_PGTBL_LOCK(aspace);
        ret = mapper(aspace->page_table, v_addr, p_addr, attr);
        MM_PGTBL_UNLOCK(aspace);
        if (ret != 0)
        {
            /* other types of return value are taken as programming error */
            RT_ASSERT(ret == MMU_MAP_ERROR_NOPAGE);
            /* error, undo map */
            while (unmap_va != v_addr)
            {
                MM_PGTBL_LOCK(aspace);
                _kenrel_unmap_4K(aspace->page_table, (void *)unmap_va);
                MM_PGTBL_UNLOCK(aspace);
                unmap_va = (char *)unmap_va + stride;
            }
            break;
        }
        remaining_sz -= stride;
        v_addr = (char *)v_addr + stride;
        p_addr = (char *)p_addr + stride;
    }
    if (ret == 0)
    {
        return unmap_va;
    }
    return NULL;
}
/**
 * Unmap a range of virtual addresses from 'aspace', one 4K page at a time.
 * The caller guarantees that v_addr and size are page aligned.
 */
void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size)
{
    char *va = v_addr;
    size_t page_count = size >> ARCH_PAGE_SHIFT;
    if (!aspace->page_table)
    {
        return;
    }
    for (size_t i = 0; i < page_count; i++, va += ARCH_PAGE_SIZE)
    {
        MM_PGTBL_LOCK(aspace);
        /* skip holes: only tear down entries that actually translate */
        if (rt_hw_mmu_v2p(aspace, va) != ARCH_MAP_FAILED)
        {
            _kenrel_unmap_4K(aspace->page_table, va);
        }
        MM_PGTBL_UNLOCK(aspace);
    }
}
#ifdef ARCH_USING_ASID
/**
 * the asid is to identify a specialized address space on the TLB.
 * In the best case, each address space has its own exclusive asid. However,
 * ARM only guarantees 8 bits of ID space, which gives us only 254 (besides
 * the reserved 1 ASID for kernel).
 */
static rt_spinlock_t _asid_lock = RT_SPINLOCK_INIT;
/**
 * Return the ASID of 'aspace', allocating one on first use. On pool
 * exhaustion the counter wraps to 1; on wrap or on a conflict with the
 * ASID currently live in TTBR0, the aspace's TLB entries are invalidated.
 */
rt_uint16_t _aspace_get_asid(rt_aspace_t aspace)
{
    static rt_uint16_t _asid_pool = 0;
    rt_uint16_t asid_to, asid_from;
    rt_ubase_t ttbr0_from;
    asid_to = aspace->asid;
    if (asid_to == 0)
    {
        /* first use: allocate an ASID under the pool lock */
        rt_spin_lock(&_asid_lock);
#define MAX_ASID (1ul << MMU_SUPPORTED_ASID_BITS)
        if (_asid_pool && _asid_pool < MAX_ASID)
        {
            asid_to = ++_asid_pool;
            LOG_D("Allocated ASID %d to PID %d(aspace %p)", asid_to, lwp_self()->pid, aspace);
        }
        else
        {
            /* pool empty or exhausted: restart numbering at 1 */
            asid_to = _asid_pool = 1;
            LOG_D("Overflowed ASID %d to PID %d(aspace %p)", asid_to, lwp_self()->pid, aspace);
        }
        rt_spin_unlock(&_asid_lock);
        aspace->asid = asid_to;
        rt_hw_tlb_invalidate_aspace(aspace);
    }
    /* compare against the ASID currently installed in TTBR0_EL1 */
    __asm__ volatile("mrs %0, ttbr0_el1" :"=r"(ttbr0_from));
    asid_from = ttbr0_from >> MMU_ASID_SHIFT;
    if (asid_from == asid_to)
    {
        LOG_D("Conflict ASID. from %d, to %d", asid_from, asid_to);
        rt_hw_tlb_invalidate_aspace(aspace);
    }
    else
    {
        LOG_D("ASID switched. from %d, to %d", asid_from, asid_to);
    }
    return asid_to;
}
#else
/* no ASID support: invalidate the whole TLB on every switch instead */
rt_uint16_t _aspace_get_asid(rt_aspace_t aspace)
{
    rt_hw_tlb_invalidate_all();
    return 0;
}
#endif /* ARCH_USING_ASID */
/* compose a TTBR0 value: table base with the ASID in the upper bits */
#define CREATE_TTBR0(pgtbl, asid) ((rt_ubase_t)(pgtbl) | (rt_ubase_t)(asid) << MMU_ASID_SHIFT)
/**
 * Switch the active user address space by reloading TTBR0_EL1.
 * The kernel space table is installed separately (see rt_hw_mmu_ktbl_set),
 * so it is skipped here.
 */
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
    if (aspace != &rt_kernel_space)
    {
        rt_ubase_t ttbr0;
        void *pgtbl = aspace->page_table;
        pgtbl = rt_kmem_v2p(pgtbl);
        ttbr0 = CREATE_TTBR0(pgtbl, _aspace_get_asid(aspace));
        __asm__ volatile("msr ttbr0_el1, %0" ::"r"(ttbr0));
        __asm__ volatile("isb" ::: "memory");
    }
}
/**
 * Install the kernel translation table, then invalidate the TLB and the
 * instruction cache so the new mappings take effect. With RT_USING_SMART
 * the kernel table goes into TTBR1_EL1 (high half, after applying the
 * physical offset); otherwise TTBR0_EL1 is used.
 */
void rt_hw_mmu_ktbl_set(unsigned long tbl)
{
#ifdef RT_USING_SMART
    tbl += PV_OFFSET;
    __asm__ volatile("msr TTBR1_EL1, %0\n dsb sy\nisb" ::"r"(tbl) : "memory");
#else
    __asm__ volatile("msr TTBR0_EL1, %0\n dsb sy\nisb" ::"r"(tbl) : "memory");
#endif
    /* drop all stale EL1 translations and instruction-cache contents */
    __asm__ volatile("tlbi vmalle1\n dsb sy\nisb" ::: "memory");
    __asm__ volatile("ic ialluis\n dsb sy\nisb" ::: "memory");
}
/**
 * @brief setup Page Table for kernel space. It's a fixed map
 * and all mappings cannot be changed after initialization.
 *
 * Memory region in struct mem_desc must be page aligned,
 * otherwise is a failure and no report will be
 * returned.
 *
 * @param aspace target (kernel) address space
 * @param mdesc array of memory region descriptors
 * @param desc_nr number of entries in mdesc
 */
void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
{
    void *err;
    for (size_t i = 0; i < desc_nr; i++)
    {
        size_t attr;
        switch (mdesc->attr)
        {
        case NORMAL_MEM:
            attr = MMU_MAP_K_RWCB;
            break;
        case NORMAL_NOCACHE_MEM:
            /* NOTE(review): uses the same cached attribute as NORMAL_MEM;
             * confirm whether a non-cached attribute was intended */
            attr = MMU_MAP_K_RWCB;
            break;
        case DEVICE_MEM:
            attr = MMU_MAP_K_DEVICE;
            break;
        default:
            attr = MMU_MAP_K_DEVICE;
        }
        struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                     .limit_start = aspace->start,
                                     .limit_range_size = aspace->size,
                                     .map_size = mdesc->vaddr_end -
                                                 mdesc->vaddr_start + 1,
                                     .prefer = (void *)mdesc->vaddr_start};
        /* ARCH_MAP_FAILED means "no explicit pa": derive it from the va */
        if (mdesc->paddr_start == (rt_size_t)ARCH_MAP_FAILED)
            mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;
        int retval;
        retval = rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
                                          mdesc->paddr_start >> MM_PAGE_SHIFT, &err);
        if (retval)
        {
            LOG_E("%s: map failed with code %d", __FUNCTION__, retval);
            RT_ASSERT(0);
        }
        mdesc++;
    }
    /* activate the kernel table and release boot-time page bookkeeping */
    rt_hw_mmu_ktbl_set((unsigned long)rt_kernel_space.page_table);
    rt_page_cleanup();
}
/* record the ioremap window [vaddr, vaddr + size) and place the
 * memory-page-region (MPR) window directly below it */
static void _init_region(void *vaddr, size_t size)
{
    rt_ioremap_start = vaddr;
    rt_ioremap_size = size;
    rt_mpr_start = (char *)rt_ioremap_start - rt_mpr_size;
}
/**
 * This function will initialize the kernel address-space bookkeeping.
 *
 * @param aspace rt_aspace structure to initialize
 * @param v_address start of the I/O remap virtual region
 * @param size size of the I/O remap virtual region
 * @param vtable level-0 translation table to attach
 * @param pv_off pv offset in kernel space (unused here)
 *
 * @return 0 on successful and -1 for fail
 */
int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, size_t size,
                       size_t *vtable, size_t pv_off)
{
    size_t va_s, va_e;
    if (!aspace || !vtable)
    {
        return -1;
    }
    va_s = (size_t)v_address;
    va_e = (size_t)v_address + size - 1;
    if (va_e < va_s)
    {
        /* the region wraps around the top of the address space */
        return -1;
    }
    /* the region must not start inside the first 2M section (keeps the
     * RT_NULL neighborhood unmapped); the previous dead shift of va_e
     * (its shifted value was never read) has been removed */
    va_s >>= ARCH_SECTION_SHIFT;
    if (va_s == 0)
    {
        return -1;
    }
    rt_aspace_init(aspace, (void *)KERNEL_VADDR_START, 0 - KERNEL_VADDR_START,
                   vtable);
    _init_region(v_address, size);
    return 0;
}
/* weak hook: a platform may override this to enable Top-Byte-Ignore (TBI)
 * for ttbr0 (tbi_index 0) or ttbr1 (tbi_index 1); default disables TBI */
rt_weak long rt_hw_mmu_config_tbi(int tbi_index)
{
    return 0;
}
/************ setting el1 mmu register**************
MAIR_EL1
index 0 : memory outer writeback, write/read alloc
index 1 : memory nocache
index 2 : device nGnRnE
*****************************************************/
void mmu_tcr_init(void)
{
    unsigned long val64;
    unsigned long pa_range;
    /* memory attribute indirection: the three slots listed above */
    val64 = 0x00447fUL;
    __asm__ volatile("msr MAIR_EL1, %0\n dsb sy\n" ::"r"(val64));
    /* read the supported physical address range from the CPU id register */
    __asm__ volatile ("mrs %0, ID_AA64MMFR0_EL1":"=r"(val64));
    pa_range = val64 & 0xf; /* PARange */
    /* TCR_EL1 */
    val64 = (16UL << 0) /* t0sz 48bit */
            | (0x0UL << 6) /* reserved */
            | (0x0UL << 7) /* epd0 */
            | (0x3UL << 8) /* t0 wb cacheable */
            | (0x3UL << 10) /* inner shareable */
            | (0x2UL << 12) /* t0 outer shareable */
            | (0x0UL << 14) /* t0 4K */
            | (16UL << 16) /* t1sz 48bit */
            | (0x0UL << 22) /* define asid use ttbr0.asid */
            | (0x0UL << 23) /* epd1 */
            | (0x3UL << 24) /* t1 inner wb cacheable */
            | (0x3UL << 26) /* t1 outer wb cacheable */
            | (0x2UL << 28) /* t1 outer shareable */
            | (0x2UL << 30) /* t1 4k */
            | (pa_range << 32) /* PA range */
            | (0x0UL << 35) /* reserved */
            | (0x1UL << 36) /* as: 0:8bit 1:16bit */
            | (TCR_CONFIG_TBI0 << 37) /* tbi0 */
            | (TCR_CONFIG_TBI1 << 38); /* tbi1 */
    __asm__ volatile("msr TCR_EL1, %0\n" ::"r"(val64));
}
/* one translation-table page: 512 64-bit descriptors = 4 KiB */
struct page_table
{
    unsigned long page[512];
};
/* boot-time page pool handed over by the startup code; pages are handed
 * out sequentially and never recycled */
static struct page_table* __init_page_array;
static unsigned long __page_off = 0UL;
/* base address of the boot page pool */
unsigned long get_ttbrn_base(void)
{
    return (unsigned long)__init_page_array;
}
/* install the boot-time page pool */
void set_free_page(void *page_array)
{
    __init_page_array = page_array;
}
/* hand out the next unused table page from the pool */
unsigned long get_free_page(void)
{
    struct page_table *next = &__init_page_array[__page_off];
    __page_off += 1;
    return (unsigned long)next->page;
}
/**
 * Map one 2M section va -> pa into lv0_tbl using the boot page pool
 * (get_free_page) for intermediate tables; no page refcounting here, so
 * this path is for early/static mappings only.
 * @param flush when RT_TRUE, clean the dcache for every descriptor written
 * @return 0 or an MMU_MAP_ERROR_* code; no rollback on failure
 */
static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va,
                               unsigned long pa, unsigned long attr,
                               rt_bool_t flush)
{
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = MMU_ADDRESS_BITS;
    if (va & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            /* no next-level table yet: take one from the boot pool */
            page = get_free_page();
            if (!page)
            {
                return MMU_MAP_ERROR_NOPAGE;
            }
            rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = page | MMU_TYPE_TABLE;
            if (flush)
            {
                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
            }
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* is block! error! */
            return MMU_MAP_ERROR_CONFLICT;
        }
        /* NOTE(review): tables are followed by their raw descriptor
         * address (no PV_OFFSET adjustment) -- this early path presumably
         * runs identity-mapped or with the MMU off; confirm */
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_BLOCK); /* block */
    off = (va >> ARCH_SECTION_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa;
    if (flush)
    {
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    }
    return 0;
}
/* return the active TTBR0_EL1 translation table as a kernel virtual
 * address; the mask ((1ul << 48) - 2) strips bit 0 (CnP) and the ASID in
 * bits 48 and above */
void *rt_hw_mmu_tbl_get(void)
{
    uintptr_t tbl;
    __asm__ volatile("MRS %0, TTBR0_EL1" : "=r"(tbl));
    return rt_kmem_p2v((void *)(tbl & ((1ul << 48) - 2)));
}
/**
 * Identity-map a physical region as device memory using 2M sections, for
 * use before the full ioremap machinery is available.
 * @return paddr on success (the mapping is 1:1), RT_NULL on failure or
 *         when size is zero
 */
void *rt_ioremap_early(void *paddr, size_t size)
{
    static void *tbl = RT_NULL;
    volatile size_t section_cnt;
    rt_ubase_t section_va;
    if (!size)
    {
        return RT_NULL;
    }
    if (!tbl)
    {
        /* cache the active translation table on first use */
        tbl = rt_hw_mmu_tbl_get();
    }
    /* number of 2M sections covering [paddr, paddr + size), including the
     * slack from aligning paddr down to a section boundary */
    section_cnt = (size + ((rt_ubase_t)paddr & ARCH_SECTION_MASK)
                   + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT;
    section_va = (rt_ubase_t)paddr & (~ARCH_SECTION_MASK);
    for (; section_cnt > 0; section_cnt--)
    {
        if (_map_single_page_2M(tbl, section_va, section_va, MMU_MAP_K_DEVICE, RT_TRUE))
        {
            return RT_NULL;
        }
        section_va += ARCH_SECTION_SIZE;
    }
    return paddr;
}
/**
 * Map 'count' contiguous 2M sections va -> pa with the given attributes,
 * without cache maintenance (boot-time path). Returns 0 on success, -1 on
 * misaligned input, or the first mapper error; no rollback of earlier
 * sections is performed.
 */
static int _init_map_2M(unsigned long *lv0_tbl, unsigned long va,
                        unsigned long pa, unsigned long count,
                        unsigned long attr)
{
    int err;
    if ((va & ARCH_SECTION_MASK) || (pa & ARCH_SECTION_MASK))
    {
        return -1;
    }
    for (unsigned long n = 0; n < count; n++)
    {
        err = _map_single_page_2M(lv0_tbl, va, pa, attr, RT_FALSE);
        if (err != 0)
        {
            return err;
        }
        va += ARCH_SECTION_SIZE;
        pa += ARCH_SECTION_SIZE;
    }
    return 0;
}
/**
 * Walk the translation tables of 'aspace' for vaddr.
 * @param plvl_shf receives the address-bit shift of the level where the
 *        walk stopped (i.e. the offset width of the matching granule)
 * @return pointer to the leaf page/block descriptor, or NULL when the
 *         address is not mapped
 */
static unsigned long *_query(rt_aspace_t aspace, void *vaddr, int *plvl_shf)
{
    int level;
    unsigned long va = (unsigned long)vaddr;
    unsigned long *cur_lv_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = MMU_ADDRESS_BITS;
    cur_lv_tbl = aspace->page_table;
    RT_ASSERT(cur_lv_tbl);
    for (level = 0; level < MMU_TBL_PAGE_4k_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            /* hole in the address space */
            *plvl_shf = level_shift;
            return (void *)0;
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* a large (block) mapping ends the walk early */
            *plvl_shf = level_shift;
            return &cur_lv_tbl[off];
        }
        /* follow the table descriptor, converting pa to kernel va */
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now is level MMU_TBL_PAGE_4k_LEVEL */
    off = (va >> ARCH_PAGE_SHIFT);
    off &= MMU_LEVEL_MASK;
    page = cur_lv_tbl[off];
    *plvl_shf = level_shift;
    if (!(page & MMU_TYPE_USED))
    {
        return (void *)0;
    }
    return &cur_lv_tbl[off];
}
/**
 * Translate a virtual address to a physical one in 'aspace'.
 * @return the physical address, or ARCH_MAP_FAILED when not mapped
 */
void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *v_addr)
{
    int level_shift;
    unsigned long paddr;
    /* kernel space is translated by the dedicated kernel helper */
    if (aspace == &rt_kernel_space)
    {
        paddr = (unsigned long)rt_hw_mmu_kernel_v2p(v_addr);
    }
    else
    {
        unsigned long *pte = _query(aspace, v_addr, &level_shift);
        if (pte)
        {
            /* combine the frame address with the offset inside the granule */
            paddr = *pte & MMU_ADDRESS_MASK;
            paddr |= (rt_ubase_t)v_addr & ((1ul << level_shift) - 1);
        }
        else
        {
            paddr = (unsigned long)ARCH_MAP_FAILED;
        }
    }
    return (void *)paddr;
}
/* Switch a normal cached PTE to the non-cached memory type (the AttrIndx
 * field sits at bits [4:2] of the descriptor).
 * Returns 0 on success, -RT_ENOSYS if the PTE is not normal cached. */
static int _noncache(rt_ubase_t *pte)
{
    const rt_ubase_t attr_shift = 2;
    const rt_ubase_t attr_mask = 0x7 << attr_shift;
    rt_ubase_t old = *pte;
    if ((old & attr_mask) != (NORMAL_MEM << attr_shift))
    {
        // do not support other type to be noncache
        return -RT_ENOSYS;
    }
    *pte = (old & ~attr_mask) | (NORMAL_NOCACHE_MEM << attr_shift);
    return 0;
}
/* Switch a non-cached PTE back to the normal cached memory type.
 * Returns 0 on success, -RT_ENOSYS if the PTE is not normal non-cached. */
static int _cache(rt_ubase_t *pte)
{
    const rt_ubase_t attr_shift = 2;
    const rt_ubase_t attr_mask = 0x7 << attr_shift;
    rt_ubase_t old = *pte;
    if ((old & attr_mask) != (NORMAL_NOCACHE_MEM << attr_shift))
    {
        // do not support other type to be cache
        return -RT_ENOSYS;
    }
    *pte = (old & ~attr_mask) | (NORMAL_MEM << attr_shift);
    return 0;
}
/* dispatch table for rt_hw_mmu_control commands */
static int (*control_handler[MMU_CNTL_DUMMY_END])(rt_ubase_t *pte) = {
    [MMU_CNTL_CACHE] = _cache,
    [MMU_CNTL_NONCACHE] = _noncache,
};
/**
 * Apply a cache-control command to every mapped entry in
 * [vaddr, vaddr + size).
 * @return 0 if the last mapped entry was changed, -RT_EINVAL when no
 *         mapped entry was found, -RT_ENOSYS for an unknown command.
 *
 * NOTE(review): handler errors hit RT_ASSERT(err == RT_EOK), so e.g.
 * requesting NONCACHE on device memory asserts instead of returning an
 * error -- confirm this is intended.
 * NOTE(review): RT_ASSERT(range_end <= vend) requires 'size' to be a
 * multiple of every granule touched (4K or 2M) -- confirm callers.
 */
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                      enum rt_mmu_cntl cmd)
{
    int level_shift;
    int err = -RT_EINVAL;
    rt_ubase_t vstart = (rt_ubase_t)vaddr;
    rt_ubase_t vend = vstart + size;
    int (*handler)(rt_ubase_t * pte);
    if (cmd >= 0 && cmd < MMU_CNTL_DUMMY_END)
    {
        handler = control_handler[cmd];
        while (vstart < vend)
        {
            rt_ubase_t *pte = _query(aspace, (void *)vstart, &level_shift);
            /* advance by the granule size at which the walk stopped */
            rt_ubase_t range_end = vstart + (1ul << level_shift);
            RT_ASSERT(range_end <= vend);
            if (pte)
            {
                err = handler(pte);
                RT_ASSERT(err == RT_EOK);
            }
            vstart = range_end;
        }
    }
    else
    {
        err = -RT_ENOSYS;
    }
    return err;
}
/**
 * Early boot: build the two initial translation tables before the MMU is
 * enabled. tbl1 maps the kernel image at its link-time virtual address;
 * tbl0 identity-maps the same physical range so execution can survive the
 * MMU turn-on. Hangs forever on failure (nothing to report this early).
 */
void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1,
                           unsigned long size, unsigned long pv_off)
{
    int ret;
    unsigned long count = (size + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT;
    unsigned long normal_attr = MMU_MAP_K_RWCB;
    extern unsigned char _start;
    /* link-time va of the kernel image, aligned down to a 2M section */
    unsigned long va = (unsigned long) &_start - pv_off;
    va = RT_ALIGN_DOWN(va, 0x200000);
    /* setup pv off */
    rt_kmem_pvoff_set(pv_off);
    /* clean the first two pages */
    rt_memset((char *)tbl0, 0, ARCH_PAGE_SIZE);
    rt_memset((char *)tbl1, 0, ARCH_PAGE_SIZE);
    /* kernel va -> pa in tbl1 */
    ret = _init_map_2M(tbl1, va, va + pv_off, count, normal_attr);
    if (ret != 0)
    {
        while (1);
    }
    /* identity map (pa -> pa) in tbl0 */
    ret = _init_map_2M(tbl0, va + pv_off, va + pv_off, count, normal_attr);
    if (ret != 0)
    {
        while (1);
    }
}
/**
 * Allocate and zero a new top-level page table (one 4 KiB page), flushed
 * to memory so the MMU table walker observes the cleared entries.
 * @return virtual address of the table, RT_NULL on allocation failure
 */
void *rt_hw_mmu_pgtbl_create(void)
{
    size_t *mmu_table;
    mmu_table = (size_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
    if (!mmu_table)
    {
        return RT_NULL;
    }
    /* rt_memset for consistency with every other zeroing in this file */
    rt_memset(mmu_table, 0, ARCH_PAGE_SIZE);
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
    return mmu_table;
}
/* release a page table previously created by rt_hw_mmu_pgtbl_create */
void rt_hw_mmu_pgtbl_delete(void *pgtbl)
{
    rt_pages_free(pgtbl, 0);
}

View File

@ -0,0 +1,144 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven the first version
* 2023-06-24 Shell Support backtrace for user thread
* 2024-01-06 Shell Fix barrier on irq_disable/enable
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include "context_gcc.h"
#include "../include/vector_gcc.h"
#include <rtconfig.h>
#include <asm-generic.h>
#include <asm-fpu.h>
#include <armv8.h>
.section .text
.globl rt_hw_context_switch_to
/* write the "current thread" pointer into the dedicated thread register
 * when hardware thread-self is enabled; otherwise expands to nothing */
.macro update_tidr, srcx
#ifdef ARCH_USING_HW_THREAD_SELF
msr ARM64_THREAD_REG, \srcx
#endif /* ARCH_USING_HW_THREAD_SELF */
.endm
/*
 * void rt_hw_context_switch_to(to, struct rt_thread *to_thread);
 * X0 --> to (address of the to-thread's saved stack pointer)
 * X1 --> to_thread
 * First-time switch: no from-thread context to save.
 */
rt_hw_context_switch_to:
ldr x0, [x0]
mov sp, x0
update_tidr x1
/* reserved to_thread across the calls below */
mov x19, x1
mov x0, x19
bl rt_cpus_lock_status_restore
#ifdef RT_USING_SMART
mov x0, x19
bl lwp_user_setting_restore
#endif
b _context_switch_exit
.globl rt_hw_context_switch
/*
 * void rt_hw_context_switch(from, to, struct rt_thread *to_thread);
 * X0 --> from (address of the from-thread's saved stack pointer)
 * X1 --> to (address of the to-thread's saved stack pointer)
 * X2 --> to_thread
 */
rt_hw_context_switch:
SAVE_CONTEXT_SWITCH x19, x20
mov x3, sp
str x3, [x0] // store sp in preempted tasks TCB
ldr x0, [x1] // get new task stack pointer
mov sp, x0
update_tidr x2
/* backup thread self */
mov x19, x2
mov x0, x19
bl rt_cpus_lock_status_restore
#ifdef RT_USING_SMART
mov x0, x19
bl lwp_user_setting_restore
#endif
b _context_switch_exit
.globl rt_hw_irq_exit
.globl rt_hw_context_switch_interrupt
#define EXP_FRAME x19
#define FROM_SPP x20
#define TO_SPP x21
#define TO_TCB x22
/*
 * void rt_hw_context_switch_interrupt(context, from sp, to sp, to tcb)
 * X0 :interrupt context (exception frame pointer)
 * X1 :addr of from_thread's sp
 * X2 :addr of to_thread's sp
 * X3 :to_thread's tcb
 */
rt_hw_context_switch_interrupt:
#ifdef RT_USING_DEBUG
/* debug frame for backtrace */
stp x29, x30, [sp, #-0x10]!
#endif /* RT_USING_DEBUG */
/* we can discard all the previous ABI here */
mov EXP_FRAME, x0
mov FROM_SPP, x1
mov TO_SPP, x2
mov TO_TCB, x3
#ifdef RT_USING_SMART
GET_THREAD_SELF x0
bl lwp_user_setting_save
#endif /* RT_USING_SMART */
/* reset SP of from-thread */
mov sp, EXP_FRAME
/* push context for switch; resume through rt_hw_irq_exit on switch-back */
adr lr, rt_hw_irq_exit
SAVE_CONTEXT_SWITCH_FAST
/* save SP of from-thread */
mov x0, sp
str x0, [FROM_SPP]
/* setup SP to to-thread's */
ldr x0, [TO_SPP]
mov sp, x0
update_tidr TO_TCB
mov x0, TO_TCB
bl rt_cpus_lock_status_restore
#ifdef RT_USING_SMART
mov x0, TO_TCB
bl lwp_user_setting_restore
#endif /* RT_USING_SMART */
b _context_switch_exit
/* common tail: clear the exclusive monitor, then restore the context
 * that SAVE_CONTEXT_SWITCH* pushed for the incoming thread */
_context_switch_exit:
.local _context_switch_exit
clrex
RESTORE_CONTEXT_SWITCH

View File

@ -0,0 +1,60 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-03-25 Shell Trimming unecessary ops and
* improve the performance of ctx switch
*/
#ifndef __ARM64_CONTEXT_H__
#define __ARM64_CONTEXT_H__
#include "../include/context_gcc.h"
#include <rtconfig.h>
#include <asm-generic.h>
#include <asm-fpu.h>
#include <armv8.h>
/* restore a context saved by the thread-switch path */
.macro RESTORE_CONTEXT_SWITCH
_RESTORE_CONTEXT_SWITCH
.endm
/* pop a full exception frame pushed on IRQ entry.
 * `tst x3, #0x1f` tests SPSR_EL1.M[4:0]: Z is set when the saved mode is
 * EL0t (all-zero), and none of the loads/msr below alters the flags, so
 * the final `beq arch_ret_to_user` takes the user-return path exactly for
 * exceptions taken from user mode. */
.macro RESTORE_IRQ_CONTEXT
ldp x2, x3, [sp], #0x10 /* SPSR and ELR. */
tst x3, #0x1f
msr spsr_el1, x3
msr elr_el1, x2
ldp x29, x30, [sp], #0x10
msr sp_el0, x29
ldp x28, x29, [sp], #0x10
msr fpcr, x28
msr fpsr, x29
ldp x28, x29, [sp], #0x10
ldp x26, x27, [sp], #0x10
ldp x24, x25, [sp], #0x10
ldp x22, x23, [sp], #0x10
ldp x20, x21, [sp], #0x10
ldp x18, x19, [sp], #0x10
ldp x16, x17, [sp], #0x10
ldp x14, x15, [sp], #0x10
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
ldp x6, x7, [sp], #0x10
ldp x4, x5, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
RESTORE_FPU sp
#ifdef RT_USING_SMART
beq arch_ret_to_user
#endif
eret
.endm
#endif /* __ARM64_CONTEXT_H__ */

View File

@ -0,0 +1,34 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include "vector_gcc.h"
#include "context_gcc.h"
.section .text
/* FIQ is unused on this port: park the CPU if one ever arrives */
vector_fiq:
.globl vector_fiq
b .
.globl rt_hw_irq_exit
/**
 * void rt_hw_vector_irq_sched(void *eframe)
 * @brief do IRQ scheduling, then leave through the common IRQ exit path
 */
rt_hw_vector_irq_sched:
.globl rt_hw_vector_irq_sched
bl rt_scheduler_do_irq_switch
b rt_hw_irq_exit

View File

@ -0,0 +1,418 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-09-09 GuEe-GUI The first version
* 2022-09-24 GuEe-GUI Add operations and fdt init support
*/
#include <rtthread.h>
#define DBG_TAG "osi.psci"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
/* support cpu mpidr and smccc from libcpu */
#include <cpu.h>
#include <smccc.h>
#include <psci.h>
#include <drivers/ofw.h>
#include <drivers/platform.h>
#include <drivers/core/dm.h>
/* operation table, filled in according to the detected PSCI version */
struct psci_ops
{
    rt_uint32_t (*get_version)(void);
    rt_uint32_t (*cpu_on)(int cpuid, rt_ubase_t entry_point);
    rt_uint32_t (*cpu_off)(rt_uint32_t state);
    rt_uint32_t (*cpu_suspend)(rt_uint32_t power_state, rt_ubase_t entry_point);
    rt_uint32_t (*migrate)(int cpuid);
    rt_uint32_t (*get_affinity_info)(rt_ubase_t target_affinity, rt_ubase_t lowest_affinity_level);
    rt_uint32_t (*migrate_info_type)(void);
};
/* PSCI 0.1 does not fix function ids; they are read from the device tree */
struct psci_0_1_func_ids
{
    rt_uint32_t cpu_on;
    rt_uint32_t cpu_off;
    rt_uint32_t cpu_suspend;
    rt_uint32_t migrate;
};
typedef rt_err_t (*psci_init_ofw_handle)(struct rt_ofw_node *np);
/* low-level conduit (SMC or HVC) used to reach the firmware */
typedef rt_ubase_t (*psci_call_handle)(rt_uint32_t fn, rt_ubase_t arg0, rt_ubase_t arg1, rt_ubase_t arg2);
/* [40:63] and [24:31] must be zero, other is aff3 (64bit), aff2, aff1, aff0 */
#ifdef ARCH_CPU_64BIT
#define PSCI_FNC_ID(version_major, version_min, name) PSCI_##version_major##_##version_min##_FN64_##name
#define MPIDR_MASK 0xff00ffffff
#else
#define PSCI_FNC_ID(version_major, version_min, name) PSCI_##version_major##_##version_min##_FN_##name
#define MPIDR_MASK 0x00ffffff
#endif
static struct psci_ops _psci_ops = {};
static struct psci_0_1_func_ids psci_0_1_func_ids = {};
static psci_call_handle psci_call;
/* PSCI SMCCC */
/* conduit via SMC (trap to secure firmware) */
static rt_ubase_t psci_smc_call(rt_uint32_t fn, rt_ubase_t arg0, rt_ubase_t arg1, rt_ubase_t arg2)
{
    struct arm_smccc_res_t res;
    arm_smccc_smc(fn, arg0, arg1, arg2, 0, 0, 0, 0, &res, RT_NULL);
    return res.a0;
}
/* conduit via HVC (trap to a hypervisor) */
static rt_ubase_t psci_hvc_call(rt_uint32_t fn, rt_ubase_t arg0, rt_ubase_t arg1, rt_ubase_t arg2)
{
    struct arm_smccc_res_t res;
    arm_smccc_hvc(fn, arg0, arg1, arg2, 0, 0, 0, 0, &res, RT_NULL);
    return res.a0;
}
/* PSCI VERSION */
/* v0.1 firmware has no PSCI_VERSION call; report the version statically */
static rt_uint32_t psci_0_1_get_version(void)
{
    return PSCI_VERSION(0, 1);
}
static rt_uint32_t psci_0_2_get_version(void)
{
    return (rt_uint32_t)psci_call(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
}
/* PSCI FEATURES */
/* ask the firmware whether it implements a given function (PSCI >= 1.0) */
static rt_uint32_t psci_get_features(rt_uint32_t psci_func_id)
{
    return (rt_uint32_t)psci_call(PSCI_1_0_FN_PSCI_FEATURES, psci_func_id, 0, 0);
}
/* PSCI CPU_ON */
/**
 * Power up a secondary CPU through PSCI CPU_ON.
 * @param func_id version-specific CPU_ON function id
 * @param cpuid logical CPU index into rt_cpu_mpidr_table
 * @param entry_point address the CPU starts executing at
 * @return PSCI status; the invalid-parameters error for out-of-range cpuid
 */
static rt_uint32_t psci_cpu_on(rt_uint32_t func_id, int cpuid, rt_ubase_t entry_point)
{
    rt_uint32_t ret = -PSCI_RET_INVALID_PARAMETERS;
    /* also reject negative ids: cpuid indexes rt_cpu_mpidr_table */
    if (cpuid >= 0 && cpuid < RT_CPUS_NR)
    {
        rt_ubase_t mpid = rt_cpu_mpidr_table[cpuid] & MPIDR_MASK;
        ret = (rt_uint32_t)psci_call(func_id, mpid, entry_point, 0);
    }
    return ret;
}
/* v0.1: function id discovered from the device tree */
static rt_uint32_t psci_0_1_cpu_on(int cpuid, rt_ubase_t entry_point)
{
    return psci_cpu_on(psci_0_1_func_ids.cpu_on, cpuid, entry_point);
}
/* v0.2+: architecturally fixed function id */
static rt_uint32_t psci_0_2_cpu_on(int cpuid, rt_ubase_t entry_point)
{
    return psci_cpu_on(PSCI_FNC_ID(0, 2, CPU_ON), cpuid, entry_point);
}
/* PSCI CPU_OFF */
/* power down the calling CPU */
static rt_uint32_t psci_cpu_off(rt_uint32_t func_id, rt_uint32_t state)
{
    return (rt_uint32_t)psci_call(func_id, state, 0, 0);
}
static rt_uint32_t psci_0_1_cpu_off(rt_uint32_t state)
{
    return psci_cpu_off(psci_0_1_func_ids.cpu_off, state);
}
static rt_uint32_t psci_0_2_cpu_off(rt_uint32_t state)
{
    return psci_cpu_off(PSCI_0_2_FN_CPU_OFF, state);
}
/* PSCI CPU_SUSPEND */
/* suspend the calling CPU; execution resumes at entry_point on a wakeup
 * from a powerdown state */
static rt_uint32_t psci_cpu_suspend(rt_uint32_t func_id, rt_uint32_t power_state, rt_ubase_t entry_point)
{
    return (rt_uint32_t)psci_call(func_id, power_state, entry_point, 0);
}
static rt_uint32_t psci_0_1_cpu_suspend(rt_uint32_t power_state, rt_ubase_t entry_point)
{
    return psci_cpu_suspend(psci_0_1_func_ids.cpu_suspend, power_state, entry_point);
}
static rt_uint32_t psci_0_2_cpu_suspend(rt_uint32_t power_state, rt_ubase_t entry_point)
{
    return psci_cpu_suspend(PSCI_FNC_ID(0, 2, CPU_SUSPEND), power_state, entry_point);
}
/* PSCI CPU_MIGRATE */
/**
 * Ask the Trusted OS to migrate to the given CPU.
 * @param func_id version-specific MIGRATE function id
 * @param cpuid logical CPU index into rt_cpu_mpidr_table
 * @return PSCI status; the invalid-parameters error for out-of-range cpuid
 */
static rt_uint32_t psci_migrate(rt_uint32_t func_id, int cpuid)
{
    rt_uint32_t ret = -PSCI_RET_INVALID_PARAMETERS;
    /* also reject negative ids: cpuid indexes rt_cpu_mpidr_table */
    if (cpuid >= 0 && cpuid < RT_CPUS_NR)
    {
        rt_ubase_t mpid = rt_cpu_mpidr_table[cpuid] & MPIDR_MASK;
        ret = (rt_uint32_t)psci_call(func_id, mpid, 0, 0);
    }
    return ret;
}
static rt_uint32_t psci_0_1_migrate(int cpuid)
{
    return psci_migrate(psci_0_1_func_ids.migrate, cpuid);
}
static rt_uint32_t psci_0_2_migrate(int cpuid)
{
    return psci_migrate(PSCI_FNC_ID(0, 2, MIGRATE), cpuid);
}
/* PSCI AFFINITY_INFO */
/* query the power state of a CPU/cluster at the given affinity level */
static rt_uint32_t psci_affinity_info(rt_ubase_t target_affinity, rt_ubase_t lowest_affinity_level)
{
    return (rt_uint32_t)psci_call(PSCI_FNC_ID(0, 2, AFFINITY_INFO), target_affinity, lowest_affinity_level, 0);
}
/* PSCI MIGRATE_INFO_TYPE */
/* report the Trusted OS migration capability */
static rt_uint32_t psci_migrate_info_type(void)
{
    return (rt_uint32_t)psci_call(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
}
/* PSCI SYSTEM_OFF */
/* power the whole system off; does not return on success */
void psci_system_off(void)
{
    psci_call(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}
/* PSCI SYSTEM_RESET */
/* reboot: prefer SYSTEM_RESET2 when the firmware advertises it, fall back
 * to plain SYSTEM_RESET otherwise */
void psci_system_reboot(void)
{
    if (psci_get_features(PSCI_FNC_ID(1, 1, SYSTEM_RESET2)) != PSCI_RET_NOT_SUPPORTED)
    {
        /*
         * reset_type[31] = 0 (architectural)
         * reset_type[30:0] = 0 (SYSTEM_WARM_RESET)
         * cookie = 0 (ignored by the implementation)
         */
        psci_call(PSCI_FNC_ID(1, 1, SYSTEM_RESET2), 0, 0, 0);
    }
    else
    {
        psci_call(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
    }
}
/* Invoke _psci_ops.fn if installed; evaluate to PSCI_RET_NOT_SUPPORTED otherwise. */
#define PSCI_CALL_FN_RET(fn, ...) \
({ \
    rt_uint32_t rc; \
    rc = PSCI_RET_NOT_SUPPORTED; \
    if (_psci_ops.fn) \
        rc = _psci_ops.fn(__VA_ARGS__); \
    rc; \
})
/* Invoke _psci_ops.fn if installed; silently do nothing otherwise (no result). */
#define PSCI_CALL_FN(fn, ...) \
({ \
    if (_psci_ops.fn) \
        _psci_ops.fn(__VA_ARGS__); \
})
/* Public API: PSCI version reported by firmware, or PSCI_RET_NOT_SUPPORTED. */
rt_uint32_t rt_psci_get_version(void)
{
    return PSCI_CALL_FN_RET(get_version);
}
/* Public API: power up CPU @cpuid, starting execution at physical @entry_point. */
rt_uint32_t rt_psci_cpu_on(int cpuid, rt_ubase_t entry_point)
{
    return PSCI_CALL_FN_RET(cpu_on, cpuid, entry_point);
}
/* Public API: power down the calling CPU. */
rt_uint32_t rt_psci_cpu_off(rt_uint32_t state)
{
    return PSCI_CALL_FN_RET(cpu_off, state);
}
/* Public API: suspend the calling CPU into @power_state. */
rt_uint32_t rt_psci_cpu_suspend(rt_uint32_t power_state, rt_ubase_t entry_point)
{
    return PSCI_CALL_FN_RET(cpu_suspend, power_state, entry_point);
}
/* Public API: migrate the Trusted OS to CPU @cpuid. */
rt_uint32_t rt_psci_migrate(int cpuid)
{
    return PSCI_CALL_FN_RET(migrate, cpuid);
}
/* Public API: query power state of an affinity instance (v0.2+ only). */
rt_uint32_t rt_psci_get_affinity_info(rt_ubase_t target_affinity, rt_ubase_t lowest_affinity_level)
{
    return PSCI_CALL_FN_RET(get_affinity_info, target_affinity, lowest_affinity_level);
}
/* Public API: Trusted OS migration model (v0.2+ only). */
rt_uint32_t rt_psci_migrate_info_type(void)
{
    return PSCI_CALL_FN_RET(migrate_info_type);
}
#undef PSCI_CALL_FN_RET
#undef PSCI_CALL_FN
/* PSCI INIT */
/*
 * Probe a PSCI v0.1 node. v0.1 does not mandate function IDs, so each
 * ID is read from its devicetree property ("cpu_on", "cpu_off", ...)
 * and the matching operation is only installed when the property exists.
 * Always succeeds: missing properties just leave that op unset.
 */
static rt_err_t psci_0_1_init(struct rt_ofw_node *np)
{
    rt_uint32_t id;

    _psci_ops.get_version = psci_0_1_get_version;

    if (!rt_ofw_prop_read_u32(np, "cpu_on", &id))
    {
        psci_0_1_func_ids.cpu_on = id;
        _psci_ops.cpu_on = psci_0_1_cpu_on;
    }

    if (!rt_ofw_prop_read_u32(np, "cpu_off", &id))
    {
        psci_0_1_func_ids.cpu_off = id;
        _psci_ops.cpu_off = psci_0_1_cpu_off;
    }

    if (!rt_ofw_prop_read_u32(np, "cpu_suspend", &id))
    {
        psci_0_1_func_ids.cpu_suspend = id;
        _psci_ops.cpu_suspend = psci_0_1_cpu_suspend;
    }

    if (!rt_ofw_prop_read_u32(np, "migrate", &id))
    {
        psci_0_1_func_ids.migrate = id;
        _psci_ops.migrate = psci_0_1_migrate;
    }

    return RT_EOK;
}
/*
 * Probe a PSCI v0.2+ node. From v0.2 the function IDs are fixed by the
 * specification, so only the firmware-reported version needs validating
 * before the full operation table is installed.
 *
 * @np: matched devicetree node (unused; IDs are spec-defined)
 * @return RT_EOK on success, -RT_EINVAL if the firmware is older than v0.2.
 */
static rt_err_t psci_0_2_init(struct rt_ofw_node *np)
{
    rt_err_t err = RT_EOK;
    rt_uint32_t version = psci_0_2_get_version();
    if (version >= PSCI_VERSION(0, 2))
    {
        _psci_ops.get_version = psci_0_2_get_version;
        _psci_ops.cpu_on = psci_0_2_cpu_on;
        _psci_ops.cpu_off = psci_0_2_cpu_off;
        _psci_ops.cpu_suspend = psci_0_2_cpu_suspend;
        _psci_ops.migrate = psci_0_2_migrate;
        _psci_ops.get_affinity_info = psci_affinity_info;
        _psci_ops.migrate_info_type = psci_migrate_info_type;
    }
    else
    {
        /* The original message omitted the version; report what firmware returned. */
        LOG_E("Unsupported PSCI version v%d.%d detected",
                PSCI_VERSION_MAJOR(version), PSCI_VERSION_MINOR(version));
        err = -RT_EINVAL;
    }
    return err;
}
/*
 * Probe a PSCI v1.0+ node. v1.0 keeps the fixed v0.2 function IDs for
 * everything this driver uses, so the v0.2 probe is reused directly.
 */
static rt_err_t psci_1_0_init(struct rt_ofw_node *np)
{
    return psci_0_2_init(np);
}
/*
 * Common devicetree setup: select the SMCCC conduit from the "method"
 * property ("smc" or "hvc"), then run the version-specific init picked
 * by the matched compatible string, and finally publish the ops table
 * as the node's OFW data.
 *
 * Returns RT_EOK on success, -RT_ENOSYS when "method" is missing,
 * -RT_EINVAL on a bad "method" value or a failed version probe.
 */
static rt_err_t psci_ofw_init(struct rt_platform_device *pdev)
{
    rt_err_t err = RT_EOK;
    const char *method;
    const struct rt_ofw_node_id *id = pdev->id;
    struct rt_ofw_node *np = pdev->parent.ofw_node;
    if (!rt_ofw_prop_read_string(np, "method", &method))
    {
        /* Choose the firmware call conduit: Secure Monitor or Hypervisor. */
        if (!rt_strcmp(method, "smc"))
        {
            psci_call = psci_smc_call;
        }
        else if (!rt_strcmp(method, "hvc"))
        {
            psci_call = psci_hvc_call;
        }
        else
        {
            LOG_E("Invalid \"method\" property: %s", method);
            err = -RT_EINVAL;
        }
        if (!err)
        {
            /* id->data holds the per-compatible init (psci_0_1_init, ...). */
            psci_init_ofw_handle psci_init = (psci_init_ofw_handle)id->data;
            err = psci_init(np);
            if (!err)
            {
                rt_uint32_t version = rt_psci_get_version();
                rt_ofw_data(np) = &_psci_ops;
                /* Silence "unused" when LOG_I compiles away. */
                RT_UNUSED(version);
                LOG_I("Using PSCI v%d.%d Function IDs", PSCI_VERSION_MAJOR(version), PSCI_VERSION_MINOR(version));
            }
        }
    }
    else
    {
        err = -RT_ENOSYS;
    }
    return err;
}
/* Platform-driver probe hook: all the work happens in the OFW-based setup. */
static rt_err_t psci_probe(struct rt_platform_device *pdev)
{
    return psci_ofw_init(pdev);
}
/* Compatible strings -> version-specific init routine (stored in .data). */
static const struct rt_ofw_node_id psci_ofw_ids[] =
{
    { .compatible = "arm,psci", .data = psci_0_1_init },
    { .compatible = "arm,psci-0.2", .data = psci_0_2_init },
    { .compatible = "arm,psci-1.0", .data = psci_1_0_init },
    { /* sentinel */ }
};
/* Platform driver binding the PSCI devicetree node to this implementation. */
static struct rt_platform_driver psci_driver =
{
    .name = "arm-psci",
    .ids = psci_ofw_ids,
    .probe = psci_probe,
};
/* Register the PSCI driver at platform-init stage (INIT_PLATFORM_EXPORT). */
static int psci_drv_register(void)
{
    rt_platform_driver_register(&psci_driver);
    return 0;
}
INIT_PLATFORM_EXPORT(psci_drv_register);

View File

@ -0,0 +1,427 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-21 GuEe-GUI first version
*/
#include <rtthread.h>
#define DBG_TAG "cpu.aa64"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <smp_call.h>
#include <cpu.h>
#include <mmu.h>
#include <cpuport.h>
#include <interrupt.h>
#include <gtimer.h>
#include <setup.h>
#include <stdlib.h>
#include <ioremap.h>
#include <rtdevice.h>
#include <gic.h>
#include <gicv3.h>
#include <mm_memblock.h>
#define SIZE_KB 1024
#define SIZE_MB (1024 * SIZE_KB)
#define SIZE_GB (1024 * SIZE_MB)
extern rt_ubase_t _start, _end;
extern void _secondary_cpu_entry(void);
extern size_t MMUTable[];
extern void *system_vectors;
static void *fdt_ptr = RT_NULL;
static rt_size_t fdt_size = 0;
static rt_uint64_t initrd_ranges[3] = { };
#ifdef RT_USING_SMP
extern struct cpu_ops_t cpu_psci_ops;
extern struct cpu_ops_t cpu_spin_table_ops;
#else
extern int rt_hw_cpu_id(void);
#endif
rt_uint64_t rt_cpu_mpidr_table[] =
{
[RT_CPUS_NR] = 0,
};
static struct cpu_ops_t *cpu_ops[] =
{
#ifdef RT_USING_SMP
&cpu_psci_ops,
&cpu_spin_table_ops,
#endif
};
static struct rt_ofw_node *cpu_np[RT_CPUS_NR] = { };
/*
 * Record the bootloader-provided FDT blob (physical address, pre-MMU)
 * after validating its header; rt_hw_common_setup() relocates it later.
 */
void rt_hw_fdt_install_early(void *fdt)
{
    if (fdt != RT_NULL && !fdt_check_header(fdt))
    {
        fdt_ptr = fdt;
        fdt_size = fdt_totalsize(fdt);
    }
}
#ifdef RT_USING_HWTIMER
/* Calibrated busy-loop iterations per OS tick, one slot per CPU. */
static rt_ubase_t loops_per_tick[RT_CPUS_NR];
/* Read the generic timer physical counter (CNTPCT_EL0). */
static rt_ubase_t cpu_get_cycles(void)
{
    rt_ubase_t cycles;
    rt_hw_sysreg_read(cntpct_el0, cycles);
    return cycles;
}
/*
 * Calibrate loops_per_tick for the current CPU: run two busy loops of
 * one tick each (the first with an extra nop per iteration) and use the
 * difference in iteration counts as the calibration value.
 * NOTE(review): the subtraction appears meant to cancel loop overhead;
 * the exact model is undocumented — confirm against the delay math in
 * cpu_us_delay() before changing.
 */
static void cpu_loops_per_tick_init(void)
{
    rt_ubase_t offset;
    volatile rt_ubase_t freq, step, cycles_end1, cycles_end2;
    volatile rt_uint32_t cycles_count1 = 0, cycles_count2 = 0;
    /* CNTFRQ_EL0 is the generic timer frequency in Hz. */
    rt_hw_sysreg_read(cntfrq_el0, freq);
    step = freq / RT_TICK_PER_SECOND;
    cycles_end1 = cpu_get_cycles() + step;
    while (cpu_get_cycles() < cycles_end1)
    {
        __asm__ volatile ("nop");
        __asm__ volatile ("add %0, %0, #1":"=r"(cycles_count1));
    }
    cycles_end2 = cpu_get_cycles() + step;
    while (cpu_get_cycles() < cycles_end2)
    {
        __asm__ volatile ("add %0, %0, #1":"=r"(cycles_count2));
    }
    if ((rt_int32_t)(cycles_count2 - cycles_count1) > 0)
    {
        offset = cycles_count2 - cycles_count1;
    }
    else
    {
        /* Impossible, but prepared for any eventualities */
        offset = cycles_count2 / 4;
    }
    loops_per_tick[rt_hw_cpu_id()] = offset;
}
/*
 * Busy-wait for roughly @us microseconds using the generic timer counter.
 * 0x10c7 is the fixed-point factor 2^32 / 10^6 rounded up (microseconds
 * -> fraction of a second), so the >> 32 converts the product back to
 * counter cycles — TODO confirm the combined scaling with loops_per_tick.
 */
static void cpu_us_delay(rt_uint32_t us)
{
    volatile rt_base_t start = cpu_get_cycles(), cycles;
    cycles = ((us * 0x10c7UL) * loops_per_tick[rt_hw_cpu_id()] * RT_TICK_PER_SECOND) >> 32;
    while ((cpu_get_cycles() - start) < cycles)
    {
        rt_hw_cpu_relax();
    }
}
#endif /* RT_USING_HWTIMER */
/* Default idle hook: halt the core until the next interrupt (overridable). */
rt_weak void rt_hw_idle_wfi(void)
{
    __asm__ volatile ("wfi");
}
/* Point VBAR_EL1 at this port's exception vector table. */
static void system_vectors_init(void)
{
    rt_hw_set_current_vbar((rt_ubase_t)&system_vectors);
}
/*
 * Walk the devicetree /cpus nodes: fill rt_cpu_mpidr_table and cpu_np[],
 * run every enable-method's cpu_init hook, then calibrate the us-delay.
 * Called once on the boot CPU during rt_hw_common_setup().
 */
rt_inline void cpu_info_init(void)
{
    int i = 0;
    rt_uint64_t mpidr;
    struct rt_ofw_node *np;
    /* get boot cpu info */
    rt_hw_sysreg_read(mpidr_el1, mpidr);
    rt_ofw_foreach_cpu_node(np)
    {
        rt_uint64_t hwid = rt_ofw_get_cpu_hwid(np, 0);
        if ((mpidr & MPIDR_AFFINITY_MASK) != hwid)
        {
            /* Secondary CPU: tag bit 31 so SMP boot can tell it is not
             * the (already running) boot CPU — TODO confirm consumer. */
            hwid |= 1ULL << 31;
        }
        else
        {
            /* Boot CPU: keep the full MPIDR value as read. */
            hwid = mpidr;
        }
        cpu_np[i] = np;
        rt_cpu_mpidr_table[i] = hwid;
        rt_ofw_data(np) = (void *)hwid;
        /* Give every registered enable-method a chance to set up this CPU. */
        for (int idx = 0; idx < RT_ARRAY_SIZE(cpu_ops); ++idx)
        {
            struct cpu_ops_t *ops = cpu_ops[idx];
            if (ops->cpu_init)
            {
                ops->cpu_init(i, np);
            }
        }
        if (++i >= RT_CPUS_NR)
        {
            break;
        }
    }
    /* Secondaries start with caches off; flush so they see the table. */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, rt_cpu_mpidr_table, sizeof(rt_cpu_mpidr_table));
#ifdef RT_USING_HWTIMER
    cpu_loops_per_tick_init();
    if (!rt_device_hwtimer_us_delay)
    {
        rt_device_hwtimer_us_delay = &cpu_us_delay;
    }
#endif /* RT_USING_HWTIMER */
}
/*
 * Early boot on the primary CPU: lay out physical memory
 * (kernel | heap | page pool | relocated FDT), bring up the MMU mapping,
 * heap and page allocators, parse the FDT, and initialize interrupts,
 * timer, console and (optionally) SMP IPIs.
 * Physical layout is derived from the kernel image end; pv_off converts
 * between physical and virtual addresses throughout.
 */
void rt_hw_common_setup(void)
{
    rt_size_t kernel_start, kernel_end;
    rt_size_t heap_start, heap_end;
    rt_size_t init_page_start, init_page_end;
    rt_size_t fdt_start, fdt_end;
    rt_region_t init_page_region = { 0 };
    rt_region_t platform_mem_region = { 0 };
    static struct mem_desc platform_mem_desc;
    const rt_ubase_t pv_off = PV_OFFSET;
    system_vectors_init();
    /* Reserve a kernel-virtual window for ioremap/early mappings. */
#ifdef RT_USING_SMART
    rt_hw_mmu_map_init(&rt_kernel_space, (void*)0xfffffffff0000000, 0x10000000, MMUTable, pv_off);
#else
    rt_hw_mmu_map_init(&rt_kernel_space, (void*)0xffffd0000000, 0x10000000, MMUTable, 0);
#endif
    /* Carve consecutive physical regions right after the kernel image.
     * The "- 64" widens the reserved span below _start — presumably for
     * the image header; TODO confirm against the linker script. */
    kernel_start = RT_ALIGN_DOWN((rt_size_t)rt_kmem_v2p((void *)&_start) - 64, ARCH_PAGE_SIZE);
    kernel_end = RT_ALIGN((rt_size_t)rt_kmem_v2p((void *)&_end), ARCH_PAGE_SIZE);
    heap_start = kernel_end;
    heap_end = RT_ALIGN(heap_start + ARCH_HEAP_SIZE, ARCH_PAGE_SIZE);
    init_page_start = heap_end;
    init_page_end = RT_ALIGN(init_page_start + ARCH_INIT_PAGE_SIZE, ARCH_PAGE_SIZE);
    fdt_start = init_page_end;
    fdt_end = RT_ALIGN(fdt_start + fdt_size, ARCH_PAGE_SIZE);
    platform_mem_region.start = kernel_start;
    platform_mem_region.end = fdt_end;
    /* Keep the allocators away from these fixed regions. */
    rt_memblock_reserve_memory("kernel", kernel_start, kernel_end, MEMBLOCK_NONE);
    rt_memblock_reserve_memory("memheap", heap_start, heap_end, MEMBLOCK_NONE);
    rt_memblock_reserve_memory("init-page", init_page_start, init_page_end, MEMBLOCK_NONE);
    rt_memblock_reserve_memory("fdt", fdt_start, fdt_end, MEMBLOCK_NONE);
    /* To virtual address */
    fdt_ptr = (void *)(fdt_ptr - pv_off);
#ifdef KERNEL_VADDR_START
    /* FDT outside the 1 GiB linear window: map it explicitly first. */
    if ((rt_ubase_t)fdt_ptr + fdt_size - KERNEL_VADDR_START > SIZE_GB)
    {
        fdt_ptr = rt_ioremap_early(fdt_ptr + pv_off, fdt_size);
        RT_ASSERT(fdt_ptr != RT_NULL);
    }
#endif
    /* Relocate the FDT into its reserved slot (regions may overlap). */
    rt_memmove((void *)(fdt_start - pv_off), fdt_ptr, fdt_size);
    fdt_ptr = (void *)fdt_start - pv_off;
    rt_system_heap_init((void *)(heap_start - pv_off), (void *)(heap_end - pv_off));
    init_page_region.start = init_page_start - pv_off;
    init_page_region.end = init_page_end - pv_off;
    rt_page_init(init_page_region);
    /* create MMU mapping of kernel memory */
    platform_mem_region.start = RT_ALIGN_DOWN(platform_mem_region.start, ARCH_PAGE_SIZE);
    platform_mem_region.end = RT_ALIGN(platform_mem_region.end, ARCH_PAGE_SIZE);
    platform_mem_desc.paddr_start = platform_mem_region.start;
    platform_mem_desc.vaddr_start = platform_mem_region.start - pv_off;
    platform_mem_desc.vaddr_end = platform_mem_region.end - pv_off - 1;
    platform_mem_desc.attr = NORMAL_MEM;
    rt_hw_mmu_setup(&rt_kernel_space, &platform_mem_desc, 1);
    if (rt_fdt_prefetch(fdt_ptr))
    {
        /* Platform cannot be initialized */
        RT_ASSERT(0);
    }
    /* Scan the relocated FDT: console, initrd, memory banks. */
    rt_fdt_scan_chosen_stdout();
    rt_fdt_scan_initrd(initrd_ranges);
    rt_fdt_scan_memory();
    rt_memblock_setup_memory_environment();
    rt_fdt_earlycon_kick(FDT_EARLYCON_KICK_UPDATE);
    rt_fdt_unflatten();
    cpu_info_init();
#ifdef RT_USING_PIC
    rt_pic_init();
    rt_pic_irq_init();
#else
    /* initialize hardware interrupt */
    rt_hw_interrupt_init();
    /* initialize uart */
    rt_hw_uart_init();
#endif
#ifndef RT_HWTIMER_ARM_ARCH
    /* initialize timer for os tick */
    rt_hw_gtimer_init();
#endif /* !RT_HWTIMER_ARM_ARCH */
#ifdef RT_USING_COMPONENTS_INIT
    rt_components_board_init();
#endif
#if defined(RT_USING_CONSOLE) && defined(RT_USING_DEVICE)
    rt_ofw_console_setup();
#endif
    rt_thread_idle_sethook(rt_hw_idle_wfi);
#ifdef RT_USING_SMP
    rt_smp_call_init();
    /* Install the IPI handle */
    /* NOTE(review): RT_STOP_IPI is routed to rt_scheduler_ipi_handler,
     * same as RT_SCHEDULE_IPI — confirm this is intentional. */
    rt_hw_ipi_handler_install(RT_SCHEDULE_IPI, rt_scheduler_ipi_handler);
    rt_hw_ipi_handler_install(RT_STOP_IPI, rt_scheduler_ipi_handler);
    rt_hw_ipi_handler_install(RT_SMP_CALL_IPI, rt_smp_call_ipi_handler);
    rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
    rt_hw_interrupt_umask(RT_STOP_IPI);
    rt_hw_interrupt_umask(RT_SMP_CALL_IPI);
#endif
}
#ifdef RT_USING_SMP
/*
 * Boot every secondary CPU recorded in cpu_np[]: look up its devicetree
 * "enable-method" and hand the physical address of _secondary_cpu_entry
 * to the matching cpu_ops cpu_boot hook (PSCI or spin-table).
 */
rt_weak void rt_hw_secondary_cpu_up(void)
{
    int cpu_id = rt_hw_cpu_id();
    rt_uint64_t entry = (rt_uint64_t)rt_kmem_v2p(_secondary_cpu_entry);
    if (!entry)
    {
        LOG_E("Failed to translate '_secondary_cpu_entry' to physical address");
        RT_ASSERT(0);
    }
    /* Maybe we are not running on the first CPU, so skip ourselves by id. */
    for (int i = 0; i < RT_ARRAY_SIZE(cpu_np); ++i)
    {
        int err;
        const char *enable_method;
        if (!cpu_np[i] || i == cpu_id)
        {
            continue;
        }
        err = rt_ofw_prop_read_string(cpu_np[i], "enable-method", &enable_method);
        /* First ops whose method string matches gets to boot this CPU. */
        for (int idx = 0; !err && idx < RT_ARRAY_SIZE(cpu_ops); ++idx)
        {
            struct cpu_ops_t *ops = cpu_ops[idx];
            if (ops->method && !rt_strcmp(ops->method, enable_method) && ops->cpu_boot)
            {
                err = ops->cpu_boot(i, entry);
                break;
            }
        }
        if (err)
        {
            LOG_W("Call cpu %d on %s", i, "failed");
        }
    }
}
/*
 * C entry for a freshly-booted secondary CPU (called from assembly):
 * installs vectors and the shared MMU table, brings up its interrupt
 * controller and tick timer, unmasks the IPIs and enters the scheduler.
 * Never returns.
 */
rt_weak void rt_hw_secondary_cpu_bsp_start(void)
{
    int cpu_id = rt_hw_cpu_id();
    system_vectors_init();
    /* Held until rt_system_scheduler_start() releases this CPU. */
    rt_hw_spin_lock(&_cpus_lock);
    /* Save all mpidr */
    rt_hw_sysreg_read(mpidr_el1, rt_cpu_mpidr_table[cpu_id]);
    /* Switch onto the shared kernel translation table. */
    rt_hw_mmu_ktbl_set((unsigned long)MMUTable);
#ifdef RT_USING_PIC
    rt_pic_irq_init();
#else
    /* initialize vector table */
    rt_hw_vector_init();
    arm_gic_cpu_init(0, 0);
#ifdef BSP_USING_GICV3
    arm_gic_redist_init(0, 0);
#endif /* BSP_USING_GICV3 */
#endif
#ifndef RT_HWTIMER_ARM_ARCH
    /* initialize timer for os tick */
    rt_hw_gtimer_local_enable();
#endif /* !RT_HWTIMER_ARM_ARCH */
    rt_dm_secondary_cpu_init();
    rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
    rt_hw_interrupt_umask(RT_STOP_IPI);
    rt_hw_interrupt_umask(RT_SMP_CALL_IPI);
    LOG_I("Call cpu %d on %s", cpu_id, "success");
#ifdef RT_USING_HWTIMER
    /* Only recalibrate when our delay routine is actually installed. */
    if (rt_device_hwtimer_us_delay == &cpu_us_delay)
    {
        cpu_loops_per_tick_init();
    }
#endif
    rt_system_scheduler_start();
}
/* Secondary-CPU idle body: wait-for-event until woken (e.g. by an SEV/IPI). */
rt_weak void rt_hw_secondary_cpu_idle_exec(void)
{
    rt_hw_wfe();
}
#endif
/* Kernel console output: route rt_kprintf() text to the FDT earlycon. */
void rt_hw_console_output(const char *str)
{
    rt_fdt_earlycon_output(str);
}

View File

@ -0,0 +1,32 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
/**
* SMCCC v0.2
* ARM DEN0028E chapter 2.6
*/
/*
 * Conduit-agnostic SMCCC call: arguments are already in x0..x7 per the
 * calling convention; \instr is `smc` or `hvc`. After the call, the
 * four result registers are stored into the struct arm_smccc_res whose
 * pointer was passed as the 9th argument on the caller's stack
 * ([sp, #16] after our 16-byte frame push) — per AAPCS64.
 */
.macro SMCCC instr
    stp x29, x30, [sp, #-16]!
    mov x29, sp
    \instr #0
    // store in arm_smccc_res
    ldr x4, [sp, #16]
    stp x0, x1, [x4, #0]
    stp x2, x3, [x4, #16]
1:
    ldp x29, x30, [sp], #16
    ret
.endm
/* unsigned long arm_smccc_smc(a0..a7, struct arm_smccc_res *res, ...) */
.global arm_smccc_smc
arm_smccc_smc:
    SMCCC smc
/* Same contract via the hypervisor conduit. */
.global arm_smccc_hvc
arm_smccc_hvc:
    SMCCC hvc

View File

@ -0,0 +1,63 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-12 RT-Thread init
* 2023-07-13 GuEe-GUI append fpu: Q16 ~ Q31
*/
#include <board.h>
#include <rtthread.h>
#include <cpuport.h>
#include <armv8.h>
/* Initial PSTATE for a new thread: EL1 with the dedicated EL1 stack. */
#define INITIAL_SPSR_EL1 (PSTATE_EL1 | SP_ELx)
/**
 * This function will initialize thread stack
 *
 * The layout built here must mirror exactly what the context-restore
 * assembly pops (FPU Q0-Q31, GP register pairs, sp_el0/x29, FPSR/FPCR,
 * SPSR, ELR) — do not reorder these stores independently of that code.
 *
 * @param tentry the entry of thread
 * @param parameter the parameter of entry
 * @param stack_addr the beginning stack address
 * @param texit the function will be called when thread exit
 *
 * @return stack address
 */
rt_uint8_t *rt_hw_stack_init(void *tentry, void *parameter,
        rt_uint8_t *stack_addr, void *texit)
{
    rt_ubase_t *stk;
    /* The AAPCS64 requires 128-bit (16 byte) stack alignment */
    stk = (rt_ubase_t*)RT_ALIGN_DOWN((rt_ubase_t)stack_addr, 16);
    /* 32 zeroed 128-bit slots for the FPU/SIMD registers Q0-Q31. */
    for (int i = 0; i < 32; ++i)
    {
        stk -= sizeof(rt_uint128_t) / sizeof(rt_ubase_t);
        *(rt_uint128_t *)stk = (rt_uint128_t) { 0 };
    }
    /* GP registers, pushed in LDP-pair order; _thread_start consumes
     * x19 (entry), x20 (exit hook) and x21 (parameter). */
    *(--stk) = (rt_ubase_t)texit;        /* X20, 2nd param */
    *(--stk) = (rt_ubase_t)tentry;       /* X19, 1st param */
    *(--stk) = (rt_ubase_t)22;           /* X22 */
    *(--stk) = (rt_ubase_t)parameter;    /* X21, 3rd param */
    *(--stk) = (rt_ubase_t)24;           /* X24 */
    *(--stk) = (rt_ubase_t)23;           /* X23 */
    *(--stk) = (rt_ubase_t)26;           /* X26 */
    *(--stk) = (rt_ubase_t)25;           /* X25 */
    *(--stk) = (rt_ubase_t)28;           /* X28 */
    *(--stk) = (rt_ubase_t)27;           /* X27 */
    *(--stk) = (rt_ubase_t)0;            /* sp_el0 */
    *(--stk) = (rt_ubase_t)0;            /* X29 - addr 0 as AAPCS64 specified */
    *(--stk) = (rt_ubase_t)0;            /* FPSR */
    *(--stk) = (rt_ubase_t)0;            /* FPCR */
    *(--stk) = INITIAL_SPSR_EL1;         /* Save Processor States */
    *(--stk) = (rt_ubase_t)_thread_start;/* Exception return address. */
    /* return task's current stack address */
    return (rt_uint8_t *)stk;
}

View File

@ -0,0 +1,29 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven the first version
* 2023-06-24 Shell Support debug frame for user thread
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include "rtconfig.h"
#include "asm-generic.h"
#include "asm-fpu.h"
#include "armv8.h"
.section .text
/*
 * First code run by a new thread after context restore. Per the stack
 * frame built in rt_hw_stack_init(): x19 = thread entry, x20 = exit
 * hook, x21 = entry parameter. The frame pointer is cleared so
 * backtraces terminate here.
 */
START_POINT(_thread_start)
    mov x0, x21         /* pass the thread parameter */
    blr x19             /* run the thread entry */
    mov x29, #0         /* clear FP before the exit hook */
    blr x20             /* thread exit handler */
    b . /* never here */
START_POINT_END(_thread_start)

View File

@ -0,0 +1,18 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
/* Minimal reset stub: the real boot path presumably lives elsewhere in
 * this port — TODO confirm; this handler only provides the symbol. */
.global Reset_Handler
.section ".start", "ax"
Reset_Handler:
    nop
.text
/* Weak default SVC handler: return immediately when no override exists. */
.weak SVC_Handler
SVC_Handler:
    ret

View File

@ -0,0 +1,398 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-20 Bernard first version
*/
#include <rtthread.h>
#include <rthw.h>
#include <board.h>
#include <armv8.h>
#include "interrupt.h"
#include "mm_aspace.h"
#define DBG_TAG "libcpu.trap"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>
#ifdef RT_USING_FINSH
extern long list_thread(void);
#endif
#ifdef RT_USING_LWP
#include <lwp.h>
#include <lwp_arch.h>
#ifdef LWP_USING_CORE_DUMP
#include <lwp_core_dump.h>
#endif
/*
 * Report an unrecoverable fault. If the exception came from EL0
 * (user mode: low 5 PSTATE bits clear) print the faulting PC, dump a
 * user-space backtrace, optionally write a core dump, and terminate the
 * offending process group.
 *
 * @pc_adj bytes are subtracted from regs->pc for display/core-dump.
 */
static void _check_fault(struct rt_hw_exp_stack *regs, uint32_t pc_adj, char *info)
{
    uint32_t is_user_fault;
    rt_thread_t th;
    /* SPSR_EL1 M[4:0] == 0 means the exception was taken from EL0t. */
    is_user_fault = !(regs->cpsr & 0x1f);
    if (is_user_fault)
    {
        rt_kprintf("%s! pc = 0x%x\n", info, regs->pc - pc_adj);
    }
    /* user stack backtrace */
    th = rt_thread_self();
    if (th && th->lwp)
    {
        arch_backtrace_uthread(th);
    }
    if (is_user_fault)
    {
#ifdef LWP_USING_CORE_DUMP
        lwp_core_dump(regs, pc_adj);
#endif
        sys_exit_group(-1);
    }
}
/*
 * Map the ESR_EL1 fault status code (FSC) to the MM layer's fault type:
 * translation faults -> page fault, permission faults -> RWX violation,
 * everything else (including access-flag faults) -> generic.
 */
rt_inline int _get_type(unsigned long esr)
{
    int ret;
    int fsc = ARM64_ESR_EXTRACT_FSC(esr);
    switch (fsc)
    {
    case ARM64_FSC_TRANSLATION_FAULT_LEVEL_0:
    case ARM64_FSC_TRANSLATION_FAULT_LEVEL_1:
    case ARM64_FSC_TRANSLATION_FAULT_LEVEL_2:
    case ARM64_FSC_TRANSLATION_FAULT_LEVEL_3:
        ret = MM_FAULT_TYPE_PAGE_FAULT;
        break;
    case ARM64_FSC_PERMISSION_FAULT_LEVEL_0:
    case ARM64_FSC_PERMISSION_FAULT_LEVEL_1:
    case ARM64_FSC_PERMISSION_FAULT_LEVEL_2:
    case ARM64_FSC_PERMISSION_FAULT_LEVEL_3:
        ret = MM_FAULT_TYPE_RWX_PERM;
        break;
    case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_0:
    case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_1:
    case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_2:
    case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_3:
        /* access flag fault, not handle currently */
    default:
        ret = MM_FAULT_TYPE_GENERIC;
    }
    return ret;
}
/* Nonzero when the saved PSTATE has the I (IRQ-mask) bit (bit 7) set. */
rt_inline long _irq_is_disable(long cpsr)
{
    return (cpsr & 0x80) ? 1 : 0;
}
/*
 * Try to resolve an abort via the userspace address-space layer (demand
 * paging / copy-on-write etc.). Returns 1 when the fault was fixed and
 * execution may resume, 0 otherwise. Only faults raised while a user
 * process (lwp) is current are considered.
 */
static int user_fault_fixable(unsigned long esr, struct rt_hw_exp_stack *regs)
{
    rt_ubase_t level;
    enum rt_mm_fault_op fault_op;
    enum rt_mm_fault_type fault_type;
    struct rt_lwp *lwp;
    void *dfar;
    int ret = 0;
    unsigned char ec = ARM64_ESR_EXTRACT_EC(esr);
    rt_bool_t is_write = ARM64_ABORT_WNR(esr);
    switch (ec)
    {
    case ARM64_EC_INST_ABORT_FROM_LO_EXCEPTION:
        fault_op = MM_FAULT_OP_EXECUTE;
        fault_type = _get_type(esr);
        break;
    case ARM64_EC_INST_ABORT_WITHOUT_A_CHANGE:
    case ARM64_EC_DATA_ABORT_FROM_LO_EXCEPTION:
    case ARM64_EC_DATA_ABORT_WITHOUT_A_CHANGE:
        fault_op = is_write ? MM_FAULT_OP_WRITE : MM_FAULT_OP_READ;
        fault_type = _get_type(esr);
        break;
    default:
        /* non-fixable */
        /* 0 acts as a "no fault op" sentinel checked below. */
        fault_op = 0;
        break;
    }
    /* page fault exception only allow from user space */
    lwp = lwp_self();
    if (lwp && fault_op)
    {
        /* FAR_EL1 holds the faulting virtual address. */
        __asm__ volatile("mrs %0, far_el1":"=r"(dfar));
        struct rt_aspace_fault_msg msg = {
            .fault_op = fault_op,
            .fault_type = fault_type,
            .fault_vaddr = dfar,
        };
        lwp_user_setting_save(rt_thread_self());
        /* Re-enable IRQ/FIQ while fixing the fault; restore DAIF after. */
        __asm__ volatile("mrs %0, daif\nmsr daifclr, 0x3\nisb\n":"=r"(level));
        if (rt_aspace_fault_try_fix(lwp->aspace, &msg))
        {
            ret = 1;
        }
        __asm__ volatile("msr daif, %0\nisb\n"::"r"(level));
    }
    return ret;
}
#endif
/**
 * this function will show registers of CPU
 *
 * Dumps the full exception frame (x0-x30, SP_EL0, SPSR, ELR) to the
 * kernel console for post-mortem diagnosis.
 *
 * @param regs the registers point
 */
void rt_hw_show_register(struct rt_hw_exp_stack *regs)
{
    /* Fix: header previously read "Execption:". */
    rt_kprintf("Exception:\n");
    rt_kprintf("X00:0x%16.16p X01:0x%16.16p X02:0x%16.16p X03:0x%16.16p\n", (void *)regs->x0, (void *)regs->x1, (void *)regs->x2, (void *)regs->x3);
    rt_kprintf("X04:0x%16.16p X05:0x%16.16p X06:0x%16.16p X07:0x%16.16p\n", (void *)regs->x4, (void *)regs->x5, (void *)regs->x6, (void *)regs->x7);
    rt_kprintf("X08:0x%16.16p X09:0x%16.16p X10:0x%16.16p X11:0x%16.16p\n", (void *)regs->x8, (void *)regs->x9, (void *)regs->x10, (void *)regs->x11);
    rt_kprintf("X12:0x%16.16p X13:0x%16.16p X14:0x%16.16p X15:0x%16.16p\n", (void *)regs->x12, (void *)regs->x13, (void *)regs->x14, (void *)regs->x15);
    rt_kprintf("X16:0x%16.16p X17:0x%16.16p X18:0x%16.16p X19:0x%16.16p\n", (void *)regs->x16, (void *)regs->x17, (void *)regs->x18, (void *)regs->x19);
    rt_kprintf("X20:0x%16.16p X21:0x%16.16p X22:0x%16.16p X23:0x%16.16p\n", (void *)regs->x20, (void *)regs->x21, (void *)regs->x22, (void *)regs->x23);
    rt_kprintf("X24:0x%16.16p X25:0x%16.16p X26:0x%16.16p X27:0x%16.16p\n", (void *)regs->x24, (void *)regs->x25, (void *)regs->x26, (void *)regs->x27);
    rt_kprintf("X28:0x%16.16p X29:0x%16.16p X30:0x%16.16p\n", (void *)regs->x28, (void *)regs->x29, (void *)regs->x30);
    rt_kprintf("SP_EL0:0x%16.16p\n", (void *)regs->sp_el0);
    rt_kprintf("SPSR  :0x%16.16p\n", (void *)regs->cpsr);
    rt_kprintf("EPC   :0x%16.16p\n", (void *)regs->pc);
}
#ifndef RT_USING_PIC
/*
 * Legacy (non-PIC-subsystem) IRQ dispatch. On BCM283x the per-core
 * mailbox/timer sources are polled first, then the chained pending
 * registers; on GIC platforms the active interrupt ID is read and the
 * registered handler from isr_table[] is invoked, followed by EOI.
 */
static void _rt_hw_trap_irq(rt_interrupt_context_t irq_context)
{
#ifdef SOC_BCM283x
    extern rt_uint8_t core_timer_flag;
    void *param;
    uint32_t irq;
    rt_isr_handler_t isr_func;
    extern struct rt_irq_desc isr_table[];
    uint32_t value = 0;
    value = IRQ_PEND_BASIC & 0x3ff;
    /* Per-core generic timer interrupt (core mailbox IRQ source). */
    if(core_timer_flag != 0)
    {
        uint32_t cpu_id = rt_hw_cpu_id();
        uint32_t int_source = CORE_IRQSOURCE(cpu_id);
        if (int_source & 0x0f)
        {
            if (int_source & 0x08)
            {
                isr_func = isr_table[IRQ_ARM_TIMER].handler;
#ifdef RT_USING_INTERRUPT_INFO
                isr_table[IRQ_ARM_TIMER].counter++;
#endif
                if (isr_func)
                {
                    param = isr_table[IRQ_ARM_TIMER].param;
                    isr_func(IRQ_ARM_TIMER, param);
                }
            }
        }
    }
    /* local interrupt*/
    if (value)
    {
        /* Bits 8/9 indicate pending sources in the chained GPU registers. */
        if (value & (1 << 8))
        {
            value = IRQ_PEND1;
            irq = __rt_ffs(value) - 1;
        }
        else if (value & (1 << 9))
        {
            value = IRQ_PEND2;
            irq = __rt_ffs(value) + 31;
        }
        else
        {
            value &= 0x0f;
            irq = __rt_ffs(value) + 63;
        }
        /* get interrupt service routine */
        isr_func = isr_table[irq].handler;
#ifdef RT_USING_INTERRUPT_INFO
        isr_table[irq].counter++;
#endif
        if (isr_func)
        {
            /* Interrupt for myself. */
            param = isr_table[irq].param;
            /* turn to interrupt service routine */
            isr_func(irq, param);
        }
    }
#else
    void *param;
    int ir, ir_self;
    rt_isr_handler_t isr_func;
    extern struct rt_irq_desc isr_table[];
    ir = rt_hw_interrupt_get_irq();
    if (ir == 1023)
    {
        /* Spurious interrupt */
        return;
    }
    /* bit 10~12 is cpuid, bit 0~9 is interrupt id */
    ir_self = ir & 0x3ffUL;
    /* get interrupt service routine */
    isr_func = isr_table[ir_self].handler;
#ifdef RT_USING_INTERRUPT_INFO
    isr_table[ir_self].counter++;
#ifdef RT_USING_SMP
    isr_table[ir_self].cpu_counter[rt_hw_cpu_id()]++;
#endif
#endif
    if (isr_func)
    {
        /* Interrupt for myself. */
        param = isr_table[ir_self].param;
        /* turn to interrupt service routine */
        isr_func(ir_self, param);
    }
    /* end of interrupt */
    rt_hw_interrupt_ack(ir);
#endif
}
#else
/* PIC-subsystem build: delegate all IRQ dispatch to the PIC core. */
static void _rt_hw_trap_irq(struct rt_interrupt_context *this_ctx)
{
    rt_pic_do_traps();
}
#endif
/*
 * IRQ trap entry called from the vector code: wraps the exception frame
 * in an interrupt-context record (pushed for nested-IRQ tracking) and
 * dispatches to the build-specific handler.
 */
void rt_hw_trap_irq(struct rt_hw_exp_stack *regs)
{
    struct rt_interrupt_context this_ctx = {
        .context = regs,
        .node = RT_SLIST_OBJECT_INIT(this_ctx.node),
    };
    rt_interrupt_context_push(&this_ctx);
    _rt_hw_trap_irq(&this_ctx);
    rt_interrupt_context_pop();
}
#ifdef RT_USING_SMART
/* Give the debugger first look at the exception; nonzero = consumed. */
#define DBG_CHECK_EVENT(regs, esr) dbg_check_event(regs, esr)
#else
#define DBG_CHECK_EVENT(regs, esr) (0)
#endif
#ifndef RT_USING_PIC
/*
 * FIQ trap entry (legacy GIC path). Mirrors _rt_hw_trap_irq(): read the
 * active interrupt, dispatch the registered handler, acknowledge.
 * Fixes vs. original: bail out on the spurious ID (1023) and guard a
 * NULL handler instead of calling through it — both checks already done
 * by the sibling IRQ path.
 */
void rt_hw_trap_fiq(void)
{
    void *param;
    int ir, ir_self;
    rt_isr_handler_t isr_func;
    extern struct rt_irq_desc isr_table[];
    ir = rt_hw_interrupt_get_irq();
    if (ir == 1023)
    {
        /* Spurious interrupt: nothing to handle or acknowledge. */
        return;
    }
    /* bit 10~12 is cpuid, bit 0~9 is interrupt id */
    ir_self = ir & 0x3ffUL;
    /* get interrupt service routine */
    isr_func = isr_table[ir_self].handler;
    if (isr_func)
    {
        param = isr_table[ir_self].param;
        /* turn to interrupt service routine */
        isr_func(ir_self, param);
    }
    /* end of interrupt */
    rt_hw_interrupt_ack(ir);
}
#else
/* PIC-subsystem build: FIQs are dispatched by the PIC core as well. */
void rt_hw_trap_fiq(void)
{
    rt_pic_do_traps();
}
#endif
void print_exception(unsigned long esr, unsigned long epc);
void SVC_Handler(struct rt_hw_exp_stack *regs);
/*
 * Synchronous exception entry. Order of handling:
 *   1. debugger events (RT_USING_SMART),
 *   2. AArch64 SVC (EC 0x15) -> syscall dispatch (does not return here),
 *   3. user-space faults fixable by the aspace layer (demand paging),
 *   4. otherwise: dump state, backtrace, and shut the CPU down.
 */
void rt_hw_trap_exception(struct rt_hw_exp_stack *regs)
{
    unsigned long esr;
    unsigned char ec;
    asm volatile("mrs %0, esr_el1":"=r"(esr));
    /* ESR_EL1[31:26] is the exception class. */
    ec = (unsigned char)((esr >> 26) & 0x3fU);
    if (DBG_CHECK_EVENT(regs, esr))
    {
        return;
    }
    else if (ec == 0x15) /* is 64bit syscall ? */
    {
        SVC_Handler(regs);
        /* never return here */
    }
#ifdef RT_USING_SMART
    /**
     * Note: check_user_stack will take lock and it will possibly be a dead-lock
     * if exception comes from kernel.
     */
    if ((regs->cpsr & 0x1f) == 0)
    {
        /* Exception taken from EL0: safe to attempt a fault fix. */
        if (user_fault_fixable(esr, regs))
            return;
    }
    else
    {
        /* Kernel-mode fault: only try fixing when scheduling is usable. */
        if (_irq_is_disable(regs->cpsr))
        {
            LOG_E("Kernel fault from interrupt/critical section");
        }
        if (rt_critical_level() != 0)
        {
            LOG_E("scheduler is not available");
        }
        else if (user_fault_fixable(esr, regs))
            return;
    }
#endif
    /* Unrecoverable: report and halt. */
    print_exception(esr, regs->pc);
    rt_hw_show_register(regs);
    LOG_E("current thread: %s\n", rt_thread_self()->parent.name);
#ifdef RT_USING_FINSH
    list_thread();
#endif
#ifdef RT_USING_LWP
    /* restore normal execution environment */
    __asm__ volatile("msr daifclr, 0x3\ndmb ishst\nisb\n");
    _check_fault(regs, 0, "user fault");
#endif
    struct rt_hw_backtrace_frame frame = {.fp = regs->x29, .pc = regs->pc};
    rt_backtrace_frame(rt_thread_self(), &frame);
    rt_hw_cpu_shutdown();
}
/*
 * SError (asynchronous external abort) entry: unrecoverable by design —
 * dump the frame and thread list, then shut the CPU down.
 */
void rt_hw_trap_serror(struct rt_hw_exp_stack *regs)
{
    rt_kprintf("SError\n");
    rt_hw_show_register(regs);
    rt_kprintf("current: %s\n", rt_thread_self()->parent.name);
#ifdef RT_USING_FINSH
    list_thread();
#endif
    rt_hw_cpu_shutdown();
}

View File

@ -0,0 +1,120 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven the first version
* 2023-06-24 Shell Support backtrace for user thread
* 2024-01-06 Shell Fix barrier on irq_disable/enable
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include "context_gcc.h"
#include "../include/vector_gcc.h"
#include <rtconfig.h>
#include <asm-generic.h>
#include <asm-fpu.h>
#include <armv8.h>
/**
 * Context switch status
 *
 * Shared with the IRQ-exit path: "from" thread's SP slot, "to" thread's
 * SP slot, and a flag telling the IRQ tail to perform the deferred switch.
 */
.section .bss
rt_interrupt_from_thread:
    .quad 0
rt_interrupt_to_thread:
    .quad 0
rt_thread_switch_interrupt_flag:
    .quad 0
.section .text
/*
 * void rt_hw_context_switch_to(rt_ubase_t to);
 * X0 --> to sp
 *
 * First switch on a CPU: no context to save, just load the target
 * thread's saved SP and restore its full context. Never returns.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    clrex               /* drop any exclusive monitor state */
    ldr x0, [x0]
    RESTORE_CONTEXT_SWITCH x0
    NEVER_RETURN
/*
 * void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to);
 * X0 --> from sp
 * X1 --> to sp
 * X2 --> to thread
 *
 * Cooperative (thread-context) switch: save the callee-visible context
 * of the current thread, stash its SP in its TCB, then restore the
 * target thread from its saved SP.
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
    clrex
    SAVE_CONTEXT_SWITCH x19, x20
    mov x2, sp
    str x2, [x0]            // store sp in preempted tasks TCB
    ldr x0, [x1]            // get new task stack pointer
    RESTORE_CONTEXT_SWITCH x0
    NEVER_RETURN
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
/*
 * void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to, rt_thread_t from_thread, rt_thread_t to_thread);
 *
 * Request a switch from IRQ context: record from/to and raise the flag;
 * the actual switch is deferred to rt_hw_context_switch_interrupt_do at
 * IRQ exit. If a switch is already pending, only the destination is
 * updated (the original "from" stays first-preempted).
 */
rt_hw_context_switch_interrupt:
    ldr x6, =rt_thread_switch_interrupt_flag
    ldr x7, [x6]
    cmp x7, #1
    b.eq _reswitch
    /* set rt_interrupt_from_thread */
    ldr x4, =rt_interrupt_from_thread
    str x0, [x4]
    /* set rt_thread_switch_interrupt_flag to 1 */
    mov x7, #1
    str x7, [x6]
    /* preserve x1 (to sp) and lr across the C call below */
    stp x1, x30, [sp, #-0x10]!
#ifdef RT_USING_SMART
    mov x0, x2
    bl lwp_user_setting_save
#endif
    ldp x1, x30, [sp], #0x10
_reswitch:
    ldr x6, =rt_interrupt_to_thread     // set rt_interrupt_to_thread
    str x1, [x6]
    ret
.globl rt_hw_context_switch_interrupt_do
/**
 * rt_hw_context_switch_interrupt_do(void)
 *
 * Deferred switch executed at IRQ exit when the switch flag was set:
 * save the preempted thread's remaining context, store its SP, and
 * restore the thread recorded in rt_interrupt_to_thread. Never returns.
 */
rt_hw_context_switch_interrupt_do:
    clrex
    SAVE_CONTEXT_SWITCH_FAST
    ldr x3, =rt_interrupt_from_thread
    ldr x4, [x3]
    mov x0, sp
    str x0, [x4]            // store sp in preempted tasks's tcb
    ldr x3, =rt_interrupt_to_thread
    ldr x4, [x3]
    ldr x0, [x4]            // get new task's stack pointer
    RESTORE_CONTEXT_SWITCH x0
    NEVER_RETURN

View File

@ -0,0 +1,83 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-03-25 Shell Trimming unecessary ops and
* improve the performance of ctx switch
*/
#ifndef __ARM64_CONTEXT_H__
#define __ARM64_CONTEXT_H__
#include "../include/context_gcc.h"
#include <rtconfig.h>
#include <asm-generic.h>
#include <asm-fpu.h>
#include <armv8.h>
/* restore address space */
/*
 * Switch to the incoming thread's user address space (RT_USING_SMART):
 * clobbers x19 and the argument registers via the C calls.
 */
.macro RESTORE_ADDRESS_SPACE
#ifdef RT_USING_SMART
    bl rt_thread_self
    mov x19, x0
    bl lwp_aspace_switch
    mov x0, x19
    bl lwp_user_setting_restore
#endif
.endm
/*
 * Restore a thread saved by SAVE_CONTEXT_SWITCH: adopt its stack, switch
 * the address space, then pop the register frame (_RESTORE_CONTEXT_SWITCH
 * from the shared include).
 */
.macro RESTORE_CONTEXT_SWITCH using_sp
    /* Set the SP to point to the stack of the task being restored. */
    mov sp, \using_sp
    RESTORE_ADDRESS_SPACE
    _RESTORE_CONTEXT_SWITCH
.endm
/*
 * Pop a full IRQ/exception frame (as pushed by SAVE_IRQ_CONTEXT) and
 * return from the exception.
 * NOTE: `TST X3, #0x1f` tests the saved PSTATE EL bits; the Z flag must
 * survive every LDP/MSR down to the final `BEQ arch_ret_to_user`
 * (taken when returning to EL0 under RT_USING_SMART) — none of those
 * instructions write NZCV.
 */
.macro RESTORE_IRQ_CONTEXT
#ifdef RT_USING_SMART
    BL rt_thread_self
    MOV X19, X0
    BL lwp_aspace_switch
    MOV X0, X19
    BL lwp_user_setting_restore
#endif
    LDP X2, X3, [SP], #0x10  /* SPSR and ELR. */
    TST X3, #0x1f
    MSR SPSR_EL1, X3
    MSR ELR_EL1, X2
    LDP X29, X30, [SP], #0x10
    MSR SP_EL0, X29
    LDP X28, X29, [SP], #0x10
    MSR FPCR, X28
    MSR FPSR, X29
    LDP X28, X29, [SP], #0x10
    LDP X26, X27, [SP], #0x10
    LDP X24, X25, [SP], #0x10
    LDP X22, X23, [SP], #0x10
    LDP X20, X21, [SP], #0x10
    LDP X18, X19, [SP], #0x10
    LDP X16, X17, [SP], #0x10
    LDP X14, X15, [SP], #0x10
    LDP X12, X13, [SP], #0x10
    LDP X10, X11, [SP], #0x10
    LDP X8, X9, [SP], #0x10
    LDP X6, X7, [SP], #0x10
    LDP X4, X5, [SP], #0x10
    LDP X2, X3, [SP], #0x10
    LDP X0, X1, [SP], #0x10
    RESTORE_FPU SP
#ifdef RT_USING_SMART
    BEQ arch_ret_to_user
#endif
    ERET
.endm
#endif /* __ARM64_CONTEXT_H__ */

View File

@ -0,0 +1,61 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include "../include/vector_gcc.h"
#include "context_gcc.h"
#include <rtconfig.h>
#include <asm-generic.h>
#include <asm-fpu.h>
#include <armv8.h>
.section .text
/*
 * FIQ vector body: save the full frame, dispatch, then join the common
 * IRQ exit path.
 * NOTE(review): the `.align 8` sits AFTER the label, so vector_fiq itself
 * is not alignment-padded — confirm whether alignment before the label
 * was intended (the branch from the vector table works either way).
 */
vector_fiq:
    .align 8
    .globl vector_fiq
    SAVE_IRQ_CONTEXT
    bl      rt_hw_trap_fiq
    b       rt_hw_irq_exit
.globl rt_thread_switch_interrupt_flag
.globl rt_hw_context_switch_interrupt_do
/**
 * void rt_hw_vector_irq_sched(void *eframe)
 * @brief do IRQ scheduling
 *
 * Called at the tail of the IRQ vector: when an interrupt handler
 * requested a context switch (flag set by
 * rt_hw_context_switch_interrupt), clear the flag and perform the
 * deferred switch; otherwise fall through to the common IRQ exit.
 */
rt_hw_vector_irq_sched:
    .globl rt_hw_vector_irq_sched
    .align 8
    /**
     * if rt_thread_switch_interrupt_flag set, jump to
     * rt_hw_context_switch_interrupt_do and don't return
     */
    ldr x1, =rt_thread_switch_interrupt_flag
    ldr x2, [x1]
    cmp x2, #1
    bne 1f
    /* clear flag */
    mov x2, #0
    str x2, [x1]
    bl rt_hw_context_switch_interrupt_do
1:
    b rt_hw_irq_exit

View File

@ -0,0 +1,134 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-10-06 ZhaoXiaowei the first version
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include <rtconfig.h>
.text
.globl system_vectors
.globl vector_exception
.globl vector_irq
.globl vector_fiq
/*
 * EL1 exception vector table (VBAR_EL1), 2 KiB aligned; each entry is
 * 0x80 bytes. Unsupported sources (SP_EL0 entries, all AArch32 lower-EL
 * entries, and EL1 SError) are routed to vector_serror, which halts.
 */
system_vectors:
.align 11
    .set    VBAR, system_vectors
    .org    VBAR
    /* Exception from CurrentEL (EL1) with SP_EL0 (SPSEL=1) */
    .org (VBAR + 0x00 + 0)
    b vector_serror                     /* Synchronous */
    .org (VBAR + 0x80 + 0)
    b vector_serror                     /* IRQ/vIRQ */
    .org (VBAR + 0x100 + 0)
    b vector_serror                     /* FIQ/vFIQ */
    .org (VBAR + 0x180 + 0)
    b vector_serror                     /* Error/vError */
    /* Exception from CurrentEL (EL1) with SP_ELn */
    .org (VBAR + 0x200 + 0)
    b vector_exception                  /* Synchronous */
    .org (VBAR + 0x280 + 0)
    b vector_irq                        /* IRQ/vIRQ */
    .org (VBAR + 0x300 + 0)
    b vector_fiq                        /* FIQ/vFIQ */
    .org (VBAR + 0x380 + 0)
    b vector_serror
    /* Exception from lower EL, aarch64 */
    .org (VBAR + 0x400 + 0)
    b vector_exception
    .org (VBAR + 0x480 + 0)
    b vector_irq
    .org (VBAR + 0x500 + 0)
    b vector_fiq
    .org (VBAR + 0x580 + 0)
    b vector_serror
    /* Exception from lower EL, aarch32 */
    .org (VBAR + 0x600 + 0)
    b vector_serror
    .org (VBAR + 0x680 + 0)
    b vector_serror
    .org (VBAR + 0x700 + 0)
    b vector_serror
    .org (VBAR + 0x780 + 0)
    b vector_serror
#include "include/vector_gcc.h"
/* Callee-saved scratch register holding the exception-frame pointer across
 * the C handler calls below. */
#define EFRAMEX x19
/*
 * Synchronous exception entry: save the full context, pass the frame to the
 * C handler rt_hw_trap_exception, then restore and return with eret.
 */
START_POINT(vector_exception)
    SAVE_IRQ_CONTEXT
    mov EFRAMEX, sp
    SAVE_USER_CTX EFRAMEX, x0
    mov x0, EFRAMEX /* x0 = exception frame for the C handler */
    bl rt_hw_trap_exception
    RESTORE_USER_CTX EFRAMEX, x0
    /* do exception switch for IRQ/exception handlers */
    EXCEPTION_SWITCH sp, x0
    RESTORE_IRQ_CONTEXT
    eret
START_POINT_END(vector_exception)
/*
 * SError / unsupported-vector entry: save context, report via
 * rt_hw_trap_serror, and never return (NEVER_RETURN).
 */
START_POINT(vector_serror)
    SAVE_IRQ_CONTEXT
    mov EFRAMEX, sp
    SAVE_USER_CTX EFRAMEX, x0
    mov x0, EFRAMEX /* x0 = exception frame for the C handler */
    bl rt_hw_trap_serror
    RESTORE_USER_CTX EFRAMEX, x0
    NEVER_RETURN
START_POINT_END(vector_serror)
/*
 * IRQ entry: save context, bracket the C IRQ handler with
 * rt_interrupt_enter/rt_interrupt_leave, then let rt_hw_vector_irq_sched
 * decide whether to switch context before the common exit.
 */
START_POINT(vector_irq)
    SAVE_IRQ_CONTEXT
    mov EFRAMEX, sp
    /* trace IRQ level */
    bl rt_interrupt_enter
    SAVE_USER_CTX EFRAMEX, x0
    /* handle IRQ */
    mov x0, EFRAMEX
    bl rt_hw_trap_irq
    RESTORE_USER_CTX EFRAMEX, x0
    /* restore IRQ level */
    bl rt_interrupt_leave
    mov x0, EFRAMEX
    bl rt_hw_vector_irq_sched
    b rt_hw_irq_exit
START_POINT_END(vector_irq)
/*
 * Common exit path shared by IRQ/exception handlers: perform a pending
 * exception-time switch if any, restore the saved context and eret.
 */
rt_hw_irq_exit:
    .globl rt_hw_irq_exit
    /* do exception switch for IRQ/exception handlers */
    EXCEPTION_SWITCH sp, x0
    RESTORE_IRQ_CONTEXT
    eret

View File

@ -0,0 +1,13 @@
# RT-Thread build script: register this component's C/C++/assembly sources.
from building import *

Import('rtconfig')

path = GetCurrentDir()

# Gather every source kind this port ships.
sources = []
for pattern in ('*.c', '*.cpp', '*.S'):
    sources += Glob(pattern)

group = DefineGroup('libcpu', sources, depend = [''], CPPPATH = [path])
Return('group')

View File

@ -0,0 +1,359 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Date Author Notes
* 2020-01-15 bigmagic the first version
* 2020-08-10 SummerGift support clang compiler
* 2023-04-29 GuEe-GUI support kernel's ARM64 boot header
* 2024-01-18 Shell fix implicit dependency of cpuid management
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include <mmu.h>
#include <rtconfig.h>
/*
 * Flag field of the Linux-compatible ARM64 image header (_head below):
 * bit 0 = endianness, bits 1-2 = kernel page size, bit 3 = physical
 * placement. Encoded per the kernel's arm64 booting protocol.
 */
#define ARM64_IMAGE_FLAG_BE_SHIFT 0
#define ARM64_IMAGE_FLAG_PAGE_SIZE_SHIFT (ARM64_IMAGE_FLAG_BE_SHIFT + 1)
#define ARM64_IMAGE_FLAG_PHYS_BASE_SHIFT (ARM64_IMAGE_FLAG_PAGE_SIZE_SHIFT + 2)
#define ARM64_IMAGE_FLAG_LE 0
#define ARM64_IMAGE_FLAG_BE 1
#define ARM64_IMAGE_FLAG_PAGE_SIZE_4K 1
#define ARM64_IMAGE_FLAG_PAGE_SIZE_16K 2
#define ARM64_IMAGE_FLAG_PAGE_SIZE_64K 3
#define ARM64_IMAGE_FLAG_PHYS_BASE 1
/* Compose one flag value from its _HEAD_FLAG_<field> selector */
#define _HEAD_FLAG(field) (_HEAD_FLAG_##field << ARM64_IMAGE_FLAG_##field##_SHIFT)
#ifdef ARCH_CPU_BIG_ENDIAN
#define _HEAD_FLAG_BE ARM64_IMAGE_FLAG_BE
#else
#define _HEAD_FLAG_BE ARM64_IMAGE_FLAG_LE
#endif
/* Maps ARCH_PAGE_SHIFT 12/14/16 to the 4K/16K/64K codes above */
#define _HEAD_FLAG_PAGE_SIZE ((ARCH_PAGE_SHIFT - 10) / 2)
#define _HEAD_FLAG_PHYS_BASE 1
#define _HEAD_FLAGS (_HEAD_FLAG(BE) | _HEAD_FLAG(PAGE_SIZE) | _HEAD_FLAG(PHYS_BASE))
/* get_phy: load the PC-relative (i.e. current physical, pre-MMU) address
 * of \symbol into \reg using adrp + lo12 fixup. */
.macro get_phy, reg, symbol
    adrp \reg, \symbol
    add \reg, \reg, #:lo12:\symbol
.endm
/* get_pvoff: compute the physical-minus-virtual offset of the image in
 * \out (clobbers \tmp), by comparing the link-time and PC-relative
 * addresses of the same symbol. */
.macro get_pvoff, tmp, out
    ldr \tmp, =.boot_cpu_stack_top
    get_phy \out, .boot_cpu_stack_top
    sub \out, \out, \tmp
.endm
.section ".text.entrypoint","ax"
#ifdef RT_USING_OFW
/*
 * Our goal is to boot the rt-thread as possible without modifying the
 * bootloader's config, so we use the kernel's boot header for ARM64:
 * https://www.kernel.org/doc/html/latest/arch/arm64/booting.html#call-the-kernel-image
 * A Linux-capable bootloader therefore loads and jumps to us unchanged.
 */
_head:
    b _start /* Executable code (code0) */
    .long 0 /* Executable code (code1, unused) */
    .quad _text_offset /* Image load offset from start of RAM, little endian */
    .quad _end - _head /* Effective Image size, little endian (_end defined in link.lds) */
    .quad _HEAD_FLAGS /* Kernel flags, little endian */
    .quad 0 /* Reserved */
    .quad 0 /* Reserved */
    .quad 0 /* Reserved */
    .ascii "ARM\x64" /* Magic number */
    .long 0 /* Reserved (used for PE COFF offset) */
#endif /* RT_USING_OFW */
/* Variable registers: x21~x28 -- symbolic names for values that must
 * survive the early bl calls below (callee-saved range). */
dtb_paddr .req x21
boot_arg0 .req x22
boot_arg1 .req x23
boot_arg2 .req x24
stack_top .req x25
.global _start
_start:
    /*
     * Boot CPU general-purpose register settings (per the ARM64 boot
     * protocol):
     * x0 = physical address of device tree blob (dtb) in system RAM.
     * x1 = 0 (reserved for future use)
     * x2 = 0 (reserved for future use)
     * x3 = 0 (reserved for future use)
     */
    mov dtb_paddr, x0
    mov boot_arg0, x1
    mov boot_arg1, x2
    mov boot_arg2, x3
    /* Save cpu stack: physical address of the boot CPU stack top */
    get_phy stack_top, .boot_cpu_stack_top
    /* Save cpu id temp */
#ifdef ARCH_USING_HW_THREAD_SELF
    msr tpidrro_el0, xzr
    /* Save thread self */
#endif /* ARCH_USING_HW_THREAD_SELF */
    msr tpidr_el1, xzr
    bl init_cpu_el
    bl init_kernel_bss
    bl init_cpu_stack_early
#ifdef RT_USING_OFW
    /* Save devicetree info before memory is remapped */
    mov x0, dtb_paddr
    bl rt_hw_fdt_install_early
#endif
    /* Now we are in the end of boot cpu process */
    ldr x8, =rtthread_startup /* x8 = entry taken by kernel_start after MMU on */
    b init_mmu_early
    /* never come back */
kernel_start:
    /* jump to the PE's system entry (x8), with a clean frame pointer */
    mov x29, xzr
    mov x30, x8
    br x8
cpu_idle:
    /* park loop for CPUs that cannot be brought up */
    wfe
    b cpu_idle
#ifdef RT_USING_SMP
/*
 * Entry for secondary CPUs. With OFW, derive the logical CPU id by
 * matching this CPU's MPIDR against rt_cpu_mpidr_table, then set up the
 * per-CPU stack and continue through the shared EL/stack/MMU init path
 * into rt_hw_secondary_cpu_bsp_start.
 */
.globl _secondary_cpu_entry
_secondary_cpu_entry:
#ifdef RT_USING_OFW
    /* Read cpu id */
    mrs x5, mpidr_el1
    ldr x1, =rt_cpu_mpidr_table
    get_pvoff x4 x2
    add x1, x1, x2 /* translate table address to physical */
    mov x2, #0
    ldr x4, =0xff00ffffff /* mask of the Aff3..Aff0 affinity fields */
    and x0, x5, x4
.cpu_id_confirm:
    add x2, x2, #1 /* Next cpu id inc */
    ldr x3, [x1], #8
    cmp x3, #0
    beq cpu_idle /* table exhausted: this CPU is not configured, park it */
    and x3, x3, x4
    cmp x3, x0
    bne .cpu_id_confirm
    /* Save this mpidr */
    str x5, [x1, #-8]
    /* Get cpu id success */
    sub x0, x2, #1
#endif /* RT_USING_OFW */
    /* Save cpu id global */
    bl rt_hw_cpu_id_set
    bl rt_hw_cpu_id
    /* Set current cpu's stack top: stacks are packed below
     * .secondary_cpu_stack_top, one ARCH_SECONDARY_CPU_STACK_SIZE each */
    sub x0, x0, #1
    mov x1, #ARCH_SECONDARY_CPU_STACK_SIZE
    get_phy x2, .secondary_cpu_stack_top
    msub stack_top, x0, x1, x2
    bl init_cpu_el
    bl init_cpu_stack_early
    /* secondary cpu start to startup */
    ldr x8, =rt_hw_secondary_cpu_bsp_start
    b enable_mmu_early
#endif /* RT_USING_SMP */
/*
 * Normalize the exception level: if entered at EL3 or EL2, drop to EL1h
 * (masking DAIF during the transition), then configure EL1 system
 * registers (alignment checks off, EL0 virtual counter access, no SIMD/FP
 * traps). Returns at EL1.
 */
init_cpu_el:
    mrs x0, CurrentEL /* CurrentEL Register. bit 2, 3. Others reserved */
    lsr x0, x0, #2
    and x0, x0, #3
    /* running at EL3? */
    cmp x0, #3
    bne .init_cpu_hyp
    /* should never be executed, just for completeness. (EL3) */
    mov x1, #(1 << 0) /* EL0 and EL1 are in Non-Secure state */
    orr x1, x1, #(1 << 4) /* RES1 */
    orr x1, x1, #(1 << 5) /* RES1 */
    /* bic x1, x1, #(1 << 7) disable Secure Monitor Call */
    orr x1, x1, #(1 << 10) /* The next lower level is AArch64 */
    msr scr_el3, x1
    mov x1, #9 /* Next level is 0b1001->EL2h */
    orr x1, x1, #(1 << 6) /* Mask FIQ */
    orr x1, x1, #(1 << 7) /* Mask IRQ */
    orr x1, x1, #(1 << 8) /* Mask SError */
    orr x1, x1, #(1 << 9) /* Mask Debug Exception */
    msr spsr_el3, x1
    get_phy x1, .init_cpu_hyp
    msr elr_el3, x1
    eret
.init_cpu_hyp:
    /* running at EL2? */
    cmp x0, #2 /* EL2 = 0b10 */
    bne .init_cpu_sys
    /* Enable CNTP for EL1 */
    mrs x0, cnthctl_el2 /* Counter-timer Hypervisor Control register */
    orr x0, x0, #(1 << 0) /* Don't traps NS EL0/1 accesses to the physical counter */
    orr x0, x0, #(1 << 1) /* Don't traps NS EL0/1 accesses to the physical timer */
    msr cnthctl_el2, x0
    msr cntvoff_el2, xzr
    mov x0, #(1 << 31) /* Enable AArch64 in EL1 */
    orr x0, x0, #(1 << 1) /* SWIO hardwired */
    msr hcr_el2, x0
    mov x0, #5 /* Next level is 0b0101->EL1h */
    orr x0, x0, #(1 << 6) /* Mask FIQ */
    orr x0, x0, #(1 << 7) /* Mask IRQ */
    orr x0, x0, #(1 << 8) /* Mask SError */
    orr x0, x0, #(1 << 9) /* Mask Debug Exception */
    msr spsr_el2, x0
    get_phy x0, .init_cpu_sys
    msr elr_el2, x0
    eret
.init_cpu_sys:
    mrs x0, sctlr_el1
    bic x0, x0, #(3 << 3) /* Disable SP Alignment check */
    bic x0, x0, #(1 << 1) /* Disable Alignment check */
    msr sctlr_el1, x0
    mrs x0, cntkctl_el1
    orr x0, x0, #(1 << 1) /* Set EL0VCTEN, enabling the EL0 Virtual Count Timer */
    msr cntkctl_el1, x0
    /* Avoid trap from SIMD or float point instruction */
    mov x0, #0x00300000 /* Don't trap any SIMD/FP instructions in both EL0 and EL1 */
    msr cpacr_el1, x0
    /* Applying context change */
    dsb ish
    isb
    ret
/*
 * Zero the kernel .bss (__bss_start..__bss_end, physical addresses):
 * 8 bytes at a time for the aligned bulk, then byte-by-byte for the tail.
 */
init_kernel_bss:
    get_phy x1, __bss_start
    get_phy x2, __bss_end
    sub x2, x2, x1 /* Get bss size */
    and x3, x2, #7 /* x3 = tail length (< 8) */
    ldr x4, =~0x7
    and x2, x2, x4 /* x2 = size rounded down to 8 */
.clean_bss_loop_quad:
    cbz x2, .clean_bss_loop_byte
    str xzr, [x1], #8
    sub x2, x2, #8
    b .clean_bss_loop_quad
.clean_bss_loop_byte:
    cbz x3, .clean_bss_end
    strb wzr, [x1], #1
    sub x3, x3, #1
    b .clean_bss_loop_byte
.clean_bss_end:
    ret
/* Select SP_ELx (spsel=1) and point sp at this CPU's stack top
 * (stack_top/x25 was set by the caller before branching here). */
init_cpu_stack_early:
    msr spsel, #1
    mov sp, stack_top
    ret
/*
 * Build the early page tables (boot CPU only) and fall through to
 * enable_mmu_early. Expects x8 = virtual entry point to branch to from
 * kernel_start once translation is on.
 */
init_mmu_early:
    get_phy x0, .early_page_array
    bl set_free_page
    get_phy x0, .early_tbl0_page
    get_phy x1, .early_tbl1_page
    get_pvoff x2 x3
    ldr x2, =ARCH_EARLY_MAP_SIZE /* Map 1G memory for kernel space */
    bl rt_hw_mem_setup_early
    b enable_mmu_early
/* Program TTBR0/TTBR1, switch sp to its virtual alias, then turn on the
 * MMU and caches and return through the virtual LR (kernel_start). */
enable_mmu_early:
    get_phy x0, .early_tbl0_page
    get_phy x1, .early_tbl1_page
    msr ttbr0_el1, x0
    msr ttbr1_el1, x1
    dsb sy
    bl mmu_tcr_init
    /*
     * OK, now, we don't use sp before jump to kernel, set sp to current cpu's
     * stack top to virtual address
     */
    get_pvoff x1 x0
    mov x1, stack_top
    sub x1, x1, x0
    mov sp, x1
    ldr x30, =kernel_start /* Set LR to kernel_start function, it's virtual addresses */
    /* Enable page table translation */
    mrs x1, sctlr_el1
    orr x1, x1, #(1 << 12) /* Stage 1 instruction access Cacheability control */
    orr x1, x1, #(1 << 2) /* Cacheable Normal memory in stage1 */
    orr x1, x1, #(1 << 0) /* MMU Enable */
    msr sctlr_el1, x1
    dsb ish
    isb
    ic ialluis /* Invalidate all instruction caches in Inner Shareable domain to Point of Unification */
    dsb ish
    isb
    tlbi vmalle1 /* Invalidate all stage 1 translations used at EL1 with the current VMID */
    dsb ish
    isb
    ret
/*
 * CPU stack builtin. Stacks grow downward, so each label below marks the
 * TOP of the region that precedes it.
 */
.section ".bss.noclean.cpus_stack"
.align 12
.cpus_stack:
#if defined(RT_USING_SMP) && RT_CPUS_NR > 1
    .space (ARCH_SECONDARY_CPU_STACK_SIZE * (RT_CPUS_NR - 1))
#endif
.secondary_cpu_stack_top:
    .space ARCH_SECONDARY_CPU_STACK_SIZE
.boot_cpu_stack_top:
/*
 * Early page builtin: page-aligned storage used by init_mmu_early for the
 * temporary boot translation tables and the free-page pool.
 */
.section ".bss.noclean.early_page"
.align 12
.early_tbl0_page:
    .space ARCH_PAGE_SIZE
.early_tbl1_page:
    /* Map 4G -> 2M * 512 entries */
    .space 4 * ARCH_PAGE_SIZE
.early_page_array:
    .space 24 * ARCH_PAGE_SIZE

View File

@ -0,0 +1,198 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Date Author Notes
* 2017-5-30 bernard first version
*/
#include "rtconfig.h"
OUTPUT_FORMAT("elf64-littleaarch64", "elf64-littleaarch64", "elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
#ifndef ARCH_TEXT_OFFSET
#define ARCH_TEXT_OFFSET 0x200000 /* We always boot in address where is 2MB aligned */
#endif
#ifndef ARCH_RAM_OFFSET
#define ARCH_RAM_OFFSET 0
#endif
SECTIONS
{
    /* Link base: the image always starts ARCH_TEXT_OFFSET (2 MiB-aligned)
     * into RAM; with RT_USING_SMART the kernel lives in the high virtual
     * half starting at KERNEL_VADDR_START. */
    _text_offset = ARCH_TEXT_OFFSET;
#ifdef RT_USING_SMART
    . = KERNEL_VADDR_START + _text_offset;
#else
    . = ARCH_RAM_OFFSET + _text_offset;
#endif
    .text :
    {
        PROVIDE(__text_start = .);
        KEEP(*(.text.entrypoint)) /* The entry point -- must be first so _head sits at the load address */
        *(.vectors)
        *(.text) /* remaining code */
        *(.text.*) /* remaining code */
        *(.rodata) /* read-only data (constants) */
        *(.rodata*)
        *(.glue_7)
        *(.glue_7t)
        *(.gnu.linkonce.t*)
        /* section information for utest */
        . = ALIGN(8);
        PROVIDE(__rt_utest_tc_tab_start = .);
        KEEP(*(UtestTcTab))
        PROVIDE(__rt_utest_tc_tab_end = .);
        /* section information for finsh shell */
        . = ALIGN(8);
        PROVIDE(__fsymtab_start = .);
        KEEP(*(FSymTab))
        PROVIDE(__fsymtab_end = .);
        . = ALIGN(8);
        PROVIDE(__vsymtab_start = .);
        KEEP(*(VSymTab))
        PROVIDE(__vsymtab_end = .);
        . = ALIGN(8);
        /* section information for modules */
        . = ALIGN(8);
        PROVIDE(__rtmsymtab_start = .);
        KEEP(*(RTMSymTab))
        PROVIDE(__rtmsymtab_end = .);
        /* section information for initialization */
        . = ALIGN(8);
        PROVIDE(__rt_init_start = .);
        KEEP(*(SORT(.rti_fn*)))
        PROVIDE(__rt_init_end = .);
        /* section information for rt_ofw. */
        . = ALIGN(16);
        PROVIDE(__rt_ofw_data_start = .);
        KEEP(*(SORT(.rt_ofw_data.*)))
        PROVIDE(__rt_ofw_data_end = .);
        . = ALIGN(16);
        /* section information for usb usbh_class_info */
        . = ALIGN(4);
        __usbh_class_info_start__ = .;
        KEEP(*(.usbh_class_info))
        . = ALIGN(4);
        __usbh_class_info_end__ = .;
        PROVIDE(__text_end = .);
    }
    .eh_frame_hdr :
    {
        *(.eh_frame_hdr)
        *(.eh_frame_entry)
    }
    .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
    . = ALIGN(8);
    .data :
    {
        *(.data)
        *(.data.*)
        *(.data1)
        *(.data1.*)
        . = ALIGN(16);
        _gp = ABSOLUTE(.); /* Base of small data */
        *(.sdata)
        *(.sdata.*)
        *(.rel.local)
    }
    . = ALIGN(8);
    .ctors :
    {
        PROVIDE(__ctors_start__ = .);
        /* new GCC version uses .init_array */
        KEEP(*(SORT(.init_array.*)))
        KEEP(*(.init_array))
        PROVIDE(__ctors_end__ = .);
    }
    .dtors :
    {
        PROVIDE(__dtors_start__ = .);
        KEEP(*(SORT(.dtors.*)))
        KEEP(*(.dtors))
        PROVIDE(__dtors_end__ = .);
    }
    . = ALIGN(16);
    .bss :
    {
        /*
         * We need some free space to page or cpu stack, move .bss.noclean.*
         * to optimize size. init_kernel_bss only clears
         * __bss_start..__bss_end, so .bss.noclean.* (boot stacks / early
         * page tables) is deliberately kept outside that range.
         */
        PROVIDE(__bss_noclean_start = .);
        *(.bss.noclean.*)
        PROVIDE(__bss_noclean_end = .);
        . = ALIGN(8);
        PROVIDE(__bss_start = .);
        *(.bss)
        *(.bss.*)
        *(.dynbss)
        *(COMMON)
        . = ALIGN(8);
        PROVIDE(__bss_end = .);
    }
    /*
     * We should make the bootloader know the size of memory we need,
     * so we MUST calc the image's size with section '.bss'.
     * (_end - _head is the "image size" field of the ARM64 boot header.)
     */
    _end = .;
    /* Stabs debugging sections. */
    .stab 0 : { *(.stab) }
    .stabstr 0 : { *(.stabstr) }
    .stab.excl 0 : { *(.stab.excl) }
    .stab.exclstr 0 : { *(.stab.exclstr) }
    .stab.index 0 : { *(.stab.index) }
    .stab.indexstr 0 : { *(.stab.indexstr) }
    .comment 0 : { *(.comment) }
    /* DWARF debug sections.
     * Symbols in the DWARF debugging sections are relative to the beginning
     * of the section so we begin them at 0. */
    /* DWARF 1 */
    .debug 0 : { *(.debug) }
    .line 0 : { *(.line) }
    /* GNU DWARF 1 extensions */
    .debug_srcinfo 0 : { *(.debug_srcinfo) }
    .debug_sfnames 0 : { *(.debug_sfnames) }
    /* DWARF 1.1 and DWARF 2 */
    .debug_aranges 0 : { *(.debug_aranges) }
    .debug_pubnames 0 : { *(.debug_pubnames) }
    /* DWARF 2 */
    .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
    .debug_abbrev 0 : { *(.debug_abbrev) }
    .debug_line 0 : { *(.debug_line) }
    .debug_frame 0 : { *(.debug_frame) }
    .debug_str 0 : { *(.debug_str) }
    .debug_loc 0 : { *(.debug_loc) }
    .debug_macinfo 0 : { *(.debug_macinfo) }
    /* SGI/MIPS DWARF 2 extensions */
    .debug_weaknames 0 : { *(.debug_weaknames) }
    .debug_funcnames 0 : { *(.debug_funcnames) }
    .debug_typenames 0 : { *(.debug_typenames) }
    .debug_varnames 0 : { *(.debug_varnames) }
    __data_size = SIZEOF(.data);
    __bss_size = SIZEOF(.bss);
}

View File

@ -0,0 +1,17 @@
# RT-Thread build bridge: aggregate the common and CPU-specific sub-scripts.
import os
from building import *

Import('rtconfig')

cwd = GetCurrentDir()

group = []
# architecture-independent sources
group += SConscript(os.path.join('common', 'SConscript'))
# sources for the CPU port selected by rtconfig
group += SConscript(os.path.join(rtconfig.CPU, 'SConscript'))

Return('group')

View File

@ -0,0 +1,9 @@
# Build script for the common libcpu sources (C files only).
from building import *

here = GetCurrentDir()
group = DefineGroup('libcpu', Glob('*.c'), depend = [''], CPPPATH = [here])
Return('group')

View File

@ -0,0 +1,15 @@
# RT-Thread build script: C and assembly sources for this CPU port.
from building import *

Import('rtconfig')

cwd = GetCurrentDir()
sources = Glob('*.c') + Glob('*.S')

group = DefineGroup('libcpu', sources, depend = [''], CPPPATH = [cwd])
Return('group')

View File

@ -0,0 +1,369 @@
/*
* Copyright (c) 2018, Synopsys, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#define __ASSEMBLY__
#include "include/arc/arc.h"
#include "include/arc/arc_asm_common.h"
.global rt_interrupt_enter;
.global rt_interrupt_leave;
.global rt_thread_switch_interrupt_flag;
.global rt_interrupt_from_thread;
.global rt_interrupt_to_thread;
.global exc_nest_count;
.global set_hw_stack_check;
.text
.align 4
/*
 * Common switch tail: r0 = &from-thread sp slot, r1 = &to-thread sp slot.
 * Saves the current sp, adopts the target stack, reprograms the hardware
 * stack-check bounds for the new thread (with checking temporarily
 * disabled around the update), then jumps through the resume address the
 * target thread left on its stack.
 */
dispatcher:
    st sp, [r0]
    ld sp, [r1]
#if ARC_FEATURE_STACK_CHECK
#if ARC_FEATURE_SEC_PRESENT
    lr r0, [AUX_SEC_STAT]
    bclr r0, r0, AUX_SEC_STAT_BIT_SSC
    sflag r0
#else
    lr r0, [AUX_STATUS32]
    bclr r0, r0, AUX_STATUS_BIT_SC
    kflag r0
#endif
    jl set_hw_stack_check
#if ARC_FEATURE_SEC_PRESENT
    lr r0, [AUX_SEC_STAT]
    bset r0, r0, AUX_SEC_STAT_BIT_SSC
    sflag r0
#else
    lr r0, [AUX_STATUS32]
    bset r0, r0, AUX_STATUS_BIT_SC
    kflag r0
#endif
#endif
    pop r0
    j [r0]
/* return routine when task dispatch happened in task context */
dispatch_r:
    RESTORE_NONSCRATCH_REGS
    RESTORE_R0_TO_R12
    j [blink]
/*
 * rt_base_t rt_hw_interrupt_disable();
 * Globally disables interrupts; returns the previous state in r0 for a
 * later rt_hw_interrupt_enable.
 */
.global rt_hw_interrupt_disable
.align 4
rt_hw_interrupt_disable:
    clri r0
    j [blink]
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * Restores the interrupt state previously returned by
 * rt_hw_interrupt_disable (passed in r0).
 */
.global rt_hw_interrupt_enable
.align 4
rt_hw_interrupt_enable:
    seti r0
    j [blink]
/*
 * Request a context switch from interrupt context: r0 = from-thread sp
 * slot, r1 = to-thread sp slot. Records the request in
 * rt_thread_switch_interrupt_flag; the actual switch is performed on
 * interrupt/exception exit (ret_int_2 / ret_exc_2). If a request is
 * already pending, only the destination is updated.
 */
.global rt_hw_context_switch_interrupt
.align 4
rt_hw_context_switch_interrupt:
    ld r2, [rt_thread_switch_interrupt_flag]
    breq r2, 1, _reswitch /* Check the flag, if it is 1, skip to reswitch */
    mov r2, 1
    st r2, [rt_thread_switch_interrupt_flag]
    st r0, [rt_interrupt_from_thread]
_reswitch:
    st r1, [rt_interrupt_to_thread]
    j [blink]
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from
 * r1 --> to
 * Task-context switch: save the scratch and callee-saved registers, push
 * dispatch_r as the resume address, and fall into the common dispatcher.
 */
.global rt_hw_context_switch
.align 4
rt_hw_context_switch:
    SAVE_R0_TO_R12
    SAVE_NONSCRATCH_REGS
    mov r2, dispatch_r
    push r2
    b dispatcher
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to
 * Switch to the first thread (no "from" context to save): adopt the
 * target stack, program the hardware stack-check bounds for it, then jump
 * through the resume address on the target stack.
 */
.global rt_hw_context_switch_to
.align 4
rt_hw_context_switch_to:
    ld sp, [r0]
#if ARC_FEATURE_STACK_CHECK
    mov r1, r0 /* set_hw_stack_check takes the "to" sp slot in r1 */
#if ARC_FEATURE_SEC_PRESENT
    lr r0, [AUX_SEC_STAT]
    bclr r0, r0, AUX_SEC_STAT_BIT_SSC
    sflag r0
#else
    lr r0, [AUX_STATUS32]
    bclr r0, r0, AUX_STATUS_BIT_SC
    kflag r0
#endif
    jl set_hw_stack_check
#if ARC_FEATURE_SEC_PRESENT
    lr r0, [AUX_SEC_STAT]
    bset r0, r0, AUX_SEC_STAT_BIT_SSC
    sflag r0
#else
    lr r0, [AUX_STATUS32]
    bset r0, r0, AUX_STATUS_BIT_SC
    kflag r0
#endif
#endif
    pop r0
    j [r0]
/*
 * First-run resume routine: unwinds the frame built by rt_hw_stack_init
 * (pc/blink/task/status32/r0), loads STATUS32 in the delay slot and jumps
 * into the thread entry with r0 = parameter, blink = thread exit.
 */
.global start_r
.align 4
start_r:
    pop blink;
    pop r1
    pop r2
    pop r0
    j_s.d [r1]
    kflag r2
/*
 * int __rt_ffs(int value);
 * r0 --> value
 * Returns the 1-based index of the first (least significant) set bit,
 * or 0 when value == 0 (r0 passes through unchanged in that case).
 */
.global __rt_ffs
.align 4
__rt_ffs:
    breq r0, 0, __rt_ffs_return
    ffs r1, r0
    add r0, r1, 1
__rt_ffs_return:
    j [blink]
/****** exceptions and interrupts handling ******/
/****** entry for exception handling ******/
/*
 * CPU exception entry: saves the exception context, bumps exc_nest_count,
 * moves to the dedicated exception stack when the fault came from task
 * context, and dispatches to the handler indexed by the ECR vector number.
 * On exit, performs a pending context switch if the fault came from task
 * context and a dispatch was requested.
 */
.global exc_entry_cpu
.align 4
exc_entry_cpu:
    EXCEPTION_PROLOGUE
    mov blink, sp
    mov r3, sp /* as exception handler's para(p_excinfo) */
    ld r0, [exc_nest_count]
    add r1, r0, 1
    st r1, [exc_nest_count]
    brne r0, 0, exc_handler_1
    /* change to exception stack if interrupt happened in task context */
    mov sp, _e_stack
exc_handler_1:
    PUSH blink
    lr r0, [AUX_ECR]
    lsr r0, r0, 16 /* vector number field of ECR */
    mov r1, exc_int_handler_table
    ld.as r2, [r1, r0]
    mov r0, r3
    jl [r2]
    /* interrupts are not allowed */
ret_exc:
    POP sp
    mov r1, exc_nest_count
    ld r0, [r1]
    sub r0, r0, 1
    st r0, [r1]
    brne r0, 0, ret_exc_1 /* nest exception case */
    lr r1, [AUX_IRQ_ACT] /* nest interrupt case */
    brne r1, 0, ret_exc_1
    ld r0, [rt_thread_switch_interrupt_flag]
    brne r0, 0, ret_exc_2
ret_exc_1: /* return from non-task context, interrupts or exceptions are nested */
    EXCEPTION_EPILOGUE
    rtie
/* there is a dispatch request */
ret_exc_2:
    /* clear dispatch request */
    mov r0, 0
    st r0, [rt_thread_switch_interrupt_flag]
    SAVE_CALLEE_REGS /* save callee save registers */
    /* clear exception bit to do exception exit by SW */
    lr r0, [AUX_STATUS32]
    bclr r0, r0, AUX_STATUS_BIT_AE
    kflag r0
    mov r1, ret_exc_r /* save return address */
    PUSH r1
    ld r0, [rt_interrupt_from_thread]
    ld r1, [rt_interrupt_to_thread]
    b dispatcher
ret_exc_r:
    /* recover exception status */
    lr r0, [AUX_STATUS32]
    bset r0, r0, AUX_STATUS_BIT_AE
    kflag r0
    RESTORE_CALLEE_REGS
    EXCEPTION_EPILOGUE
    rtie
/****** entry for normal interrupt exception handling ******/
/*
 * Normal interrupt entry: redirects priority-0 interrupts to the fast-IRQ
 * path when FIRQ is enabled, otherwise saves context, tracks nesting via
 * exc_nest_count (switching to the exception stack for the outermost
 * interrupt), dispatches the handler from exc_int_handler_table, and on
 * the outermost exit performs a pending software context switch.
 */
.global exc_entry_int /* entry for interrupt handling */
.align 4
exc_entry_int:
#if ARC_FEATURE_FIRQ == 1
/* check whether it is P0 interrupt */
#if ARC_FEATURE_RGF_NUM_BANKS > 1
    lr r0, [AUX_IRQ_ACT]
    btst r0, 0
    jnz exc_entry_firq
#else
    PUSH r10
    lr r10, [AUX_IRQ_ACT]
    btst r10, 0
    POP r10
    jnz exc_entry_firq
#endif
#endif
    INTERRUPT_PROLOGUE
    mov blink, sp
    clri /* disable interrupt */
    ld r3, [exc_nest_count]
    add r2, r3, 1
    st r2, [exc_nest_count]
    seti /* enable higher priority interrupt */
    brne r3, 0, irq_handler_1
    /* change to exception stack if interrupt happened in task context */
    mov sp, _e_stack
#if ARC_FEATURE_STACK_CHECK
#if ARC_FEATURE_SEC_PRESENT
    lr r0, [AUX_SEC_STAT]
    bclr r0, r0, AUX_SEC_STAT_BIT_SSC
    sflag r0
#else
    lr r0, [AUX_STATUS32]
    bclr r0, r0, AUX_STATUS_BIT_SC
    kflag r0
#endif
#endif
irq_handler_1:
    PUSH blink
    jl rt_interrupt_enter
    lr r0, [AUX_IRQ_CAUSE]
    sr r0, [AUX_IRQ_SELECT]
    mov r1, exc_int_handler_table
    ld.as r2, [r1, r0] /* r2 = exc_int_handler_table + irqno *4 */
    /* handle software triggered interrupt */
    lr r3, [AUX_IRQ_HINT]
    cmp r3, r0
    bne.d irq_hint_handled
    xor r3, r3, r3
    sr r3, [AUX_IRQ_HINT]
irq_hint_handled:
    lr r3, [AUX_IRQ_PRIORITY]
    PUSH r3 /* save irq priority */
    jl [r2] /* jump to interrupt handler */
    jl rt_interrupt_leave
ret_int:
    clri /* disable interrupt */
    POP r3 /* irq priority */
    POP sp
    mov r1, exc_nest_count
    ld r0, [r1]
    sub r0, r0, 1
    st r0, [r1]
    /* if there are multi-bits set in IRQ_ACT, it's still in nest interrupt */
    lr r0, [AUX_IRQ_CAUSE]
    sr r0, [AUX_IRQ_SELECT]
    lr r3, [AUX_IRQ_PRIORITY]
    lr r1, [AUX_IRQ_ACT]
    bclr r2, r1, r3
    brne r2, 0, ret_int_1
    ld r0, [rt_thread_switch_interrupt_flag]
    brne r0, 0, ret_int_2
ret_int_1: /* return from non-task context */
    INTERRUPT_EPILOGUE
    rtie
/* there is a dispatch request */
ret_int_2:
    /* clear dispatch request */
    mov r0, 0
    st r0, [rt_thread_switch_interrupt_flag]
    /* interrupt return by SW */
    lr r10, [AUX_IRQ_ACT]
    PUSH r10
    bclr r10, r10, r3 /* clear related bits in IRQ_ACT */
    sr r10, [AUX_IRQ_ACT]
    SAVE_CALLEE_REGS /* save callee save registers */
    mov r1, ret_int_r /* save return address */
    PUSH r1
    ld r0, [rt_interrupt_from_thread]
    ld r1, [rt_interrupt_to_thread]
    b dispatcher
ret_int_r:
    RESTORE_CALLEE_REGS
    /* recover AUX_IRQ_ACT to restore the interrupt status */
    POPAX AUX_IRQ_ACT
    INTERRUPT_EPILOGUE
    rtie
/****** entry for fast irq exception handling ******/
/*
 * Fast IRQ entry (priority 0 with FIRQ enabled). Declared weak so a BSP
 * can override it. Minimal save/restore; no nesting bookkeeping and no
 * scheduler interaction here.
 */
.global exc_entry_firq
.weak exc_entry_firq
.align 4
exc_entry_firq:
    SAVE_FIQ_EXC_REGS
    lr r0, [AUX_IRQ_CAUSE]
    mov r1, exc_int_handler_table
    /* r2 = _kernel_exc_tbl + irqno *4 */
    ld.as r2, [r1, r0]
    /* for the case of software triggered interrupt */
    lr r3, [AUX_IRQ_HINT]
    cmp r3, r0
    bne.d firq_hint_handled
    xor r3, r3, r3
    sr r3, [AUX_IRQ_HINT]
firq_hint_handled:
    /* jump to interrupt handler */
    mov r0, sp
    jl [r2]
firq_return:
    RESTORE_FIQ_EXC_REGS
    rtie

View File

@ -0,0 +1,79 @@
/*
* Copyright (c) 2018, Synopsys, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <rtthread.h>
#include "arc/arc_exception.h"
/* Initial STATUS32 for a new thread: interrupts enabled at the lowest
 * priority, stack checking enabled when the feature is configured. */
#if ARC_FEATURE_STACK_CHECK
#define ARC_INIT_STATUS ((1 << AUX_STATUS_BIT_SC) | AUX_STATUS_MASK_IE | ((-1 - INT_PRI_MIN) << 1) | STATUS32_RESET_VALUE)
#else
#define ARC_INIT_STATUS (AUX_STATUS_MASK_IE | ((-1 - INT_PRI_MIN) << 1) | STATUS32_RESET_VALUE)
#endif
extern void start_r(void);
/* Shared state between the C port layer and context_gcc.S */
rt_uint32_t rt_thread_switch_interrupt_flag; /* 1 = switch pending at interrupt exit */
rt_uint32_t rt_interrupt_from_thread;        /* &sp slot of the outgoing thread */
rt_uint32_t rt_interrupt_to_thread;          /* &sp slot of the incoming thread */
rt_uint32_t exc_nest_count;                  /* interrupt/exception nesting depth */
/* Layout of the frame popped by start_r for a thread's first run
 * (must match the pop order in context_gcc.S). */
struct init_stack_frame {
    rt_uint32_t pc;       /* resume routine: start_r */
    rt_uint32_t blink;    /* thread exit routine (texit) */
    rt_uint32_t task;     /* thread entry point (tentry) */
    rt_uint32_t status32; /* initial STATUS32 (ARC_INIT_STATUS) */
    rt_uint32_t r0;       /* thread entry parameter */
};
/**
 * Build the initial stack frame for a new thread.
 *
 * Lays down a struct init_stack_frame at the (8-byte aligned) top of the
 * thread stack so that the first switch through start_r pops pc/blink/
 * task/status32/r0 and enters @tentry with @parameter in r0.
 *
 * @param tentry     thread entry point
 * @param parameter  argument delivered to the entry in r0
 * @param stack_addr highest usable address of the stack area
 * @param texit      routine invoked when the entry function returns
 * @return the initial stack pointer to store in the thread's sp slot
 */
rt_uint8_t *rt_hw_stack_init(void *tentry,
    void *parameter,
    rt_uint8_t *stack_addr,
    void *texit)
{
    rt_uint8_t *sp;
    struct init_stack_frame *frame;

    /* step one word past stack_addr, then round down to 8-byte alignment */
    sp = (rt_uint8_t *)RT_ALIGN_DOWN((rt_uint32_t)(stack_addr + sizeof(rt_uint32_t)), 8);
    /* reserve room for the first-run frame */
    sp -= sizeof(struct init_stack_frame);
    frame = (struct init_stack_frame *)sp;

    frame->r0       = (rt_uint32_t)parameter;
    frame->status32 = ARC_INIT_STATUS;
    frame->task     = (rt_uint32_t)tentry;
    frame->blink    = (rt_uint32_t)texit;
    frame->pc       = (rt_uint32_t)start_r;

    return sp;
}
/**
 * This function sets the hook, which is invoked on fault exception handling.
 *
 * @note Not implemented on this port: the handler is accepted but never
 *       stored or invoked; exceptions are dispatched through
 *       exc_int_handler_table instead.
 *
 * @param exception_handle the exception handling hook function.
 */
void rt_hw_exception_install(rt_err_t (*exception_handle)(void *context))
{
    /* Intentionally unused; (void) cast avoids the self-assignment idiom
     * that clang flags with -Wself-assign. */
    (void)exception_handle;
}
/**
 * Program the ARC hardware stack-check bounds for the incoming thread.
 *
 * Called from the assembly switch paths with stack checking temporarily
 * disabled. Recovers the rt_thread from its embedded sp slot pointer and
 * writes the thread's stack limits into the (secure or normal) KSTACK
 * aux registers.
 *
 * @param from &sp slot of the outgoing thread (currently unused here)
 * @param to   &sp slot of the incoming thread; NULL leaves bounds untouched
 */
void set_hw_stack_check(rt_uint32_t *from, rt_uint32_t *to)
{
    struct rt_thread *rt_thread_to;
    if (to != NULL) {
        /* 'to' points at the sp member inside struct rt_thread */
        rt_thread_to = rt_container_of(to, struct rt_thread, sp);
#if ARC_FEATURE_SEC_PRESENT
        arc_aux_write(AUX_S_KSTACK_TOP, (uint32_t)(rt_thread_to->stack_addr));
        arc_aux_write(AUX_S_KSTACK_BASE, (uint32_t)(rt_thread_to->stack_addr)+rt_thread_to->stack_size);
#else
        arc_aux_write(AUX_KSTACK_TOP, (uint32_t)(rt_thread_to->stack_addr));
        arc_aux_write(AUX_KSTACK_BASE, (uint32_t)(rt_thread_to->stack_addr)+rt_thread_to->stack_size);
#endif
    }
}

View File

@ -0,0 +1,293 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-08-23 Bernard first version
*/
#ifndef __AT91SAM7S_H__
#define __AT91SAM7S_H__
#ifdef __cplusplus
extern "C" {
#endif
#define AT91_REG *(volatile unsigned int *) /* Hardware register definition */
/* ========== Register definition for TC0 peripheral ========== */
#define AT91C_TC0_SR (AT91_REG(0xFFFA0020)) /* TC0 Status Register */
#define AT91C_TC0_RC (AT91_REG(0xFFFA001C)) /* TC0 Register C */
#define AT91C_TC0_RB (AT91_REG(0xFFFA0018)) /* TC0 Register B */
#define AT91C_TC0_CCR (AT91_REG(0xFFFA0000)) /* TC0 Channel Control Register */
#define AT91C_TC0_CMR (AT91_REG(0xFFFA0004)) /* TC0 Channel Mode Register (Capture Mode / Waveform Mode) */
#define AT91C_TC0_IER (AT91_REG(0xFFFA0024)) /* TC0 Interrupt Enable Register */
#define AT91C_TC0_RA (AT91_REG(0xFFFA0014)) /* TC0 Register A */
#define AT91C_TC0_IDR (AT91_REG(0xFFFA0028)) /* TC0 Interrupt Disable Register */
#define AT91C_TC0_CV (AT91_REG(0xFFFA0010)) /* TC0 Counter Value */
#define AT91C_TC0_IMR (AT91_REG(0xFFFA002C)) /* TC0 Interrupt Mask Register */
/* ========== Register definition for TC1 peripheral ========== */
#define AT91C_TC1_RB (AT91_REG(0xFFFA0058)) /* TC1 Register B */
#define AT91C_TC1_CCR (AT91_REG(0xFFFA0040)) /* TC1 Channel Control Register */
#define AT91C_TC1_IER (AT91_REG(0xFFFA0064)) /* TC1 Interrupt Enable Register */
#define AT91C_TC1_IDR (AT91_REG(0xFFFA0068)) /* TC1 Interrupt Disable Register */
#define AT91C_TC1_SR (AT91_REG(0xFFFA0060)) /* TC1 Status Register */
#define AT91C_TC1_CMR (AT91_REG(0xFFFA0044)) /* TC1 Channel Mode Register (Capture Mode / Waveform Mode) */
#define AT91C_TC1_RA (AT91_REG(0xFFFA0054)) /* TC1 Register A */
#define AT91C_TC1_RC (AT91_REG(0xFFFA005C)) /* TC1 Register C */
#define AT91C_TC1_IMR (AT91_REG(0xFFFA006C)) /* TC1 Interrupt Mask Register */
#define AT91C_TC1_CV (AT91_REG(0xFFFA0050)) /* TC1 Counter Value */
/* ========== Register definition for TC2 peripheral ========== */
#define AT91C_TC2_CMR (AT91_REG(0xFFFA0084)) /* TC2 Channel Mode Register (Capture Mode / Waveform Mode) */
#define AT91C_TC2_CCR (AT91_REG(0xFFFA0080)) /* TC2 Channel Control Register */
#define AT91C_TC2_CV (AT91_REG(0xFFFA0090)) /* TC2 Counter Value */
#define AT91C_TC2_RA (AT91_REG(0xFFFA0094)) /* TC2 Register A */
#define AT91C_TC2_RB (AT91_REG(0xFFFA0098)) /* TC2 Register B */
#define AT91C_TC2_IDR (AT91_REG(0xFFFA00A8)) /* TC2 Interrupt Disable Register */
#define AT91C_TC2_IMR (AT91_REG(0xFFFA00AC)) /* TC2 Interrupt Mask Register */
#define AT91C_TC2_RC (AT91_REG(0xFFFA009C)) /* TC2 Register C */
#define AT91C_TC2_IER (AT91_REG(0xFFFA00A4)) /* TC2 Interrupt Enable Register */
#define AT91C_TC2_SR (AT91_REG(0xFFFA00A0)) /* TC2 Status Register */
/* ========== Register definition for PITC peripheral ========== */
#define AT91C_PITC_PIVR (AT91_REG(0xFFFFFD38)) /* PITC Period Interval Value Register */
#define AT91C_PITC_PISR (AT91_REG(0xFFFFFD34)) /* PITC Period Interval Status Register */
#define AT91C_PITC_PIIR (AT91_REG(0xFFFFFD3C)) /* PITC Period Interval Image Register */
#define AT91C_PITC_PIMR (AT91_REG(0xFFFFFD30)) /* PITC Period Interval Mode Register */
/* ========== Register definition for UDP peripheral ========== */
#define AT91C_UDP_NUM (AT91_REG(0xFFFB0000)) /* UDP Frame Number Register */
#define AT91C_UDP_STAT (AT91_REG(0xFFFB0004)) /* UDP Global State Register */
#define AT91C_UDP_FADDR (AT91_REG(0xFFFB0008)) /* UDP Function Address Register */
#define AT91C_UDP_IER (AT91_REG(0xFFFB0010)) /* UDP Interrupt Enable Register */
#define AT91C_UDP_IDR (AT91_REG(0xFFFB0014)) /* UDP Interrupt Disable Register */
#define AT91C_UDP_IMR (AT91_REG(0xFFFB0018)) /* UDP Interrupt Mask Register */
#define AT91C_UDP_ISR (AT91_REG(0xFFFB001C)) /* UDP Interrupt Status Register */
#define AT91C_UDP_ICR (AT91_REG(0xFFFB0020)) /* UDP Interrupt Clear Register */
#define AT91C_UDP_RSTEP (AT91_REG(0xFFFB0028)) /* UDP Reset Endpoint Register */
#define AT91C_UDP_CSR0 (AT91_REG(0xFFFB0030)) /* UDP Endpoint Control and Status Register */
#define AT91C_UDP_CSR(n) (*(&AT91C_UDP_CSR0 + n))
#define AT91C_UDP_FDR0 (AT91_REG(0xFFFB0050)) /* UDP Endpoint FIFO Data Register */
#define AT91C_UDP_FDR(n) (*(&AT91C_UDP_FDR0 + n))
#define AT91C_UDP_TXVC (AT91_REG(0xFFFB0074)) /* UDP Transceiver Control Register */
/* ========== Register definition for US0 peripheral ========== */
#define AT91C_US0_CR (AT91_REG(0xFFFC0000)) /* US0 Control Register */
#define AT91C_US0_MR (AT91_REG(0xFFFC0004)) /* US0 Mode Register */
#define AT91C_US0_IER (AT91_REG(0xFFFC0008)) /* US0 Interrupt Enable Register */
#define AT91C_US0_IDR (AT91_REG(0xFFFC000C)) /* US0 Interrupt Disable Register */
#define AT91C_US0_IMR (AT91_REG(0xFFFC0010)) /* US0 Interrupt Mask Register */
#define AT91C_US0_CSR (AT91_REG(0xFFFC0014)) /* US0 Channel Status Register */
#define AT91C_US0_RHR (AT91_REG(0xFFFC0018)) /* US0 Receiver Holding Register */
#define AT91C_US0_THR (AT91_REG(0xFFFC001C)) /* US0 Transmitter Holding Register */
#define AT91C_US0_BRGR (AT91_REG(0xFFFC0020)) /* US0 Baud Rate Generator Register */
#define AT91C_US0_RTOR (AT91_REG(0xFFFC0024)) /* US0 Receiver Time-out Register */
#define AT91C_US0_TTGR (AT91_REG(0xFFFC0028)) /* US0 Transmitter Time-guard Register */
#define AT91C_US0_NER (AT91_REG(0xFFFC0044)) /* US0 Nb Errors Register */
#define AT91C_US0_FIDI (AT91_REG(0xFFFC0040)) /* US0 FI_DI_Ratio Register */
#define AT91C_US0_IF (AT91_REG(0xFFFC004C)) /* US0 IRDA_FILTER Register */
/* ========== Register definition for AIC peripheral ========== */
#define AT91C_AIC_SMR0 (AT91_REG(0xFFFFF000)) /* AIC Source Mode Register */
#define AT91C_AIC_SMR(n) (*(&AT91C_AIC_SMR0 + n))
#define AT91C_AIC_SVR0 (AT91_REG(0xFFFFF080)) /* AIC Source Vector Register */
#define AT91C_AIC_SVR(n) (*(&AT91C_AIC_SVR0 + n))
#define AT91C_AIC_IVR (AT91_REG(0xFFFFF100)) /* AIC Interrupt Vector Register */
#define AT91C_AIC_FVR (AT91_REG(0xFFFFF104)) /* AIC FIQ Vector Register */
#define AT91C_AIC_ISR (AT91_REG(0xFFFFF108)) /* AIC Interrupt Status Register */
#define AT91C_AIC_IPR (AT91_REG(0xFFFFF10C)) /* AIC Interrupt Pending Register */
#define AT91C_AIC_IMR (AT91_REG(0xFFFFF110)) /* AIC Interrupt Mask Register */
#define AT91C_AIC_CISR (AT91_REG(0xFFFFF114)) /* AIC Core Interrupt Status Register */
#define AT91C_AIC_IECR (AT91_REG(0xFFFFF120)) /* AIC Interrupt Enable Command Register */
#define AT91C_AIC_IDCR (AT91_REG(0xFFFFF124)) /* AIC Interrupt Disable Command Register */
#define AT91C_AIC_ICCR (AT91_REG(0xFFFFF128)) /* AIC Interrupt Clear Command Register */
#define AT91C_AIC_ISCR (AT91_REG(0xFFFFF12C)) /* AIC Interrupt Set Command Register */
#define AT91C_AIC_EOICR (AT91_REG(0xFFFFF130)) /* AIC End of Interrupt Command Register */
#define AT91C_AIC_SPU (AT91_REG(0xFFFFF134)) /* AIC Spurious Vector Register */
#define AT91C_AIC_DCR (AT91_REG(0xFFFFF138)) /* AIC Debug Control Register (Protect) */
#define AT91C_AIC_FFER (AT91_REG(0xFFFFF140)) /* AIC Fast Forcing Enable Register */
#define AT91C_AIC_FFDR (AT91_REG(0xFFFFF144)) /* AIC Fast Forcing Disable Register */
#define AT91C_AIC_FFSR (AT91_REG(0xFFFFF148)) /* AIC Fast Forcing Status Register */
/* ========== Register definition for DBGU peripheral ========== */
#define AT91C_DBGU_EXID (AT91_REG(0xFFFFF244)) /* DBGU Chip ID Extension Register */
#define AT91C_DBGU_BRGR (AT91_REG(0xFFFFF220)) /* DBGU Baud Rate Generator Register */
#define AT91C_DBGU_IDR (AT91_REG(0xFFFFF20C)) /* DBGU Interrupt Disable Register */
#define AT91C_DBGU_CSR (AT91_REG(0xFFFFF214)) /* DBGU Channel Status Register */
#define AT91C_DBGU_CIDR (AT91_REG(0xFFFFF240)) /* DBGU Chip ID Register */
#define AT91C_DBGU_MR (AT91_REG(0xFFFFF204)) /* DBGU Mode Register */
#define AT91C_DBGU_IMR (AT91_REG(0xFFFFF210)) /* DBGU Interrupt Mask Register */
#define AT91C_DBGU_CR (AT91_REG(0xFFFFF200)) /* DBGU Control Register */
#define AT91C_DBGU_FNTR (AT91_REG(0xFFFFF248)) /* DBGU Force NTRST Register */
#define AT91C_DBGU_THR (AT91_REG(0xFFFFF21C)) /* DBGU Transmitter Holding Register */
#define AT91C_DBGU_RHR (AT91_REG(0xFFFFF218)) /* DBGU Receiver Holding Register */
#define AT91C_DBGU_IER (AT91_REG(0xFFFFF208)) /* DBGU Interrupt Enable Register */
/* ========== Register definition for PIO peripheral ========== */
#define AT91C_PIO_ODR (AT91_REG(0xFFFFF414)) /* PIOA Output Disable Registerr */
#define AT91C_PIO_SODR (AT91_REG(0xFFFFF430)) /* PIOA Set Output Data Register */
#define AT91C_PIO_ISR (AT91_REG(0xFFFFF44C)) /* PIOA Interrupt Status Register */
#define AT91C_PIO_ABSR (AT91_REG(0xFFFFF478)) /* PIOA AB Select Status Register */
#define AT91C_PIO_IER (AT91_REG(0xFFFFF440)) /* PIOA Interrupt Enable Register */
#define AT91C_PIO_PPUDR (AT91_REG(0xFFFFF460)) /* PIOA Pull-up Disable Register */
#define AT91C_PIO_IMR (AT91_REG(0xFFFFF448)) /* PIOA Interrupt Mask Register */
#define AT91C_PIO_PER (AT91_REG(0xFFFFF400)) /* PIOA PIO Enable Register */
#define AT91C_PIO_IFDR (AT91_REG(0xFFFFF424)) /* PIOA Input Filter Disable Register */
#define AT91C_PIO_OWDR (AT91_REG(0xFFFFF4A4)) /* PIOA Output Write Disable Register */
#define AT91C_PIO_MDSR (AT91_REG(0xFFFFF458)) /* PIOA Multi-driver Status Register */
#define AT91C_PIO_IDR (AT91_REG(0xFFFFF444)) /* PIOA Interrupt Disable Register */
#define AT91C_PIO_ODSR (AT91_REG(0xFFFFF438)) /* PIOA Output Data Status Register */
#define AT91C_PIO_PPUSR (AT91_REG(0xFFFFF468)) /* PIOA Pull-up Status Register */
#define AT91C_PIO_OWSR (AT91_REG(0xFFFFF4A8)) /* PIOA Output Write Status Register */
#define AT91C_PIO_BSR (AT91_REG(0xFFFFF474)) /* PIOA Select B Register */
#define AT91C_PIO_OWER (AT91_REG(0xFFFFF4A0)) /* PIOA Output Write Enable Register */
#define AT91C_PIO_IFER (AT91_REG(0xFFFFF420)) /* PIOA Input Filter Enable Register */
#define AT91C_PIO_PDSR (AT91_REG(0xFFFFF43C)) /* PIOA Pin Data Status Register */
#define AT91C_PIO_PPUER (AT91_REG(0xFFFFF464)) /* PIOA Pull-up Enable Register */
#define AT91C_PIO_OSR (AT91_REG(0xFFFFF418)) /* PIOA Output Status Register */
#define AT91C_PIO_ASR (AT91_REG(0xFFFFF470)) /* PIOA Select A Register */
#define AT91C_PIO_MDDR (AT91_REG(0xFFFFF454)) /* PIOA Multi-driver Disable Register */
#define AT91C_PIO_CODR (AT91_REG(0xFFFFF434)) /* PIOA Clear Output Data Register */
#define AT91C_PIO_MDER (AT91_REG(0xFFFFF450)) /* PIOA Multi-driver Enable Register */
#define AT91C_PIO_PDR (AT91_REG(0xFFFFF404)) /* PIOA PIO Disable Register */
#define AT91C_PIO_IFSR (AT91_REG(0xFFFFF428)) /* PIOA Input Filter Status Register */
#define AT91C_PIO_OER (AT91_REG(0xFFFFF410)) /* PIOA Output Enable Register */
#define AT91C_PIO_PSR (AT91_REG(0xFFFFF408)) /* PIOA PIO Status Register */
// ========== Register definition for PIOA peripheral ==========
#define AT91C_PIOA_IMR (AT91_REG(0xFFFFF448)) // (PIOA) Interrupt Mask Register
#define AT91C_PIOA_IER (AT91_REG(0xFFFFF440)) // (PIOA) Interrupt Enable Register
#define AT91C_PIOA_OWDR (AT91_REG(0xFFFFF4A4)) // (PIOA) Output Write Disable Register
#define AT91C_PIOA_ISR (AT91_REG(0xFFFFF44C)) // (PIOA) Interrupt Status Register
#define AT91C_PIOA_PPUDR (AT91_REG(0xFFFFF460)) // (PIOA) Pull-up Disable Register
#define AT91C_PIOA_MDSR (AT91_REG(0xFFFFF458)) // (PIOA) Multi-driver Status Register
#define AT91C_PIOA_MDER (AT91_REG(0xFFFFF450)) // (PIOA) Multi-driver Enable Register
#define AT91C_PIOA_PER (AT91_REG(0xFFFFF400)) // (PIOA) PIO Enable Register
#define AT91C_PIOA_PSR (AT91_REG(0xFFFFF408)) // (PIOA) PIO Status Register
#define AT91C_PIOA_OER (AT91_REG(0xFFFFF410)) // (PIOA) Output Enable Register
#define AT91C_PIOA_BSR (AT91_REG(0xFFFFF474)) // (PIOA) Select B Register
#define AT91C_PIOA_PPUER (AT91_REG(0xFFFFF464)) // (PIOA) Pull-up Enable Register
#define AT91C_PIOA_MDDR (AT91_REG(0xFFFFF454)) // (PIOA) Multi-driver Disable Register
#define AT91C_PIOA_PDR (AT91_REG(0xFFFFF404)) // (PIOA) PIO Disable Register
#define AT91C_PIOA_ODR (AT91_REG(0xFFFFF414)) // (PIOA) Output Disable Registerr
#define AT91C_PIOA_IFDR (AT91_REG(0xFFFFF424)) // (PIOA) Input Filter Disable Register
#define AT91C_PIOA_ABSR (AT91_REG(0xFFFFF478)) // (PIOA) AB Select Status Register
#define AT91C_PIOA_ASR (AT91_REG(0xFFFFF470)) // (PIOA) Select A Register
#define AT91C_PIOA_PPUSR (AT91_REG(0xFFFFF468)) // (PIOA) Pull-up Status Register
#define AT91C_PIOA_ODSR (AT91_REG(0xFFFFF438)) // (PIOA) Output Data Status Register
#define AT91C_PIOA_SODR (AT91_REG(0xFFFFF430)) // (PIOA) Set Output Data Register
#define AT91C_PIOA_IFSR (AT91_REG(0xFFFFF428)) // (PIOA) Input Filter Status Register
#define AT91C_PIOA_IFER (AT91_REG(0xFFFFF420)) // (PIOA) Input Filter Enable Register
#define AT91C_PIOA_OSR (AT91_REG(0xFFFFF418)) // (PIOA) Output Status Register
#define AT91C_PIOA_IDR (AT91_REG(0xFFFFF444)) // (PIOA) Interrupt Disable Register
#define AT91C_PIOA_PDSR (AT91_REG(0xFFFFF43C)) // (PIOA) Pin Data Status Register
#define AT91C_PIOA_CODR (AT91_REG(0xFFFFF434)) // (PIOA) Clear Output Data Register
#define AT91C_PIOA_OWSR (AT91_REG(0xFFFFF4A8)) // (PIOA) Output Write Status Register
#define AT91C_PIOA_OWER (AT91_REG(0xFFFFF4A0)) // (PIOA) Output Write Enable Register
// ========== Register definition for PIOB peripheral ==========
#define AT91C_PIOB_OWSR (AT91_REG(0xFFFFF6A8)) // (PIOB) Output Write Status Register
#define AT91C_PIOB_PPUSR (AT91_REG(0xFFFFF668)) // (PIOB) Pull-up Status Register
#define AT91C_PIOB_PPUDR (AT91_REG(0xFFFFF660)) // (PIOB) Pull-up Disable Register
#define AT91C_PIOB_MDSR (AT91_REG(0xFFFFF658)) // (PIOB) Multi-driver Status Register
#define AT91C_PIOB_MDER (AT91_REG(0xFFFFF650)) // (PIOB) Multi-driver Enable Register
#define AT91C_PIOB_IMR (AT91_REG(0xFFFFF648)) // (PIOB) Interrupt Mask Register
#define AT91C_PIOB_OSR (AT91_REG(0xFFFFF618)) // (PIOB) Output Status Register
#define AT91C_PIOB_OER (AT91_REG(0xFFFFF610)) // (PIOB) Output Enable Register
#define AT91C_PIOB_PSR (AT91_REG(0xFFFFF608)) // (PIOB) PIO Status Register
#define AT91C_PIOB_PER (AT91_REG(0xFFFFF600)) // (PIOB) PIO Enable Register
#define AT91C_PIOB_BSR (AT91_REG(0xFFFFF674)) // (PIOB) Select B Register
#define AT91C_PIOB_PPUER (AT91_REG(0xFFFFF664)) // (PIOB) Pull-up Enable Register
#define AT91C_PIOB_IFDR (AT91_REG(0xFFFFF624)) // (PIOB) Input Filter Disable Register
#define AT91C_PIOB_ODR (AT91_REG(0xFFFFF614)) // (PIOB) Output Disable Registerr
#define AT91C_PIOB_ABSR (AT91_REG(0xFFFFF678)) // (PIOB) AB Select Status Register
#define AT91C_PIOB_ASR (AT91_REG(0xFFFFF670)) // (PIOB) Select A Register
#define AT91C_PIOB_IFER (AT91_REG(0xFFFFF620)) // (PIOB) Input Filter Enable Register
#define AT91C_PIOB_IFSR (AT91_REG(0xFFFFF628)) // (PIOB) Input Filter Status Register
#define AT91C_PIOB_SODR (AT91_REG(0xFFFFF630)) // (PIOB) Set Output Data Register
#define AT91C_PIOB_ODSR (AT91_REG(0xFFFFF638)) // (PIOB) Output Data Status Register
#define AT91C_PIOB_CODR (AT91_REG(0xFFFFF634)) // (PIOB) Clear Output Data Register
#define AT91C_PIOB_PDSR (AT91_REG(0xFFFFF63C)) // (PIOB) Pin Data Status Register
#define AT91C_PIOB_OWER (AT91_REG(0xFFFFF6A0)) // (PIOB) Output Write Enable Register
#define AT91C_PIOB_IER (AT91_REG(0xFFFFF640)) // (PIOB) Interrupt Enable Register
#define AT91C_PIOB_OWDR (AT91_REG(0xFFFFF6A4)) // (PIOB) Output Write Disable Register
#define AT91C_PIOB_MDDR (AT91_REG(0xFFFFF654)) // (PIOB) Multi-driver Disable Register
#define AT91C_PIOB_ISR (AT91_REG(0xFFFFF64C)) // (PIOB) Interrupt Status Register
#define AT91C_PIOB_IDR (AT91_REG(0xFFFFF644)) // (PIOB) Interrupt Disable Register
#define AT91C_PIOB_PDR (AT91_REG(0xFFFFF604)) // (PIOB) PIO Disable Register
/* ========== Register definition for PMC peripheral ========== */
#define AT91C_PMC_SCER (AT91_REG(0xFFFFFC00)) /* PMC System Clock Enable Register */
#define AT91C_PMC_SCDR (AT91_REG(0xFFFFFC04)) /* PMC System Clock Disable Register */
#define AT91C_PMC_SCSR (AT91_REG(0xFFFFFC08)) /* PMC System Clock Status Register */
#define AT91C_PMC_PCER (AT91_REG(0xFFFFFC10)) /* PMC Peripheral Clock Enable Register */
#define AT91C_PMC_PCDR (AT91_REG(0xFFFFFC14)) /* PMC Peripheral Clock Disable Register */
#define AT91C_PMC_PCSR (AT91_REG(0xFFFFFC18)) /* PMC Peripheral Clock Status Register */
#define AT91C_PMC_MOR (AT91_REG(0xFFFFFC20)) /* PMC Main Oscillator Register */
#define AT91C_PMC_MCFR (AT91_REG(0xFFFFFC24)) /* PMC Main Clock Frequency Register */
#define AT91C_PMC_PLLR (AT91_REG(0xFFFFFC2C)) /* PMC PLL Register */
#define AT91C_PMC_MCKR (AT91_REG(0xFFFFFC30)) /* PMC Master Clock Register */
#define AT91C_PMC_PCKR (AT91_REG(0xFFFFFC40)) /* PMC Programmable Clock Register */
#define AT91C_PMC_IER (AT91_REG(0xFFFFFC60)) /* PMC Interrupt Enable Register */
#define AT91C_PMC_IDR (AT91_REG(0xFFFFFC64)) /* PMC Interrupt Disable Register */
#define AT91C_PMC_SR (AT91_REG(0xFFFFFC68)) /* PMC Status Register */
#define AT91C_PMC_IMR (AT91_REG(0xFFFFFC6C)) /* PMC Interrupt Mask Register */
/******************************************************************************/
/* PERIPHERAL ID DEFINITIONS FOR AT91SAM7S64 */
/******************************************************************************/
#define AT91C_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
#define AT91C_ID_SYS 1 /* System Peripheral */
#define AT91C_ID_PIOA 2 /* Parallel IO Controller A */
#define AT91C_ID_PIOB 3 /* Parallel IO Controller B */
#define AT91C_ID_ADC 4 /* Analog-to-Digital Converter */
#define AT91C_ID_SPI 5 /* Serial Peripheral Interface */
#define AT91C_ID_US0 6 /* USART 0 */
#define AT91C_ID_US1 7 /* USART 1 */
#define AT91C_ID_SSC 8 /* Serial Synchronous Controller */
#define AT91C_ID_TWI 9 /* Two-Wire Interface */
#define AT91C_ID_PWMC 10 /* PWM Controller */
#define AT91C_ID_UDP 11 /* USB Device Port */
#define AT91C_ID_TC0 12 /* Timer Counter 0 */
#define AT91C_ID_TC1 13 /* Timer Counter 1 */
#define AT91C_ID_TC2 14 /* Timer Counter 2 */
#define AT91C_ID_15 15 /* Reserved */
#define AT91C_ID_16 16 /* Reserved */
#define AT91C_ID_17 17 /* Reserved */
#define AT91C_ID_18 18 /* Reserved */
#define AT91C_ID_19 19 /* Reserved */
#define AT91C_ID_20 20 /* Reserved */
#define AT91C_ID_21 21 /* Reserved */
#define AT91C_ID_22 22 /* Reserved */
#define AT91C_ID_23 23 /* Reserved */
#define AT91C_ID_24 24 /* Reserved */
#define AT91C_ID_25 25 /* Reserved */
#define AT91C_ID_26 26 /* Reserved */
#define AT91C_ID_27 27 /* Reserved */
#define AT91C_ID_28 28 /* Reserved */
#define AT91C_ID_29 29 /* Reserved */
#define AT91C_ID_IRQ0 30 /* Advanced Interrupt Controller (IRQ0) */
#define AT91C_ID_IRQ1 31 /* Advanced Interrupt Controller (IRQ1) */
#define AT91C_ALL_INT 0xC0007FF7 /* ALL VALID INTERRUPTS */
/*****************************/
/* CPU Mode */
/*****************************/
/* ARM processor mode values for the CPSR M[4:0] field */
#define USERMODE 0x10 /* User mode */
#define FIQMODE 0x11 /* FIQ mode */
#define IRQMODE 0x12 /* IRQ mode */
#define SVCMODE 0x13 /* Supervisor mode */
#define ABORTMODE 0x17 /* Abort mode */
#define UNDEFMODE 0x1b /* Undefined-instruction mode */
#define MODEMASK 0x1f /* mask to extract the mode bits from CPSR */
#define NOINT 0xc0 /* I (bit 7) + F (bit 6): IRQ/FIQ disable bits */
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,23 @@
# RT-Thread building script for component
from building import *

Import('rtconfig')

cwd     = GetCurrentDir()
CPPPATH = [cwd]

# C/C++ sources are always compiled; assembly sources depend on the toolchain.
src = Glob('*.c') + Glob('*.cpp')

if rtconfig.PLATFORM in ['armcc', 'armclang']:
    src += Glob('*_rvds.S')
elif rtconfig.PLATFORM == 'gcc':
    src += Glob('*_init.S') + Glob('*_gcc.S')
elif rtconfig.PLATFORM == 'iccarm':
    src += Glob('*_iar.S')

group = DefineGroup('libcpu', src, depend = [''], CPPPATH = CPPPATH)

Return('group')

View File

@ -0,0 +1,90 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-03-13 Bernard first version
*/
/* I (bit 7) and F (bit 6) disable bits of the CPSR */
#define NOINT 0xc0

/*
 * rt_base_t rt_hw_interrupt_disable(void);
 * Disable IRQ/FIQ and return the previous CPSR so the caller can
 * restore it later with rt_hw_interrupt_enable().
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    mrs r0, cpsr        /* r0 = current CPSR (return value) */
    orr r1, r0, #NOINT  /* set the I and F disable bits */
    msr cpsr_c, r1      /* write back the control field only */
    mov pc, lr
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * Restore the CPSR value previously returned by rt_hw_interrupt_disable().
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    msr cpsr, r0        /* restore the whole saved CPSR, including I/F */
    mov pc, lr
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from (address of the 'from' thread's saved-sp slot)
 * r1 --> to   (address of the 'to' thread's saved-sp slot)
 *
 * Frame layout (top to bottom) matches rt_hw_stack_init():
 *   spsr, cpsr, r0-r12, lr, pc
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
    stmfd sp!, {lr}             /* push pc (lr should be pushed in place of PC) */
    stmfd sp!, {r0-r12, lr}     /* push lr & register file */
    mrs r4, cpsr
    stmfd sp!, {r4}             /* push cpsr */
    mrs r4, spsr
    stmfd sp!, {r4}             /* push spsr */
    str sp, [r0]                /* store sp in preempted tasks TCB */
    ldr sp, [r1]                /* get new task stack pointer */
    ldmfd sp!, {r4}             /* pop new task spsr */
    msr spsr_cxsf, r4
    ldmfd sp!, {r4}             /* pop new task cpsr */
    msr cpsr_cxsf, r4
    ldmfd sp!, {r0-r12, lr, pc} /* pop new task r0-r12, lr & pc */
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to (address of the first thread's saved-sp slot)
 * Used for the very first switch: nothing is saved, the new
 * thread's context is simply restored from its stack.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    ldr sp, [r0]                /* get new task stack pointer */
    ldmfd sp!, {r4}             /* pop new task spsr */
    msr spsr_cxsf, r4
    ldmfd sp!, {r4}             /* pop new task cpsr */
    msr cpsr_cxsf, r4
    ldmfd sp!, {r0-r12, lr, pc} /* pop new task r0-r12, lr & pc */
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 * Called when a switch is requested from interrupt context: it only
 * records the request; the actual switch is performed when the
 * interrupt handler exits.
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
    ldr r2, =rt_thread_switch_interrupt_flag
    ldr r3, [r2]
    cmp r3, #1                  /* is a switch already pending? */
    beq _reswitch               /* yes: only update the destination */
    mov r3, #1                  /* set rt_thread_switch_interrupt_flag to 1 */
    str r3, [r2]
    ldr r2, =rt_interrupt_from_thread /* set rt_interrupt_from_thread */
    str r0, [r2]
_reswitch:
    ldr r2, =rt_interrupt_to_thread /* set rt_interrupt_to_thread */
    str r1, [r2]
    mov pc, lr

View File

@ -0,0 +1,103 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2009-01-20 Bernard first version
*/
NOINT EQU 0xc0 ; I + F bits: disable IRQ/FIQ in psr

    AREA |.text|, CODE, READONLY, ALIGN=2
    ARM            ; 32-bit ARM instruction set
    REQUIRE8
    PRESERVE8

;/*
; * rt_base_t rt_hw_interrupt_disable();
; * Disable IRQ/FIQ and return the previous CPSR.
; */
rt_hw_interrupt_disable PROC
    EXPORT rt_hw_interrupt_disable
    MRS r0, cpsr       ; r0 = current CPSR (return value)
    ORR r1, r0, #NOINT ; set the I/F disable bits
    MSR cpsr_c, r1     ; write back the control field only
    BX lr
    ENDP
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; * Restore the CPSR control field saved by rt_hw_interrupt_disable().
; */
rt_hw_interrupt_enable PROC
    EXPORT rt_hw_interrupt_enable
    MSR cpsr_c, r0     ; restore the I/F state from 'level'
    BX lr
    ENDP
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from (address of the 'from' thread's saved-sp slot)
; * r1 --> to   (address of the 'to' thread's saved-sp slot)
; * Frame layout (top to bottom): spsr, cpsr, r0-r12, lr, pc
; */
rt_hw_context_switch PROC
    EXPORT rt_hw_context_switch
    STMFD sp!, {lr}             ; push pc (lr should be pushed in place of PC)
    STMFD sp!, {r0-r12, lr}     ; push lr & register file
    MRS r4, cpsr
    STMFD sp!, {r4}             ; push cpsr
    MRS r4, spsr
    STMFD sp!, {r4}             ; push spsr
    STR sp, [r0]                ; store sp in preempted tasks TCB
    LDR sp, [r1]                ; get new task stack pointer
    LDMFD sp!, {r4}             ; pop new task spsr
    MSR spsr_cxsf, r4
    LDMFD sp!, {r4}             ; pop new task cpsr
    MSR cpsr_cxsf, r4
    LDMFD sp!, {r0-r12, lr, pc} ; pop new task r0-r12, lr & pc
    ENDP
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to (address of the first thread's saved-sp slot)
; * First switch only: restores a context without saving one.
; */
rt_hw_context_switch_to PROC
    EXPORT rt_hw_context_switch_to
    LDR sp, [r0]                ; get new task stack pointer
    LDMFD sp!, {r4}             ; pop new task spsr
    MSR spsr_cxsf, r4
    LDMFD sp!, {r4}             ; pop new task cpsr
    MSR cpsr_cxsf, r4
    LDMFD sp!, {r0-r12, lr, pc} ; pop new task r0-r12, lr & pc
    ENDP
;/*
; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
; * Record a switch request made from interrupt context; the actual
; * switch happens on interrupt exit.
; */
    IMPORT rt_thread_switch_interrupt_flag
    IMPORT rt_interrupt_from_thread
    IMPORT rt_interrupt_to_thread

rt_hw_context_switch_interrupt PROC
    EXPORT rt_hw_context_switch_interrupt
    LDR r2, =rt_thread_switch_interrupt_flag
    LDR r3, [r2]
    CMP r3, #1                  ; is a switch already pending?
    BEQ _reswitch               ; yes: only update the destination
    MOV r3, #1                  ; set rt_thread_switch_interrupt_flag to 1
    STR r3, [r2]
    LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
    STR r0, [r2]
_reswitch
    LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
    STR r1, [r2]
    BX lr
    ENDP
    END

View File

@ -0,0 +1,19 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-08-23 Bernard first version
*/
#include <rtthread.h>
#include "AT91SAM7S.h"
/**
* @addtogroup AT91SAM7
*/
/*@{*/
/*@}*/

View File

@ -0,0 +1,87 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-08-23 Bernard first version
*/
#include <rtthread.h>
#include "AT91SAM7S.h"
/* number of AIC interrupt sources (and vector slots) */
#define MAX_HANDLERS 32

/* interrupt nesting counter maintained by the IRQ entry code */
extern rt_atomic_t rt_interrupt_nest;
/* saved-sp slot addresses recorded by rt_hw_context_switch_interrupt() */
rt_uint32_t rt_interrupt_from_thread, rt_interrupt_to_thread;
/* non-zero when a context switch was requested from interrupt context */
rt_uint32_t rt_thread_switch_interrupt_flag;
/**
* @addtogroup AT91SAM7
*/
/*@{*/
/**
 * Default interrupt service routine installed on every AIC vector.
 * It only reports the unexpected interrupt; real handlers are installed
 * later with rt_hw_interrupt_install().
 *
 * @param vector the interrupt number that fired
 */
void rt_hw_interrupt_handler(int vector)
{
    /* fixed misspelling "occured" in the diagnostic message */
    rt_kprintf("Unhandled interrupt %d occurred!!!\n", vector);
}
/**
 * This function will initialize hardware interrupt: every AIC source
 * vector is pointed at the default handler and the kernel's interrupt
 * bookkeeping variables are cleared.
 */
void rt_hw_interrupt_init()
{
    rt_base_t vector;

    /* point all AIC source vectors at the default handler */
    for (vector = 0; vector < MAX_HANDLERS; vector++)
    {
        AT91C_AIC_SVR(vector) = (rt_uint32_t)rt_hw_interrupt_handler;
    }

    /* init interrupt nest, and context in thread sp */
    rt_interrupt_nest               = 0;
    rt_interrupt_from_thread        = 0;
    rt_interrupt_to_thread          = 0;
    rt_thread_switch_interrupt_flag = 0;
}
/**
 * This function will mask a interrupt (stop it from reaching the CPU).
 * Any already-pending occurrence of the interrupt is cleared as well.
 *
 * @param vector the interrupt number
 */
void rt_hw_interrupt_mask(int vector)
{
    /* disable interrupt (AIC Interrupt Disable Command Register) */
    AT91C_AIC_IDCR = 1 << vector;
    /* clear interrupt (AIC Interrupt Clear Command Register) */
    AT91C_AIC_ICCR = 1 << vector;
}
/**
 * This function will un-mask a interrupt so it can reach the CPU again.
 *
 * @param vector the interrupt number
 */
void rt_hw_interrupt_umask(int vector)
{
    /* AIC Interrupt Enable Command Register */
    AT91C_AIC_IECR = 1 << vector;
}
/**
 * This function will install a interrupt service routine to a interrupt.
 *
 * @param vector the interrupt number
 * @param new_handler the interrupt service routine to be installed
 * @param old_handler receives the previous routine; may be RT_NULL when
 *        the caller is not interested in it
 */
void rt_hw_interrupt_install(int vector, rt_isr_handler_t new_handler, rt_isr_handler_t *old_handler)
{
    if (vector >= 0 && vector < MAX_HANDLERS)
    {
        /* Check the pointer itself before dereferencing it: the original
         * tested *old_handler, which crashes when callers pass RT_NULL
         * (as rt_serial_open() does). */
        if (old_handler != RT_NULL) *old_handler = (rt_isr_handler_t)AT91C_AIC_SVR(vector);
        if (new_handler != RT_NULL) AT91C_AIC_SVR(vector) = (rt_uint32_t)new_handler;
    }
}
/*@}*/

View File

@ -0,0 +1,383 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-08-23 Bernard first version
* 2009-05-14 Bernard add RT-THread device interface
*/
#include <rthw.h>
#include <rtthread.h>
#include "AT91SAM7S.h"
#include "serial.h"
/**
* @addtogroup AT91SAM7
*/
/*@{*/
/* Memory-mapped hardware register: volatile so every access really
 * reaches the peripheral. */
typedef volatile rt_uint32_t REG32;

/* Register layout of one AT91SAM7S USART (including its PDC channel). */
struct rt_at91serial_hw
{
    REG32 US_CR;         // Control Register
    REG32 US_MR;         // Mode Register
    REG32 US_IER;        // Interrupt Enable Register
    REG32 US_IDR;        // Interrupt Disable Register
    REG32 US_IMR;        // Interrupt Mask Register
    REG32 US_CSR;        // Channel Status Register
    REG32 US_RHR;        // Receiver Holding Register
    REG32 US_THR;        // Transmitter Holding Register
    REG32 US_BRGR;       // Baud Rate Generator Register
    REG32 US_RTOR;       // Receiver Time-out Register
    REG32 US_TTGR;       // Transmitter Time-guard Register
    REG32 Reserved0[5];  // padding up to FI_DI_Ratio Register
    REG32 US_FIDI;       // FI_DI_Ratio Register
    REG32 US_NER;        // Nb Errors Register
    REG32 Reserved1[1];  //
    REG32 US_IF;         // IRDA_FILTER Register
    REG32 Reserved2[44]; // padding up to the PDC registers
    REG32 US_RPR;        // Receive Pointer Register
    REG32 US_RCR;        // Receive Counter Register
    REG32 US_TPR;        // Transmit Pointer Register
    REG32 US_TCR;        // Transmit Counter Register
    REG32 US_RNPR;       // Receive Next Pointer Register
    REG32 US_RNCR;       // Receive Next Counter Register
    REG32 US_TNPR;       // Transmit Next Pointer Register
    REG32 US_TNCR;       // Transmit Next Counter Register
    REG32 US_PTCR;       // PDC Transfer Control Register
    REG32 US_PTSR;       // PDC Transfer Status Register
};

/* Driver state of one serial device on top of the hardware block. */
struct rt_at91serial
{
    struct rt_device parent;          /* generic RT-Thread device header */
    struct rt_at91serial_hw* hw_base; /* memory-mapped register base */
    rt_uint16_t peripheral_id;        /* AT91C_ID_US0 or AT91C_ID_US1 */
    rt_uint32_t baudrate;

    /* reception field: ring buffer filled by the ISR, drained by read() */
    rt_uint16_t save_index, read_index;
    rt_uint8_t rx_buffer[RT_UART_RX_BUFFER_SIZE];
};

/* static device instances for the enabled UARTs */
#ifdef RT_USING_UART1
struct rt_at91serial serial1;
#endif
#ifdef RT_USING_UART2
struct rt_at91serial serial2;
#endif
/*
 * Common receive ISR for US0/US1: stores one received character into the
 * software ring buffer, notifies the upper layer, then acknowledges the
 * AIC. Runs in interrupt context.
 */
static void rt_hw_serial_isr(int irqno)
{
    rt_base_t level;
    struct rt_device* device;
    struct rt_at91serial* serial = RT_NULL;

    /* map the interrupt source back to the driver instance */
    if (irqno == AT91C_ID_US0)
    {
#ifdef RT_USING_UART1
        /* serial 1 */
        serial = &serial1;
#endif
    }
    else if (irqno == AT91C_ID_US1)
    {
#ifdef RT_USING_UART2
        /* serial 2 */
        serial = &serial2;
#endif
    }
    RT_ASSERT(serial != RT_NULL);

    /* get generic device object */
    device = (rt_device_t)serial;

    /* disable interrupt: ring indices are shared with rt_serial_read() */
    level = rt_hw_interrupt_disable();

    /* get received character from the Receiver Holding Register */
    serial->rx_buffer[serial->save_index] = serial->hw_base->US_RHR;

    /* move to next position (ring wrap-around) */
    serial->save_index ++;
    if (serial->save_index >= RT_UART_RX_BUFFER_SIZE)
        serial->save_index = 0;

    /* if the next position is read index, discard this 'read char':
     * on overflow the oldest unread byte is dropped */
    if (serial->save_index == serial->read_index)
    {
        serial->read_index ++;
        if (serial->read_index >= RT_UART_RX_BUFFER_SIZE)
            serial->read_index = 0;
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);

    /* indicate to upper layer application that one byte arrived */
    if (device->rx_indicate != RT_NULL)
        device->rx_indicate(device, 1);

    /* ack interrupt: write the AIC End Of Interrupt Command Register */
    AT91C_AIC_EOICR = 1;
}
/*
 * Initialize the USART hardware: enable its clock, hand the RX/TX pins
 * over to the peripheral, program 8N1 normal mode and the baud rate
 * divisor, then enable receiver and transmitter. Also resets the
 * software RX ring buffer.
 *
 * @param dev the serial device to initialize
 * @return RT_EOK
 */
static rt_err_t rt_serial_init (rt_device_t dev)
{
    rt_uint32_t bd;
    struct rt_at91serial* serial = (struct rt_at91serial*) dev;
    RT_ASSERT(serial != RT_NULL);

    /* must be US0 or US1 */
    RT_ASSERT(((serial->peripheral_id == AT91C_ID_US0) ||
        (serial->peripheral_id == AT91C_ID_US1)));

    /* Enable Clock for USART */
    AT91C_PMC_PCER = 1 << serial->peripheral_id;

    /* Enable RxD0 and TxDO Pin */
    if (serial->peripheral_id == AT91C_ID_US0)
    {
        /* set pinmux: PIO lines 5/6 handed to the peripheral */
        AT91C_PIO_PDR = (1 << 5) | (1 << 6);
    }
    else if (serial->peripheral_id == AT91C_ID_US1)
    {
        /* set pinmux: PIO lines 21/22 handed to the peripheral */
        AT91C_PIO_PDR = (1 << 21) | (1 << 22);
    }

    /* reset and disable both directions before reprogramming */
    serial->hw_base->US_CR = AT91C_US_RSTRX | /* Reset Receiver */
        AT91C_US_RSTTX | /* Reset Transmitter */
        AT91C_US_RXDIS | /* Receiver Disable */
        AT91C_US_TXDIS; /* Transmitter Disable */

    serial->hw_base->US_MR = AT91C_US_USMODE_NORMAL | /* Normal Mode */
        AT91C_US_CLKS_CLOCK | /* Clock = MCK */
        AT91C_US_CHRL_8_BITS | /* 8-bit Data */
        AT91C_US_PAR_NONE | /* No Parity */
        AT91C_US_NBSTOP_1_BIT; /* 1 Stop Bit */

    /* set baud rate divisor: BRGR = MCK / (16 * baudrate),
     * computed in tenths so the result can be rounded to nearest */
    bd = ((MCK*10)/(serial->baudrate * 16));
    if ((bd % 10) >= 5) bd = (bd / 10) + 1;
    else bd /= 10;
    serial->hw_base->US_BRGR = bd;

    serial->hw_base->US_CR = AT91C_US_RXEN | /* Receiver Enable */
        AT91C_US_TXEN; /* Transmitter Enable */

    /* reset rx index */
    serial->save_index = 0;
    serial->read_index = 0;

    /* reset rx buffer */
    rt_memset(serial->rx_buffer, 0, RT_UART_RX_BUFFER_SIZE);

    return RT_EOK;
}
/*
 * Open the serial device. When the device was registered with
 * RT_DEVICE_FLAG_INT_RX, this hooks the receive ISR into the AIC and
 * enables the USART RxReady interrupt. 'oflag' is not inspected here.
 *
 * @param dev the serial device
 * @param oflag open flags (unused)
 * @return RT_EOK
 */
static rt_err_t rt_serial_open(rt_device_t dev, rt_uint16_t oflag)
{
    struct rt_at91serial *serial = (struct rt_at91serial*)dev;
    RT_ASSERT(serial != RT_NULL);

    if (dev->flag & RT_DEVICE_FLAG_INT_RX)
    {
        /* enable UART rx interrupt */
        serial->hw_base->US_IER = 1 << 0; /* RxReady interrupt */
        serial->hw_base->US_IMR |= 1 << 0; /* umask RxReady interrupt */

        /* install UART handler */
        rt_hw_interrupt_install(serial->peripheral_id, rt_hw_serial_isr, RT_NULL);
        /* AIC source mode: priority 5, source-type field (bits 6:5) = 1
         * NOTE(review): confirm the SRCTYPE encoding against the AIC
         * section of the AT91SAM7S datasheet */
        AT91C_AIC_SMR(serial->peripheral_id) = 5 | (0x01 << 5);
        rt_hw_interrupt_umask(serial->peripheral_id);
    }

    return RT_EOK;
}
/*
 * Close the serial device: when the device uses interrupt-driven
 * reception, disable and mask the USART RxReady interrupt again.
 *
 * @param dev the serial device
 * @return RT_EOK
 */
static rt_err_t rt_serial_close(rt_device_t dev)
{
    struct rt_at91serial *uart = (struct rt_at91serial *)dev;

    RT_ASSERT(uart != RT_NULL);

    if ((dev->flag & RT_DEVICE_FLAG_INT_RX) != 0)
    {
        uart->hw_base->US_IDR = 1 << 0;     /* disable RxReady interrupt */
        uart->hw_base->US_IMR &= ~(1 << 0); /* mask RxReady interrupt */
    }

    return RT_EOK;
}
/*
 * Read up to 'size' bytes from the serial device.
 * Interrupt mode: drain the software ring buffer (returns early when it
 * runs empty). Poll mode: busy-wait on RXRDY for every byte. DMA mode
 * is not implemented. 'pos' is ignored (character device).
 *
 * @return the number of bytes actually copied into 'buffer'
 */
static rt_ssize_t rt_serial_read (rt_device_t dev, rt_off_t pos, void* buffer, rt_size_t size)
{
    rt_uint8_t* ptr;
    struct rt_at91serial *serial = (struct rt_at91serial*)dev;
    RT_ASSERT(serial != RT_NULL);

    /* point to buffer */
    ptr = (rt_uint8_t*) buffer;

    if (dev->flag & RT_DEVICE_FLAG_INT_RX)
    {
        while (size)
        {
            /* interrupt receive */
            rt_base_t level;

            /* disable interrupt: ring indices are shared with the ISR */
            level = rt_hw_interrupt_disable();
            if (serial->read_index != serial->save_index)
            {
                *ptr = serial->rx_buffer[serial->read_index];

                serial->read_index ++;
                if (serial->read_index >= RT_UART_RX_BUFFER_SIZE)
                    serial->read_index = 0;
            }
            else
            {
                /* no data in rx buffer */
                /* enable interrupt */
                rt_hw_interrupt_enable(level);
                break;
            }

            /* enable interrupt */
            rt_hw_interrupt_enable(level);

            ptr ++; size --;
        }

        /* use rt_size_t for the pointer difference in both branches
         * (the original mixed rt_uint32_t here and rt_size_t below) */
        return (rt_size_t)ptr - (rt_size_t)buffer;
    }
    else if (dev->flag & RT_DEVICE_FLAG_DMA_RX)
    {
        /* not support right now */
        RT_ASSERT(0);
    }
    else
    {
        /* poll mode */
        while (size)
        {
            /* Wait for Full Rx Buffer */
            while (!(serial->hw_base->US_CSR & AT91C_US_RXRDY));

            /* Read Character */
            *ptr = serial->hw_base->US_RHR;
            ptr ++;
            size --;
        }

        return (rt_size_t)ptr - (rt_size_t)buffer;
    }

    return 0;
}
/*
 * Write 'size' bytes to the serial device by polling the transmitter.
 * In stream mode every '\n' is expanded to "\r\n". 'pos' is ignored
 * (character device).
 *
 * @return the number of bytes consumed from 'buffer' (0 when the
 *         device was not opened with write permission)
 */
static rt_ssize_t rt_serial_write (rt_device_t dev, rt_off_t pos, const void* buffer, rt_size_t size)
{
    rt_uint8_t* ptr;
    struct rt_at91serial *serial = (struct rt_at91serial*)dev;
    RT_ASSERT(serial != RT_NULL);

    ptr = (rt_uint8_t*) buffer;

    /* transmit only when the open flags include write permission;
     * otherwise ptr never advances and 0 is returned below */
    if (dev->open_flag & RT_DEVICE_OFLAG_WRONLY)
    {
        if (dev->flag & RT_DEVICE_FLAG_STREAM)
        {
            /* it's a stream mode device */
            while (size)
            {
                /* stream mode: send '\r' before every '\n' */
                if (*ptr == '\n')
                {
                    while (!(serial->hw_base->US_CSR & AT91C_US_TXRDY));
                    serial->hw_base->US_THR = '\r';
                }

                /* Wait for Empty Tx Buffer */
                while (!(serial->hw_base->US_CSR & AT91C_US_TXRDY));

                /* Transmit Character */
                serial->hw_base->US_THR = *ptr;
                ptr ++; size --;
            }
        }
        else
        {
            while (size)
            {
                /* Wait for Empty Tx Buffer */
                while (!(serial->hw_base->US_CSR & AT91C_US_TXRDY));

                /* Transmit Character */
                serial->hw_base->US_THR = *ptr;
                ptr ++; size --;
            }
        }
    }

    return (rt_size_t)ptr - (rt_size_t)buffer;
}
/*
 * Device control interface: this driver supports no runtime commands;
 * every request is accepted and ignored.
 */
static rt_err_t rt_serial_control (rt_device_t dev, int cmd, void *args)
{
    return RT_EOK;
}
/*
 * Register the enabled USART devices ("uart1"/"uart2") with the
 * RT-Thread device framework. The hardware itself is configured later
 * through the init/open callbacks.
 *
 * @return RT_EOK
 */
rt_err_t rt_hw_serial_init()
{
    rt_device_t device;

#ifdef RT_USING_UART1
    device = (rt_device_t) &serial1;

    /* init serial device private data */
    serial1.hw_base = (struct rt_at91serial_hw*)AT91C_BASE_US0;
    serial1.peripheral_id = AT91C_ID_US0;
    serial1.baudrate = 115200;

    /* set device virtual interface */
    device->init = rt_serial_init;
    device->open = rt_serial_open;
    device->close = rt_serial_close;
    device->read = rt_serial_read;
    device->write = rt_serial_write;
    device->control = rt_serial_control;

    /* register uart1 on device subsystem */
    rt_device_register(device, "uart1", RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_INT_RX);
#endif

#ifdef RT_USING_UART2
    device = (rt_device_t) &serial2;

    serial2.hw_base = (struct rt_at91serial_hw*)AT91C_BASE_US1;
    serial2.peripheral_id = AT91C_ID_US1;
    serial2.baudrate = 115200;

    /* set device virtual interface */
    device->init = rt_serial_init;
    device->open = rt_serial_open;
    device->close = rt_serial_close;
    device->read = rt_serial_read;
    device->write = rt_serial_write;
    device->control = rt_serial_control;

    /* register uart2 on device subsystem */
    rt_device_register(device, "uart2", RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_INT_RX);
#endif

    return RT_EOK;
}
/*@}*/

View File

@ -0,0 +1,64 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef __RT_SERIAL_H__
#define __RT_SERIAL_H__

/* USART base addresses (fallbacks when the chip header did not define them) */
#ifndef AT91C_BASE_US0
#define AT91C_BASE_US0 (0xFFFC0000) // (US0) Base Address
#endif
#ifndef AT91C_BASE_US1
#define AT91C_BASE_US1 (0xFFFC4000) // (US1) Base Address
#endif

/* US_CSR status / US_IER interrupt bits and US_CR command bits */
#define AT91C_US_RXRDY ((unsigned int) 0x1 << 0) /* (USART) RXRDY Interrupt */
#define AT91C_US_TXRDY ((unsigned int) 0x1 << 1) /* (USART) TXRDY Interrupt */
#define AT91C_US_RSTRX ((unsigned int) 0x1 << 2) /* (USART) Reset Receiver */
#define AT91C_US_RSTTX ((unsigned int) 0x1 << 3) /* (USART) Reset Transmitter */
#define AT91C_US_RXEN ((unsigned int) 0x1 << 4) /* (USART) Receiver Enable */
#define AT91C_US_RXDIS ((unsigned int) 0x1 << 5) /* (USART) Receiver Disable */
#define AT91C_US_TXEN ((unsigned int) 0x1 << 6) /* (USART) Transmitter Enable */
#define AT91C_US_TXDIS ((unsigned int) 0x1 << 7) /* (USART) Transmitter Disable */
#define AT91C_US_RSTSTA ((unsigned int) 0x1 << 8) /* (USART) Reset Status Bits */

/* US_MR mode register fields */
#define AT91C_US_USMODE_NORMAL ((unsigned int) 0x0) /* (USART) Normal */
#define AT91C_US_USMODE_RS485 ((unsigned int) 0x1) /* (USART) RS485 */
#define AT91C_US_USMODE_HWHSH ((unsigned int) 0x2) /* (USART) Hardware Handshaking */
#define AT91C_US_USMODE_MODEM ((unsigned int) 0x3) /* (USART) Modem */
#define AT91C_US_USMODE_ISO7816_0 ((unsigned int) 0x4) /* (USART) ISO7816 protocol: T = 0 */
#define AT91C_US_USMODE_ISO7816_1 ((unsigned int) 0x6) /* (USART) ISO7816 protocol: T = 1 */
#define AT91C_US_USMODE_IRDA ((unsigned int) 0x8) /* (USART) IrDA */
#define AT91C_US_USMODE_SWHSH ((unsigned int) 0xC) /* (USART) Software Handshaking */
#define AT91C_US_CLKS_CLOCK ((unsigned int) 0x0 << 4) /* (USART) Clock */
#define AT91C_US_CLKS_FDIV1 ((unsigned int) 0x1 << 4) /* (USART) fdiv1 */
#define AT91C_US_CLKS_SLOW ((unsigned int) 0x2 << 4) /* (USART) slow_clock (ARM) */
#define AT91C_US_CLKS_EXT ((unsigned int) 0x3 << 4) /* (USART) External (SCK) */
#define AT91C_US_CHRL_5_BITS ((unsigned int) 0x0 << 6) /* (USART) Character Length: 5 bits */
#define AT91C_US_CHRL_6_BITS ((unsigned int) 0x1 << 6) /* (USART) Character Length: 6 bits */
#define AT91C_US_CHRL_7_BITS ((unsigned int) 0x2 << 6) /* (USART) Character Length: 7 bits */
#define AT91C_US_CHRL_8_BITS ((unsigned int) 0x3 << 6) /* (USART) Character Length: 8 bits */
#define AT91C_US_PAR_EVEN ((unsigned int) 0x0 << 9) /* (USART/DBGU) Even Parity */
#define AT91C_US_PAR_ODD ((unsigned int) 0x1 << 9) /* (USART/DBGU) Odd Parity */
#define AT91C_US_PAR_SPACE ((unsigned int) 0x2 << 9) /* (USART/DBGU) Parity forced to 0 (Space) */
#define AT91C_US_PAR_MARK ((unsigned int) 0x3 << 9) /* (USART/DBGU) Parity forced to 1 (Mark) */
#define AT91C_US_PAR_NONE ((unsigned int) 0x4 << 9) /* (USART/DBGU) No Parity */
#define AT91C_US_PAR_MULTI_DROP ((unsigned int) 0x6 << 9) /* (USART/DBGU) Multi-drop mode */
#define AT91C_US_NBSTOP_1_BIT ((unsigned int) 0x0 << 12) /* USART 1 stop bit */
#define AT91C_US_NBSTOP_15_BIT ((unsigned int) 0x1 << 12) /* USART 1.5 stop bits (asynchronous, SYNC=0) */
#define AT91C_US_NBSTOP_2_BIT ((unsigned int) 0x2 << 12) /* USART 2 stop bits */

/* board clock and default baud rate */
#define MCK 48054857 /* master clock frequency in Hz */
#define BR 115200 /* Baud Rate */
#define BRD (MCK/16/BR) /* Baud Rate Divisor */

#endif

View File

@ -0,0 +1,59 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-08-23 Bernard the first version
*/
#include <rtthread.h>
#include "AT91SAM7S.h"
/**
* @addtogroup AT91SAM7
*/
/*@{*/
/**
* This function will initialize thread stack
*
* @param tentry the entry of thread
* @param parameter the parameter of entry
* @param stack_addr the beginning stack address
* @param texit the function will be called when thread exit
*
* @return stack address
*/
rt_uint8_t *rt_hw_stack_init(void *tentry, void *parameter,
    rt_uint8_t *stack_addr, void *texit)
{
    rt_uint32_t *stk;

    /* step past the first word, then round down to an 8-byte boundary
     * (AAPCS stack alignment) */
    stack_addr += sizeof(rt_uint32_t);
    stack_addr = (rt_uint8_t *)RT_ALIGN_DOWN((rt_uint32_t)stack_addr, 8);
    stk = (rt_uint32_t *)stack_addr;

    /* Build the frame the context-switch code expects, from bottom to
     * top: pc, lr, r12..r0, cpsr, spsr (spsr ends up on top of stack). */
    *(--stk) = (rt_uint32_t)tentry; /* entry point */
    *(--stk) = (rt_uint32_t)texit; /* lr */
    *(--stk) = 0xdeadbeef; /* r12 */
    *(--stk) = 0xdeadbeef; /* r11 */
    *(--stk) = 0xdeadbeef; /* r10 */
    *(--stk) = 0xdeadbeef; /* r9 */
    *(--stk) = 0xdeadbeef; /* r8 */
    *(--stk) = 0xdeadbeef; /* r7 */
    *(--stk) = 0xdeadbeef; /* r6 */
    *(--stk) = 0xdeadbeef; /* r5 */
    *(--stk) = 0xdeadbeef; /* r4 */
    *(--stk) = 0xdeadbeef; /* r3 */
    *(--stk) = 0xdeadbeef; /* r2 */
    *(--stk) = 0xdeadbeef; /* r1 */
    *(--stk) = (rt_uint32_t)parameter; /* r0 : argument */
    *(--stk) = SVCMODE; /* cpsr : supervisor mode, IRQ/FIQ enabled */
    *(--stk) = SVCMODE; /* spsr : same initial mode */

    /* return task's current stack address */
    return (rt_uint8_t *)stk;
}
/*@}*/

View File

@ -0,0 +1,233 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-08-31 Bernard first version
*/
/* Internal Memory Base Addresses */
.equ FLASH_BASE, 0x00100000
.equ RAM_BASE, 0x00200000
/* Stack Configuration: one region per ARM mode, carved downward from
 * TOP_STACK (0x00204000 = RAM_BASE + 16 KiB, i.e. top of on-chip SRAM) */
.equ TOP_STACK, 0x00204000
.equ UND_STACK_SIZE, 0x00000100
.equ SVC_STACK_SIZE, 0x00000400
.equ ABT_STACK_SIZE, 0x00000100
.equ FIQ_STACK_SIZE, 0x00000100
.equ IRQ_STACK_SIZE, 0x00000100
.equ USR_STACK_SIZE, 0x00000004
/* ARM architecture definitions: CPSR mode field values */
.equ MODE_USR, 0x10
.equ MODE_FIQ, 0x11
.equ MODE_IRQ, 0x12
.equ MODE_SVC, 0x13
.equ MODE_ABT, 0x17
.equ MODE_UND, 0x1B
.equ MODE_SYS, 0x1F
.equ I_BIT, 0x80 /* when this bit is set, IRQ is disabled */
.equ F_BIT, 0x40 /* when this bit is set, FIQ is disabled */
.section .init, "ax"
.code 32
.align 0
/* ARM exception vector table (lives at address 0 after remap).  Each slot is
 * one instruction; the indirect loads pick up full 32-bit handler addresses
 * from the literal words that follow. */
.globl _start
_start:
    b reset                    /* reset */
    ldr pc, _vector_undef      /* undefined instruction */
    ldr pc, _vector_swi        /* software interrupt */
    ldr pc, _vector_pabt       /* prefetch abort */
    ldr pc, _vector_dabt       /* data abort */
    nop                        /* reserved vector */
    ldr pc, _vector_irq        /* IRQ */
    ldr pc, _vector_fiq        /* FIQ */

_vector_undef:  .word vector_undef
_vector_swi:    .word vector_swi
_vector_pabt:   .word vector_pabt
_vector_dabt:   .word vector_dabt
_vector_resv:   .word vector_resv
_vector_irq:    .word vector_irq
_vector_fiq:    .word vector_fiq

/*
 * rtthread bss start and end
 * which are defined in linker script
 */
.globl _bss_start
_bss_start: .word __bss_start
.globl _bss_end
_bss_end:   .word __bss_end
/* the system entry: clocks, mode stacks, optional .data relocation / SRAM
 * remap, then jump into rtthread_startup (never returns) */
reset:
    /* disable watchdog: set WDDIS (bit 15) in WDT_MR (0xFFFFFD40 + 0x04) */
    ldr r0, =0xFFFFFD40
    ldr r1, =0x00008000
    str r1, [r0, #0x04]
    /* enable the main oscillator: PMC_MOR (PMC base 0xFFFFFC00 + 0x20) */
    ldr r0, =0xFFFFFC00
    ldr r1, =0x00000601
    str r1, [r0, #0x20]
    /* wait for main oscillator to stabilize: poll MOSCS (bit 0) in PMC_SR (+0x68) */
moscs_loop:
    ldr r2, [r0, #0x68]
    ands r2, r2, #1
    beq moscs_loop
    /* set up the PLL: PMC_PLLR (+0x2C) */
    ldr r1, =0x00191C05
    str r1, [r0, #0x2C]
    /* wait for PLL to lock: poll LOCK (bit 2) in PMC_SR */
pll_loop:
    ldr r2, [r0, #0x68]
    ands r2, r2, #0x04
    beq pll_loop
    /* select clock: PMC_MCKR (+0x30) = 0x7 -> CSS=PLL clock, PRES=/2 */
    ldr r1, =0x00000007
    str r1, [r0, #0x30]
    /* setup stack for each mode, carving regions downward from TOP_STACK */
    ldr r0, =TOP_STACK
    /* set stack */
    /* undefined instruction mode */
    msr cpsr_c, #MODE_UND|I_BIT|F_BIT
    mov sp, r0
    sub r0, r0, #UND_STACK_SIZE
    /* abort mode */
    msr cpsr_c, #MODE_ABT|I_BIT|F_BIT
    mov sp, r0
    sub r0, r0, #ABT_STACK_SIZE
    /* FIQ mode */
    msr cpsr_c, #MODE_FIQ|I_BIT|F_BIT
    mov sp, r0
    sub r0, r0, #FIQ_STACK_SIZE
    /* IRQ mode */
    msr cpsr_c, #MODE_IRQ|I_BIT|F_BIT
    mov sp, r0
    sub r0, r0, #IRQ_STACK_SIZE
    /* supervisor mode: the kernel runs here; note IRQ/FIQ are re-enabled */
    msr cpsr_c, #MODE_SVC
    mov sp, r0
#ifdef __FLASH_BUILD__
    /* Relocate .data section (Copy from ROM to RAM) */
    ldr r1, =_etext
    ldr r2, =_data
    ldr r3, =_edata
data_loop:
    cmp r2, r3
    ldrlo r0, [r1], #4
    strlo r0, [r2], #4
    blo data_loop
#else
    /* remap SRAM to 0x0000 so the RAM copy of the vector table is live */
    ldr r0, =0xFFFFFF00
    mov r1, #0x01
    str r1, [r0]
#endif
    /* mask all IRQs by writing all-ones to the AIC disable register */
    ldr r1, =0xFFFFF124
    ldr r0, =0XFFFFFFFF
    str r0, [r1]
    /* start RT-Thread Kernel (tail jump; rtthread_startup never returns) */
    ldr pc, _rtthread_startup
_rtthread_startup: .word rtthread_startup
/* exception handlers: unexpected exceptions spin in place so a debugger can
 * inspect the faulting state */
vector_undef: b vector_undef
vector_swi : b vector_swi
vector_pabt : b vector_pabt
vector_dabt : b vector_dabt
vector_resv : b vector_resv

.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread

/* IRQ entry: save scratch context on the IRQ stack, run the C dispatcher,
 * then either return to the interrupted thread or fall into the
 * interrupt-driven context switch */
vector_irq:
    stmfd sp!, {r0-r12,lr}
    bl rt_interrupt_enter
    bl rt_hw_trap_irq
    bl rt_interrupt_leave
    /*
     * if rt_thread_switch_interrupt_flag set, jump to
     * rt_hw_context_switch_interrupt_do and don't return
     */
    ldr r0, =rt_thread_switch_interrupt_flag
    ldr r1, [r0]
    cmp r1, #1
    beq rt_hw_context_switch_interrupt_do
    ldmfd sp!, {r0-r12,lr}
    subs pc, lr, #4            /* IRQ return: lr holds interrupted pc + 4 */

vector_fiq:
    stmfd sp!,{r0-r7,lr}
    bl rt_hw_trap_fiq
    ldmfd sp!,{r0-r7,lr}
    subs pc,lr,#4
/*
 * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
 *
 * Reached from vector_irq (IRQ mode) with r0 = &rt_thread_switch_interrupt_flag
 * and the interrupted thread's r0-r12,lr still on the IRQ stack.  Rebuilds the
 * full context frame on the thread's own stack, saves its sp into the "from"
 * TCB, and resumes the "to" thread.
 */
rt_hw_context_switch_interrupt_do:
    mov r1, #0                 /* clear flag */
    str r1, [r0]
    ldmfd sp!, {r0-r12,lr}     /* reload saved registers */
    stmfd sp!, {r0-r3}         /* save r0-r3; r1 will point at this stash */
    mov r1, sp
    add sp, sp, #16            /* restore sp (IRQ stack now balanced) */
    sub r2, lr, #4             /* save old task's pc to r2 */
    mrs r3, spsr               /* r3 = interrupted thread's cpsr */
    orr r0, r3, #I_BIT|F_BIT   /* disable interrupt in the mode we return to */
    msr spsr_c, r0
    ldr r0, =.+8               /* switch to interrupted task's stack: the
                                  movs pc,r0 copies spsr into cpsr, landing
                                  on the next instruction in thread mode */
    movs pc, r0
    stmfd sp!, {r2}            /* push old task's pc */
    stmfd sp!, {r4-r12,lr}     /* push old task's lr,r12-r4 */
    mov r4, r1                 /* Special optimised code below */
    mov r5, r3
    ldmfd r4!, {r0-r3}         /* fetch r0-r3 stashed on the IRQ stack */
    stmfd sp!, {r0-r3}         /* push old task's r3-r0 */
    stmfd sp!, {r5}            /* push old task's psr */
    mrs r4, spsr
    stmfd sp!, {r4}            /* push old task's spsr */
    ldr r4, =rt_interrupt_from_thread
    ldr r5, [r4]
    str sp, [r5]               /* store sp in preempted tasks's TCB */
    ldr r6, =rt_interrupt_to_thread
    ldr r6, [r6]
    ldr sp, [r6]               /* get new task's stack pointer */
    ldmfd sp!, {r4}            /* pop new task's spsr */
    msr SPSR_cxsf, r4
    ldmfd sp!, {r4}            /* pop new task's psr */
    msr CPSR_cxsf, r4
    ldmfd sp!, {r0-r12,lr,pc}  /* pop new task's r0-r12,lr & pc */

View File

@ -0,0 +1,499 @@
;/*****************************************************************************/
;/* SAM7.S: Startup file for Atmel AT91SAM7 device series */
;/*****************************************************************************/
;/* <<< Use Configuration Wizard in Context Menu >>> */
;/*****************************************************************************/
;/* This file is part of the uVision/ARM development tools. */
;/* Copyright (c) 2005-2006 Keil Software. All rights reserved. */
;/* This software may only be used under the terms of a valid, current, */
;/* end user licence from KEIL for a compatible version of KEIL software */
;/* development tools. Nothing else gives you the right to use this software. */
;/*****************************************************************************/
;/*
; * The SAM7.S code is executed after CPU Reset. This file may be
; * translated with the following SET symbols. In uVision these SET
; * symbols are entered under Options - ASM - Define.
; *
; * REMAP: when set the startup code remaps exception vectors from
; * on-chip RAM to address 0.
; *
; * RAM_INTVEC: when set the startup code copies exception vectors
; * from on-chip Flash to on-chip RAM.
; */
; Standard definitions of Mode bits and Interrupt (I & F) flags in PSRs
Mode_USR EQU 0x10
Mode_FIQ EQU 0x11
Mode_IRQ EQU 0x12
Mode_SVC EQU 0x13
Mode_ABT EQU 0x17
Mode_UND EQU 0x1B
Mode_SYS EQU 0x1F
I_Bit EQU 0x80 ; when I bit is set, IRQ is disabled
F_Bit EQU 0x40 ; when F bit is set, FIQ is disabled
; Internal Memory Base Addresses
FLASH_BASE EQU 0x00100000
RAM_BASE EQU 0x00200000
;// <h> Stack Configuration (Stack Sizes in Bytes)
;// <o0> Undefined Mode <0x0-0xFFFFFFFF:8>
;// <o1> Supervisor Mode <0x0-0xFFFFFFFF:8>
;// <o2> Abort Mode <0x0-0xFFFFFFFF:8>
;// <o3> Fast Interrupt Mode <0x0-0xFFFFFFFF:8>
;// <o4> Interrupt Mode <0x0-0xFFFFFFFF:8>
;// <o5> User/System Mode <0x0-0xFFFFFFFF:8>
;// </h>
UND_Stack_Size EQU 0x00000000
SVC_Stack_Size EQU 0x00000100
ABT_Stack_Size EQU 0x00000000
FIQ_Stack_Size EQU 0x00000000
IRQ_Stack_Size EQU 0x00000100
USR_Stack_Size EQU 0x00000100
ISR_Stack_Size EQU (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
FIQ_Stack_Size + IRQ_Stack_Size)
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE USR_Stack_Size
__initial_sp SPACE ISR_Stack_Size
Stack_Top
;// <h> Heap Configuration
;// <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF>
;// </h>
Heap_Size EQU 0x00000000
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
; Reset Controller (RSTC) definitions
RSTC_BASE EQU 0xFFFFFD00 ; RSTC Base Address
RSTC_MR EQU 0x08 ; RSTC_MR Offset
;/*
;// <e> Reset Controller (RSTC)
;// <o1.0> URSTEN: User Reset Enable
;// <i> Enables NRST Pin to generate Reset
;// <o1.8..11> ERSTL: External Reset Length <0-15>
;// <i> External Reset Time in 2^(ERSTL+1) Slow Clock Cycles
;// </e>
;*/
RSTC_SETUP EQU 1
RSTC_MR_Val EQU 0xA5000401
; Embedded Flash Controller (EFC) definitions
EFC_BASE EQU 0xFFFFFF00 ; EFC Base Address
EFC0_FMR EQU 0x60 ; EFC0_FMR Offset
EFC1_FMR EQU 0x70 ; EFC1_FMR Offset
;// <e> Embedded Flash Controller 0 (EFC0)
;// <o1.16..23> FMCN: Flash Microsecond Cycle Number <0-255>
;// <i> Number of Master Clock Cycles in 1us
;// <o1.8..9> FWS: Flash Wait State
;// <0=> Read: 1 cycle / Write: 2 cycles
;// <1=> Read: 2 cycle / Write: 3 cycles
;// <2=> Read: 3 cycle / Write: 4 cycles
;// <3=> Read: 4 cycle / Write: 4 cycles
;// </e>
EFC0_SETUP EQU 1
EFC0_FMR_Val EQU 0x00320100
;// <e> Embedded Flash Controller 1 (EFC1)
;// <o1.16..23> FMCN: Flash Microsecond Cycle Number <0-255>
;// <i> Number of Master Clock Cycles in 1us
;// <o1.8..9> FWS: Flash Wait State
;// <0=> Read: 1 cycle / Write: 2 cycles
;// <1=> Read: 2 cycle / Write: 3 cycles
;// <2=> Read: 3 cycle / Write: 4 cycles
;// <3=> Read: 4 cycle / Write: 4 cycles
;// </e>
EFC1_SETUP EQU 0
EFC1_FMR_Val EQU 0x00320100
; Watchdog Timer (WDT) definitions
WDT_BASE EQU 0xFFFFFD40 ; WDT Base Address
WDT_MR EQU 0x04 ; WDT_MR Offset
;// <e> Watchdog Timer (WDT)
;// <o1.0..11> WDV: Watchdog Counter Value <0-4095>
;// <o1.16..27> WDD: Watchdog Delta Value <0-4095>
;// <o1.12> WDFIEN: Watchdog Fault Interrupt Enable
;// <o1.13> WDRSTEN: Watchdog Reset Enable
;// <o1.14> WDRPROC: Watchdog Reset Processor
;// <o1.28> WDDBGHLT: Watchdog Debug Halt
;// <o1.29> WDIDLEHLT: Watchdog Idle Halt
;// <o1.15> WDDIS: Watchdog Disable
;// </e>
WDT_SETUP EQU 1
WDT_MR_Val EQU 0x00008000
; Power Management Controller (PMC) definitions
PMC_BASE EQU 0xFFFFFC00 ; PMC Base Address
PMC_MOR EQU 0x20 ; PMC_MOR Offset
PMC_MCFR EQU 0x24 ; PMC_MCFR Offset
PMC_PLLR EQU 0x2C ; PMC_PLLR Offset
PMC_MCKR EQU 0x30 ; PMC_MCKR Offset
PMC_SR EQU 0x68 ; PMC_SR Offset
PMC_MOSCEN EQU (1<<0) ; Main Oscillator Enable
PMC_OSCBYPASS EQU (1<<1) ; Main Oscillator Bypass
PMC_OSCOUNT EQU (0xFF<<8) ; Main OScillator Start-up Time
PMC_DIV EQU (0xFF<<0) ; PLL Divider
PMC_PLLCOUNT EQU (0x3F<<8) ; PLL Lock Counter
PMC_OUT EQU (0x03<<14) ; PLL Clock Frequency Range
PMC_MUL EQU (0x7FF<<16) ; PLL Multiplier
PMC_USBDIV EQU (0x03<<28) ; USB Clock Divider
PMC_CSS EQU (3<<0) ; Clock Source Selection
PMC_PRES EQU (7<<2) ; Prescaler Selection
PMC_MOSCS EQU (1<<0) ; Main Oscillator Stable
PMC_LOCK EQU (1<<2) ; PLL Lock Status
PMC_MCKRDY EQU (1<<3) ; Master Clock Status
;// <e> Power Management Controller (PMC)
;// <h> Main Oscillator
;// <o1.0> MOSCEN: Main Oscillator Enable
;// <o1.1> OSCBYPASS: Oscillator Bypass
;// <o1.8..15> OSCCOUNT: Main Oscillator Startup Time <0-255>
;// </h>
;// <h> Phase Locked Loop (PLL)
;// <o2.0..7> DIV: PLL Divider <0-255>
;// <o2.16..26> MUL: PLL Multiplier <0-2047>
;// <i> PLL Output is multiplied by MUL+1
;// <o2.14..15> OUT: PLL Clock Frequency Range
;// <0=> 80..160MHz <1=> Reserved
;// <2=> 150..220MHz <3=> Reserved
;// <o2.8..13> PLLCOUNT: PLL Lock Counter <0-63>
;// <o2.28..29> USBDIV: USB Clock Divider
;// <0=> None <1=> 2 <2=> 4 <3=> Reserved
;// </h>
;// <o3.0..1> CSS: Clock Source Selection
;// <0=> Slow Clock
;// <1=> Main Clock
;// <2=> Reserved
;// <3=> PLL Clock
;// <o3.2..4> PRES: Prescaler
;// <0=> None
;// <1=> Clock / 2 <2=> Clock / 4
;// <3=> Clock / 8 <4=> Clock / 16
;// <5=> Clock / 32 <6=> Clock / 64
;// <7=> Reserved
;// </e>
PMC_SETUP EQU 1
PMC_MOR_Val EQU 0x00000601
PMC_PLLR_Val EQU 0x00191C05
PMC_MCKR_Val EQU 0x00000007
                PRESERVE8

; Area Definition and Entry Point
; Startup Code must be linked first at Address at which it expects to run.
                AREA    RESET, CODE, READONLY
                ARM

; Exception Vectors
; Mapped to Address 0.
; Absolute addressing mode must be used.
; Dummy Handlers are implemented as infinite loops which can be modified.
; Each slot is one instruction; the DCD words below keep the full 32-bit
; handler addresses in reach of the PC-relative loads.
Vectors         LDR     PC,Reset_Addr
                LDR     PC,Undef_Addr
                LDR     PC,SWI_Addr
                LDR     PC,PAbt_Addr
                LDR     PC,DAbt_Addr
                NOP                             ; Reserved Vector
                LDR     PC,IRQ_Addr
                LDR     PC,FIQ_Addr

Reset_Addr      DCD     Reset_Handler
Undef_Addr      DCD     Undef_Handler
SWI_Addr        DCD     SWI_Handler
PAbt_Addr       DCD     PAbt_Handler
DAbt_Addr       DCD     DAbt_Handler
                DCD     0                       ; Reserved Address
IRQ_Addr        DCD     IRQ_Handler
FIQ_Addr        DCD     FIQ_Handler

; Unexpected exceptions spin here so the fault state stays inspectable
Undef_Handler   B       Undef_Handler
SWI_Handler     B       SWI_Handler
PAbt_Handler    B       PAbt_Handler
DAbt_Handler    B       DAbt_Handler
FIQ_Handler     B       FIQ_Handler
; Reset Handler: configure reset controller, flash wait states, watchdog and
; clocks per the EQU values above, optionally relocate/remap vectors, set up
; one stack per ARM mode, then jump into the C runtime entry (__main).
                EXPORT  Reset_Handler
Reset_Handler

; Setup RSTC
                IF      RSTC_SETUP != 0
                LDR     R0, =RSTC_BASE
                LDR     R1, =RSTC_MR_Val
                STR     R1, [R0, #RSTC_MR]
                ENDIF

; Setup EFC0
                IF      EFC0_SETUP != 0
                LDR     R0, =EFC_BASE
                LDR     R1, =EFC0_FMR_Val
                STR     R1, [R0, #EFC0_FMR]
                ENDIF

; Setup EFC1
                IF      EFC1_SETUP != 0
                LDR     R0, =EFC_BASE
                LDR     R1, =EFC1_FMR_Val
                STR     R1, [R0, #EFC1_FMR]
                ENDIF

; Setup WDT (WDT_MR_Val sets WDDIS: watchdog disabled)
                IF      WDT_SETUP != 0
                LDR     R0, =WDT_BASE
                LDR     R1, =WDT_MR_Val
                STR     R1, [R0, #WDT_MR]
                ENDIF

; Setup PMC
                IF      PMC_SETUP != 0
                LDR     R0, =PMC_BASE
; Setup Main Oscillator
                LDR     R1, =PMC_MOR_Val
                STR     R1, [R0, #PMC_MOR]
; Wait until Main Oscillator is stabilized (poll MOSCS in PMC_SR)
                IF      (PMC_MOR_Val:AND:PMC_MOSCEN) != 0
MOSCS_Loop      LDR     R2, [R0, #PMC_SR]
                ANDS    R2, R2, #PMC_MOSCS
                BEQ     MOSCS_Loop
                ENDIF
; Setup the PLL
                IF      (PMC_PLLR_Val:AND:PMC_MUL) != 0
                LDR     R1, =PMC_PLLR_Val
                STR     R1, [R0, #PMC_PLLR]
; Wait until PLL is stabilized (poll LOCK in PMC_SR)
PLL_Loop        LDR     R2, [R0, #PMC_SR]
                ANDS    R2, R2, #PMC_LOCK
                BEQ     PLL_Loop
                ENDIF
; Select Clock: program CSS first, wait for MCKRDY, then the full MCKR value
                IF      (PMC_MCKR_Val:AND:PMC_CSS) == 1     ; Main Clock Selected
                LDR     R1, =PMC_MCKR_Val
                AND     R1, #PMC_CSS
                STR     R1, [R0, #PMC_MCKR]
WAIT_Rdy1       LDR     R2, [R0, #PMC_SR]
                ANDS    R2, R2, #PMC_MCKRDY
                BEQ     WAIT_Rdy1
                LDR     R1, =PMC_MCKR_Val
                STR     R1, [R0, #PMC_MCKR]
WAIT_Rdy2       LDR     R2, [R0, #PMC_SR]
                ANDS    R2, R2, #PMC_MCKRDY
                BEQ     WAIT_Rdy2
                ELIF    (PMC_MCKR_Val:AND:PMC_CSS) == 3     ; PLL Clock Selected
                LDR     R1, =PMC_MCKR_Val
                AND     R1, #PMC_PRES
                STR     R1, [R0, #PMC_MCKR]
WAIT_Rdy1       LDR     R2, [R0, #PMC_SR]
                ANDS    R2, R2, #PMC_MCKRDY
                BEQ     WAIT_Rdy1
                LDR     R1, =PMC_MCKR_Val
                STR     R1, [R0, #PMC_MCKR]
WAIT_Rdy2       LDR     R2, [R0, #PMC_SR]
                ANDS    R2, R2, #PMC_MCKRDY
                BEQ     WAIT_Rdy2
                ENDIF   ; Select Clock
                ENDIF   ; PMC_SETUP

; Copy Exception Vectors to Internal RAM (8 vector slots + 8 literal words)
                IF      :DEF:RAM_INTVEC
                ADR     R8, Vectors             ; Source
                LDR     R9, =RAM_BASE           ; Destination
                LDMIA   R8!, {R0-R7}            ; Load Vectors
                STMIA   R9!, {R0-R7}            ; Store Vectors
                LDMIA   R8!, {R0-R7}            ; Load Handler Addresses
                STMIA   R9!, {R0-R7}            ; Store Handler Addresses
                ENDIF

; Remap on-chip RAM to address 0
MC_BASE         EQU     0xFFFFFF00              ; MC Base Address
MC_RCR          EQU     0x00                    ; MC_RCR Offset
                IF      :DEF:REMAP
                LDR     R0, =MC_BASE
                MOV     R1, #1
                STR     R1, [R0, #MC_RCR]       ; Remap
                ENDIF

; Setup Stack for each mode, carved downward from Stack_Top
                LDR     R0, =Stack_Top
; Enter Undefined Instruction Mode and set its Stack Pointer
                MSR     CPSR_c, #Mode_UND:OR:I_Bit:OR:F_Bit
                MOV     SP, R0
                SUB     R0, R0, #UND_Stack_Size
; Enter Abort Mode and set its Stack Pointer
                MSR     CPSR_c, #Mode_ABT:OR:I_Bit:OR:F_Bit
                MOV     SP, R0
                SUB     R0, R0, #ABT_Stack_Size
; Enter FIQ Mode and set its Stack Pointer
                MSR     CPSR_c, #Mode_FIQ:OR:I_Bit:OR:F_Bit
                MOV     SP, R0
                SUB     R0, R0, #FIQ_Stack_Size
; Enter IRQ Mode and set its Stack Pointer
                MSR     CPSR_c, #Mode_IRQ:OR:I_Bit:OR:F_Bit
                MOV     SP, R0
                SUB     R0, R0, #IRQ_Stack_Size
; Enter Supervisor Mode and set its Stack Pointer (kernel runs in SVC)
                MSR     CPSR_c, #Mode_SVC:OR:I_Bit:OR:F_Bit
                MOV     SP, R0
                SUB     R0, R0, #SVC_Stack_Size
; Enter User Mode and set its Stack Pointer
;               MSR     CPSR_c, #Mode_USR
                IF      :DEF:__MICROLIB
                EXPORT  __initial_sp
                ELSE
; No usr mode stack here.
;               MOV     SP, R0
;               SUB     SL, SP, #USR_Stack_Size
                ENDIF

; Enter the C code (library startup; never returns)
                IMPORT  __main
                LDR     R0, =__main
                BX      R0
                IMPORT  rt_interrupt_enter
                IMPORT  rt_interrupt_leave
                IMPORT  rt_thread_switch_interrupt_flag
                IMPORT  rt_interrupt_from_thread
                IMPORT  rt_interrupt_to_thread
                IMPORT  rt_hw_trap_irq

; IRQ entry: save scratch context on the IRQ stack, run the C dispatcher,
; then either return to the interrupted thread or fall into the
; interrupt-driven context switch.
IRQ_Handler     PROC
                EXPORT  IRQ_Handler
                STMFD   sp!, {r0-r12,lr}
                BL      rt_interrupt_enter
                BL      rt_hw_trap_irq
                BL      rt_interrupt_leave
; if rt_thread_switch_interrupt_flag set, jump to
; rt_hw_context_switch_interrupt_do and don't return
                LDR     r0, =rt_thread_switch_interrupt_flag
                LDR     r1, [r0]
                CMP     r1, #1
                BEQ     rt_hw_context_switch_interrupt_do
                LDMFD   sp!, {r0-r12,lr}
                SUBS    pc, lr, #4              ; IRQ return: lr = interrupted pc + 4
                ENDP
; /*
;  * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
;  *
;  * Reached from IRQ_Handler with r0 = &rt_thread_switch_interrupt_flag and
;  * the interrupted thread's r0-r12,lr still on the IRQ stack.  Rebuilds the
;  * full context frame on the thread's own (SVC) stack, saves its sp into
;  * the "from" TCB, and resumes the "to" thread.
;  */
rt_hw_context_switch_interrupt_do   PROC
                EXPORT rt_hw_context_switch_interrupt_do
                MOV     r1,  #0         ; clear flag
                STR     r1,  [r0]
                LDMFD   sp!, {r0-r12,lr}; reload saved registers
                STMFD   sp!, {r0-r3}    ; save r0-r3; r1 will point at this stash
                MOV     r1,  sp
                ADD     sp,  sp, #16    ; restore sp (IRQ stack now balanced)
                SUB     r2,  lr, #4     ; save old task's pc to r2
                MRS     r3,  spsr       ; get cpsr of interrupt thread
; switch to SVC mode and no interrupt
                MSR     cpsr_c, #I_Bit|F_Bit|Mode_SVC
                STMFD   sp!, {r2}       ; push old task's pc
                STMFD   sp!, {r4-r12,lr}; push old task's lr,r12-r4
                MOV     r4,  r1         ; Special optimised code below
                MOV     r5,  r3
                LDMFD   r4!, {r0-r3}    ; fetch r0-r3 stashed on the IRQ stack
                STMFD   sp!, {r0-r3}    ; push old task's r3-r0
                STMFD   sp!, {r5}       ; push old task's cpsr
                MRS     r4,  spsr
                STMFD   sp!, {r4}       ; push old task's spsr
                LDR     r4,  =rt_interrupt_from_thread
                LDR     r5,  [r4]
                STR     sp,  [r5]       ; store sp in preempted tasks's TCB
                LDR     r6,  =rt_interrupt_to_thread
                LDR     r6,  [r6]
                LDR     sp,  [r6]       ; get new task's stack pointer
                LDMFD   sp!, {r4}       ; pop new task's spsr
                MSR     spsr_cxsf, r4
                LDMFD   sp!, {r4}       ; pop new task's psr
                MSR     cpsr_cxsf, r4
                LDMFD   sp!, {r0-r12,lr,pc} ; pop new task's r0-r12,lr & pc
                ENDP
                IF      :DEF:__MICROLIB
                EXPORT  __heap_base
                EXPORT  __heap_limit
                ELSE
; User Initial Stack & Heap, per the ARM C library two-region convention:
; heap base/limit in R0/R2, stack base/limit in R1/R3.
; NOTE(review): R1 adds IRQ_Stack_Size although Stack_Mem is sized by
; USR_Stack_Size above — confirm this is the intended stack bound.
                AREA    |.text|, CODE, READONLY
                IMPORT  __use_two_region_memory
                EXPORT  __user_initial_stackheap
__user_initial_stackheap
                LDR     R0, =  Heap_Mem
                LDR     R1, = (Stack_Mem + IRQ_Stack_Size)
                LDR     R2, = (Heap_Mem + Heap_Size)
                LDR     R3, =  Stack_Mem
                BX      LR
                ENDIF
                END

View File

@ -0,0 +1,36 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-08-25 Bernard first version
*/
#include <rtthread.h>
#include <rthw.h>
#include "AT91SAM7S.h"
/**
* @addtogroup AT91SAM7
*/
/*@{*/
void rt_hw_trap_irq()
{
rt_isr_handler_t hander = (rt_isr_handler_t)AT91C_AIC_IVR;
hander(AT91C_AIC_ISR);
/* end of interrupt */
AT91C_AIC_EOICR = 0;
}
/**
 * FIQ trap entry, called from the assembly vector_fiq path.
 * No FIQ sources are handled on this port; just report the event.
 */
void rt_hw_trap_fiq(void)
{
    rt_kprintf("fast interrupt request\n");
}
/*@}*/

View File

@ -0,0 +1,23 @@
# RT-Thread building script for component
from building import *

Import('rtconfig')

cwd = GetCurrentDir()
CPPPATH = [cwd]

# toolchain-specific assembly sources, keyed by platform name
ASM_PATTERNS = {
    'armcc':    ['*_rvds.S'],
    'armclang': ['*_rvds.S'],
    'gcc':      ['*_init.S', '*_gcc.S'],
    'iccarm':   ['*_iar.S'],
}

src = Glob('*.c') + Glob('*.cpp')
for pattern in ASM_PATTERNS.get(rtconfig.PLATFORM, []):
    src += Glob(pattern)

group = DefineGroup('libcpu', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

View File

@ -0,0 +1,95 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-03-13 Bernard first version
*/
/*!
* \addtogroup xgs3c4510
*/
/*@{*/
/* mask covering both the I (IRQ) and F (FIQ) disable bits in CPSR */
#define NOINT 0xc0

/*
 * rt_base_t rt_hw_interrupt_disable();
 * Disables IRQ+FIQ and returns the previous CPSR so the caller can
 * restore it later with rt_hw_interrupt_enable().
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    mrs r0, cpsr            /* old status -> return value */
    orr r1, r0, #NOINT
    msr cpsr_c, r1          /* set I+F: interrupts off */
    mov pc, lr
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * Restores a CPSR value previously returned by rt_hw_interrupt_disable().
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    msr cpsr, r0
    mov pc, lr
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from (address of the from-thread's saved-sp slot)
 * r1 --> to   (address of the to-thread's saved-sp slot)
 * Saves the full context of the current thread on its own stack, then
 * restores the destination thread's context.
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
    stmfd sp!, {lr}             @ push pc (lr should be pushed in place of PC)
    stmfd sp!, {r0-r12, lr}     @ push lr & register file
    mrs r4, cpsr
    stmfd sp!, {r4}             @ push cpsr
    mrs r4, spsr
    stmfd sp!, {r4}             @ push spsr
    str sp, [r0]                @ store sp in preempted tasks TCB
    ldr sp, [r1]                @ get new task stack pointer
    ldmfd sp!, {r4}             @ pop new task spsr
    msr spsr_cxsf, r4
    ldmfd sp!, {r4}             @ pop new task cpsr
    msr cpsr_cxsf, r4
    ldmfd sp!, {r0-r12, lr, pc} @ pop new task r0-r12, lr & pc
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to (address of the to-thread's saved-sp slot)
 * Restore-only variant used to start the very first thread: there is no
 * current context to save.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    ldr sp, [r0]                @ get new task stack pointer
    ldmfd sp!, {r4}             @ pop new task spsr
    msr spsr_cxsf, r4
    ldmfd sp!, {r4}             @ pop new task cpsr
    msr cpsr_cxsf, r4
    ldmfd sp!, {r0-r12, lr, pc} @ pop new task r0-r12, lr & pc
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 * Called at ISR level: only records the pending switch; the actual switch is
 * performed by rt_hw_context_switch_interrupt_do on IRQ exit.
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
    ldr r2, =rt_thread_switch_interrupt_flag
    ldr r3, [r2]
    cmp r3, #1                        @ switch already pending? keep original "from"
    beq _reswitch
    mov r3, #1                        @ set rt_thread_switch_interrupt_flag to 1
    str r3, [r2]
    ldr r2, =rt_interrupt_from_thread @ set rt_interrupt_from_thread
    str r0, [r2]
_reswitch:
    ldr r2, =rt_interrupt_to_thread   @ set rt_interrupt_to_thread (always updated)
    str r1, [r2]
    mov pc, lr

View File

@ -0,0 +1,103 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2009-01-20 Bernard first version
*/
NOINT           EQU     0xc0    ; disable interrupt in psr (both I and F bits)

                AREA    |.text|, CODE, READONLY, ALIGN=2
                ARM
                REQUIRE8
                PRESERVE8

;/*
; * rt_base_t rt_hw_interrupt_disable();
; * Disables IRQ+FIQ and returns the previous CPSR for later restore.
; */
rt_hw_interrupt_disable    PROC
                EXPORT  rt_hw_interrupt_disable
                MRS     r0, cpsr        ; old status -> return value
                ORR     r1, r0, #NOINT
                MSR     cpsr_c, r1      ; set I+F: interrupts off
                BX      lr
                ENDP
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; * Restores a CPSR value previously returned by rt_hw_interrupt_disable.
; */
rt_hw_interrupt_enable    PROC
                EXPORT  rt_hw_interrupt_enable
                MSR     cpsr_c, r0
                BX      lr
                ENDP
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from (address of the from-thread's saved-sp slot)
; * r1 --> to   (address of the to-thread's saved-sp slot)
; * Saves the current thread's full context on its own stack, then restores
; * the destination thread's context.
; */
rt_hw_context_switch    PROC
                EXPORT  rt_hw_context_switch
                STMFD   sp!, {lr}           ; push pc (lr should be pushed in place of PC)
                STMFD   sp!, {r0-r12, lr}   ; push lr & register file
                MRS     r4, cpsr
                STMFD   sp!, {r4}           ; push cpsr
                MRS     r4, spsr
                STMFD   sp!, {r4}           ; push spsr
                STR     sp, [r0]            ; store sp in preempted tasks TCB
                LDR     sp, [r1]            ; get new task stack pointer
                LDMFD   sp!, {r4}           ; pop new task spsr
                MSR     spsr_cxsf, r4
                LDMFD   sp!, {r4}           ; pop new task cpsr
                MSR     cpsr_cxsf, r4
                LDMFD   sp!, {r0-r12, lr, pc} ; pop new task r0-r12, lr & pc
                ENDP
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to (address of the to-thread's saved-sp slot)
; * Restore-only variant used to start the very first thread.
; */
rt_hw_context_switch_to    PROC
                EXPORT  rt_hw_context_switch_to
                LDR     sp, [r0]            ; get new task stack pointer
                LDMFD   sp!, {r4}           ; pop new task spsr
                MSR     spsr_cxsf, r4
                LDMFD   sp!, {r4}           ; pop new task cpsr
                MSR     cpsr_cxsf, r4
                LDMFD   sp!, {r0-r12, lr, pc} ; pop new task r0-r12, lr & pc
                ENDP
;/*
; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
; * Called at ISR level: only records the pending switch; the actual switch
; * is performed by rt_hw_context_switch_interrupt_do on IRQ exit.
; */
                IMPORT  rt_thread_switch_interrupt_flag
                IMPORT  rt_interrupt_from_thread
                IMPORT  rt_interrupt_to_thread

rt_hw_context_switch_interrupt    PROC
                EXPORT  rt_hw_context_switch_interrupt
                LDR     r2, =rt_thread_switch_interrupt_flag
                LDR     r3, [r2]
                CMP     r3, #1              ; pending already? keep original "from"
                BEQ     _reswitch
                MOV     r3, #1              ; set rt_thread_switch_interrupt_flag to 1
                STR     r3, [r2]
                LDR     r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
                STR     r0, [r2]
_reswitch
                LDR     r2, =rt_interrupt_to_thread   ; set rt_interrupt_to_thread
                STR     r1, [r2]
                BX      lr
                ENDP
END

View File

@ -0,0 +1,20 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-08-23 Bernard first version
*/
#include <rtthread.h>
/**
* @addtogroup AT91SAM7X
*/
/*@{*/
/*@}*/

View File

@ -0,0 +1,110 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-08-23 Bernard first version
* 2013-03-29 aozima Modify the interrupt interface implementations.
*/
#include <rtthread.h>
#include <rthw.h>
#include "AT91SAM7X256.h"
/* number of AIC interrupt sources/vectors managed by this port */
#define MAX_HANDLERS 32
/* exception and interrupt handler table */
struct rt_irq_desc irq_desc[MAX_HANDLERS];
/* interrupt nesting level, maintained by rt_interrupt_enter/leave */
extern rt_atomic_t rt_interrupt_nest;
/* saved-sp slot addresses consumed by the IRQ context-switch assembly:
 * "from"/"to" thread stack-pointer locations for a switch requested in ISR */
rt_uint32_t rt_interrupt_from_thread, rt_interrupt_to_thread;
/* set to 1 when a context switch has been requested from interrupt context */
rt_uint32_t rt_thread_switch_interrupt_flag;
/**
* @addtogroup AT91SAM7
*/
/*@{*/
/**
 * Default ISR installed on every vector; reports an unexpected interrupt.
 *
 * @param vector the interrupt number that fired
 * @param param  unused handler parameter
 */
static void rt_hw_interrupt_handler(int vector, void *param)
{
    rt_kprintf("Unhandled interrupt %d occurred!!!\n", vector);
}
/**
 * This function will initialize hardware interrupt: every descriptor slot
 * and every AIC source vector register is pointed at the default handler,
 * and the interrupt bookkeeping state is reset.
 */
void rt_hw_interrupt_init(void)
{
    rt_base_t index;

    /* route every source to the default handler, both in the software
     * descriptor table and in the AIC source vector registers */
    for (index = 0; index < MAX_HANDLERS; index++)
    {
        irq_desc[index].handler = (rt_isr_handler_t)rt_hw_interrupt_handler;
        irq_desc[index].param   = RT_NULL;
        AT91C_BASE_AIC->AIC_SVR[index] = (rt_uint32_t)rt_hw_interrupt_handler;
    }

    /* init interrupt nest, and context in thread sp */
    rt_interrupt_nest = 0;
    rt_interrupt_from_thread = 0;
    rt_interrupt_to_thread = 0;
    rt_thread_switch_interrupt_flag = 0;
}
/**
 * This function will mask (disable) an interrupt source in the AIC.
 * @param vector the interrupt number (0..MAX_HANDLERS-1)
 */
void rt_hw_interrupt_mask(int vector)
{
    /* 1UL: left-shifting a signed 1 by 31 is undefined behavior */
    /* disable interrupt */
    AT91C_BASE_AIC->AIC_IDCR = 1UL << vector;
    /* clear interrupt */
    AT91C_BASE_AIC->AIC_ICCR = 1UL << vector;
}
/**
 * This function will un-mask (enable) an interrupt source in the AIC.
 * @param vector the interrupt number (0..MAX_HANDLERS-1)
 */
void rt_hw_interrupt_umask(int vector)
{
    /* 1UL: left-shifting a signed 1 by 31 is undefined behavior */
    AT91C_BASE_AIC->AIC_IECR = 1UL << vector;
}
/**
 * This function will install an interrupt service routine for a vector.
 *
 * @param vector  the interrupt number
 * @param handler the interrupt service routine to be installed
 * @param param   the parameter passed to the service routine
 * @param name    unused
 *
 * @return the previously installed handler, or RT_NULL if the vector
 *         is out of range
 */
rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
                                         void *param, const char *name)
{
    rt_isr_handler_t previous = RT_NULL;

    /* out-of-range vectors are ignored */
    if (vector < 0 || vector >= MAX_HANDLERS)
        return previous;

    previous = irq_desc[vector].handler;
    /* a RT_NULL handler only queries the current one, nothing is replaced */
    if (handler != RT_NULL)
    {
        irq_desc[vector].handler = (rt_isr_handler_t)handler;
        irq_desc[vector].param   = param;
    }

    return previous;
}
/*@}*/

View File

@ -0,0 +1,59 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-08-23 Bernard the first version
*/
#include <rtthread.h>
#define SVCMODE 0x13
/**
* @addtogroup AT91SAM7
*/
/*@{*/
/**
 * This function will initialize thread stack.
 *
 * The frame matches what the context-restore assembly pops
 * (low -> high): spsr, cpsr, r0, r1..r12, lr, pc.
 *
 * @param tentry the entry of thread (popped into pc)
 * @param parameter the parameter of entry (popped into r0)
 * @param stack_addr the beginning stack address
 * @param texit the function will be called when thread exit (popped into lr)
 *
 * @return stack address
 */
rt_uint8_t *rt_hw_stack_init(void *tentry, void *parameter,
                             rt_uint8_t *stack_addr, void *texit)
{
    rt_uint32_t *stk;

    /* step past the reserved word, then align down to 8 bytes (AAPCS) */
    stack_addr += sizeof(rt_uint32_t);
    stack_addr  = (rt_uint8_t *)RT_ALIGN_DOWN((rt_uint32_t)stack_addr, 8);
    stk      = (rt_uint32_t *)stack_addr;
    *(--stk) = (rt_uint32_t)tentry;        /* entry point */
    *(--stk) = (rt_uint32_t)texit;         /* lr */
    *(--stk) = 0xdeadbeef;                 /* r12 (poison: flags stale reads) */
    *(--stk) = 0xdeadbeef;                 /* r11 */
    *(--stk) = 0xdeadbeef;                 /* r10 */
    *(--stk) = 0xdeadbeef;                 /* r9 */
    *(--stk) = 0xdeadbeef;                 /* r8 */
    *(--stk) = 0xdeadbeef;                 /* r7 */
    *(--stk) = 0xdeadbeef;                 /* r6 */
    *(--stk) = 0xdeadbeef;                 /* r5 */
    *(--stk) = 0xdeadbeef;                 /* r4 */
    *(--stk) = 0xdeadbeef;                 /* r3 */
    *(--stk) = 0xdeadbeef;                 /* r2 */
    *(--stk) = 0xdeadbeef;                 /* r1 */
    *(--stk) = (rt_uint32_t)parameter;     /* r0 : argument */
    *(--stk) = SVCMODE;                    /* cpsr: start thread in SVC mode */
    *(--stk) = SVCMODE;                    /* spsr */
    /* return task's current stack address */
    return (rt_uint8_t *)stk;
}
/*@}*/

View File

@ -0,0 +1,275 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-08-31 Bernard first version
*/
/* Internal Memory Base Addresses */
.equ FLASH_BASE, 0x00100000
.equ RAM_BASE, 0x00200000
/* Stack Configuration */
.equ TOP_STACK, 0x00204000
.equ UND_STACK_SIZE, 0x00000100
.equ SVC_STACK_SIZE, 0x00000400
.equ ABT_STACK_SIZE, 0x00000100
.equ FIQ_STACK_SIZE, 0x00000100
.equ IRQ_STACK_SIZE, 0x00000100
.equ USR_STACK_SIZE, 0x00000004
/* ARM architecture definitions */
.equ MODE_USR, 0x10
.equ MODE_FIQ, 0x11
.equ MODE_IRQ, 0x12
.equ MODE_SVC, 0x13
.equ MODE_ABT, 0x17
.equ MODE_UND, 0x1B
.equ MODE_SYS, 0x1F
.equ I_BIT, 0x80 /* when this bit is set, IRQ is disabled */
.equ F_BIT, 0x40 /* when this bit is set, FIQ is disabled */
.section .init, "ax"
.code 32
.align 0
.globl _start
_start:
b reset
ldr pc, _vector_undef
ldr pc, _vector_swi
ldr pc, _vector_pabt
ldr pc, _vector_dabt
nop /* reserved vector */
ldr pc, _vector_irq
ldr pc, _vector_fiq
_vector_undef: .word vector_undef
_vector_swi: .word vector_swi
_vector_pabt: .word vector_pabt
_vector_dabt: .word vector_dabt
_vector_resv: .word vector_resv
_vector_irq: .word vector_irq
_vector_fiq: .word vector_fiq
/*
* rtthread bss start and end
* which are defined in linker script
*/
.globl _bss_start
_bss_start: .word __bss_start
.globl _bss_end
_bss_end: .word __bss_end
/* the system entry */
reset:
/* disable watchdog */
ldr r0, =0xFFFFFD40
ldr r1, =0x00008000
str r1, [r0, #0x04]
/* enable the main oscillator */
ldr r0, =0xFFFFFC00
ldr r1, =0x00000601
str r1, [r0, #0x20]
/* wait for main oscillator to stabilize */
moscs_loop:
ldr r2, [r0, #0x68]
ands r2, r2, #1
beq moscs_loop
/* set up the PLL */
ldr r1, =0x00191C05
str r1, [r0, #0x2C]
/* wait for PLL to lock */
pll_loop:
ldr r2, [r0, #0x68]
ands r2, r2, #0x04
beq pll_loop
/* select clock */
ldr r1, =0x00000007
str r1, [r0, #0x30]
#ifdef __FLASH_BUILD__
/* copy exception vectors into internal sram */
/*
mov r8, #RAM_BASE
ldr r9, =_start
ldmia r9!, {r0-r7}
stmia r8!, {r0-r7}
ldmia r9!, {r0-r6}
stmia r8!, {r0-r6}
*/
#endif
/* setup stack for each mode */
ldr r0, =TOP_STACK
/* set stack */
/* undefined instruction mode */
msr cpsr_c, #MODE_UND|I_BIT|F_BIT
mov sp, r0
sub r0, r0, #UND_STACK_SIZE
/* abort mode */
msr cpsr_c, #MODE_ABT|I_BIT|F_BIT
mov sp, r0
sub r0, r0, #ABT_STACK_SIZE
/* FIQ mode */
msr cpsr_c, #MODE_FIQ|I_BIT|F_BIT
mov sp, r0
sub r0, r0, #FIQ_STACK_SIZE
/* IRQ mode */
msr cpsr_c, #MODE_IRQ|I_BIT|F_BIT
mov sp, r0
sub r0, r0, #IRQ_STACK_SIZE
/* supervisor mode */
msr cpsr_c, #MODE_SVC|I_BIT|F_BIT
mov sp, r0
/* remap SRAM to 0x0000 */
/*
ldr r0, =0xFFFFFF00
mov r1, #0x01
str r1, [r0]
*/
/* mask all IRQs */
ldr r1, =0xFFFFF124
ldr r0, =0XFFFFFFFF
str r0, [r1]
    /* copy .data to SRAM */
    ldr r1, =_sidata    /* .data load address in the image */
    ldr r2, =_edata     /* .data end (compared against the RAM cursor, so
                           this must resolve to a RAM address — TODO confirm
                           against the linker script) */
    ldr r3, =_sdata     /* sram data start */
    /* NOTE(review): the first word is stored before the bounds check, so an
       empty .data section would still copy one word — confirm _sdata < _edata
       always holds for this image */
data_loop:
    ldr r0, [r1, #0]
    str r0, [r3]
    add r1, r1, #4
    add r3, r3, #4
    cmp r3, r2          /* check if data to clear */
    blo data_loop       /* loop until done */
    /* clear .bss (zero-fill between the linker-provided bounds) */
    mov r0,#0           /* get a zero */
    ldr r1,=__bss_start /* bss start */
    ldr r2,=__bss_end   /* bss end */
bss_loop:
    cmp r1,r2           /* check if data to clear */
    strlo r0,[r1],#4    /* clear 4 bytes */
    blo bss_loop        /* loop until done */
    /* call C++ constructors of global objects */
    ldr r0, =__ctors_start__
    ldr r1, =__ctors_end__
ctor_loop:
    cmp r0, r1
    beq ctor_end
    ldr r2, [r0], #4    /* next constructor address */
    stmfd sp!, {r0-r1}  /* preserve cursor/limit across the call */
    mov lr, pc          /* set return address */
    bx r2               /* invoke the constructor */
    ldmfd sp!, {r0-r1}
    b ctor_loop
ctor_end:
    /* start RT-Thread Kernel (tail jump; never returns) */
    ldr pc, _rtthread_startup
_rtthread_startup: .word rtthread_startup
/* exception handlers */
vector_undef: b vector_undef
vector_swi : b vector_swi
vector_pabt : b vector_pabt
vector_dabt : b vector_dabt
vector_resv : b vector_resv
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
vector_irq:
stmfd sp!, {r0-r12,lr}
bl rt_interrupt_enter
bl rt_hw_trap_irq
bl rt_interrupt_leave
/*
* if rt_thread_switch_interrupt_flag set, jump to
* rt_hw_context_switch_interrupt_do and don't return
*/
ldr r0, =rt_thread_switch_interrupt_flag
ldr r1, [r0]
cmp r1, #1
beq rt_hw_context_switch_interrupt_do
ldmfd sp!, {r0-r12,lr}
subs pc, lr, #4
vector_fiq:
stmfd sp!,{r0-r7,lr}
bl rt_hw_trap_fiq
ldmfd sp!,{r0-r7,lr}
subs pc,lr,#4
/*
* void rt_hw_context_switch_interrupt_do(rt_base_t flag)
*/
rt_hw_context_switch_interrupt_do:
mov r1, #0 @ clear flag
str r1, [r0]
ldmfd sp!, {r0-r12,lr}@ reload saved registers
stmfd sp!, {r0-r3} @ save r0-r3
mov r1, sp
add sp, sp, #16 @ restore sp
sub r2, lr, #4 @ save old task's pc to r2
mrs r3, spsr @ disable interrupt
orr r0, r3, #I_BIT|F_BIT
msr spsr_c, r0
ldr r0, =.+8 @ switch to interrupted task's stack
movs pc, r0
stmfd sp!, {r2} @ push old task's pc
stmfd sp!, {r4-r12,lr}@ push old task's lr,r12-r4
mov r4, r1 @ Special optimised code below
mov r5, r3
ldmfd r4!, {r0-r3}
stmfd sp!, {r0-r3} @ push old task's r3-r0
stmfd sp!, {r5} @ push old task's psr
mrs r4, spsr
stmfd sp!, {r4} @ push old task's spsr
ldr r4, =rt_interrupt_from_thread
ldr r5, [r4]
str sp, [r5] @ store sp in preempted tasks's TCB
ldr r6, =rt_interrupt_to_thread
ldr r6, [r6]
ldr sp, [r6] @ get new task's stack pointer
ldmfd sp!, {r4} @ pop new task's spsr
msr SPSR_cxsf, r4
ldmfd sp!, {r4} @ pop new task's psr
msr CPSR_cxsf, r4
ldmfd sp!, {r0-r12,lr,pc} @ pop new task's r0-r12,lr & pc

View File

@ -0,0 +1,517 @@
;/*****************************************************************************/
;/* SAM7.S: Startup file for Atmel AT91SAM7 device series */
;/*****************************************************************************/
;/* <<< Use Configuration Wizard in Context Menu >>> */
;/*****************************************************************************/
;/* This file is part of the uVision/ARM development tools. */
;/* Copyright (c) 2005-2006 Keil Software. All rights reserved. */
;/* This software may only be used under the terms of a valid, current, */
;/* end user licence from KEIL for a compatible version of KEIL software */
;/* development tools. Nothing else gives you the right to use this software. */
;/*****************************************************************************/
;/*
; * The SAM7.S code is executed after CPU Reset. This file may be
; * translated with the following SET symbols. In uVision these SET
; * symbols are entered under Options - ASM - Define.
; *
; * REMAP: when set the startup code remaps exception vectors from
; * on-chip RAM to address 0.
; *
; * RAM_INTVEC: when set the startup code copies exception vectors
; * from on-chip Flash to on-chip RAM.
; */
; Standard definitions of Mode bits and Interrupt (I & F) flags in PSRs
; 2009-12-28 MingBai Bug fix (USR mode stack removed).
; 2009-12-29 MingBai Merge svc and irq stack, add abort handler.
Mode_USR EQU 0x10
Mode_FIQ EQU 0x11
Mode_IRQ EQU 0x12
Mode_SVC EQU 0x13
Mode_ABT EQU 0x17
Mode_UND EQU 0x1B
Mode_SYS EQU 0x1F
I_Bit EQU 0x80 ; when I bit is set, IRQ is disabled
F_Bit EQU 0x40 ; when F bit is set, FIQ is disabled
; Internal Memory Base Addresses
FLASH_BASE EQU 0x00100000
RAM_BASE EQU 0x00200000
;// <h> Stack Configuration (Stack Sizes in Bytes)
;// <o0> Undefined Mode <0x0-0xFFFFFFFF:8>
;// <o1> Supervisor Mode <0x0-0xFFFFFFFF:8>
;// <o2> Abort Mode <0x0-0xFFFFFFFF:8>
;// <o3> Fast Interrupt Mode <0x0-0xFFFFFFFF:8>
;// <o4> Interrupt Mode <0x0-0xFFFFFFFF:8>
;// <o5> User/System Mode <0x0-0xFFFFFFFF:8>
;// </h>
UND_Stack_Size EQU 0x00000000
SVC_Stack_Size EQU 0x00000000
ABT_Stack_Size EQU 0x00000000
FIQ_Stack_Size EQU 0x00000000
IRQ_Stack_Size EQU 0x00000100
USR_Stack_Size EQU 0x00000000
ISR_Stack_Size EQU (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
FIQ_Stack_Size + IRQ_Stack_Size)
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE USR_Stack_Size
__initial_sp SPACE ISR_Stack_Size
Stack_Top
;// <h> Heap Configuration
;// <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF>
;// </h>
Heap_Size EQU 0x00000000
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
; Reset Controller (RSTC) definitions
RSTC_BASE EQU 0xFFFFFD00 ; RSTC Base Address
RSTC_MR EQU 0x08 ; RSTC_MR Offset
;/*
;// <e> Reset Controller (RSTC)
;// <o1.0> URSTEN: User Reset Enable
;// <i> Enables NRST Pin to generate Reset
;// <o1.8..11> ERSTL: External Reset Length <0-15>
;// <i> External Reset Time in 2^(ERSTL+1) Slow Clock Cycles
;// </e>
;*/
RSTC_SETUP EQU 1
RSTC_MR_Val EQU 0xA5000401
; Embedded Flash Controller (EFC) definitions
EFC_BASE EQU 0xFFFFFF00 ; EFC Base Address
EFC0_FMR EQU 0x60 ; EFC0_FMR Offset
EFC1_FMR EQU 0x70 ; EFC1_FMR Offset
;// <e> Embedded Flash Controller 0 (EFC0)
;// <o1.16..23> FMCN: Flash Microsecond Cycle Number <0-255>
;// <i> Number of Master Clock Cycles in 1us
;// <o1.8..9> FWS: Flash Wait State
;// <0=> Read: 1 cycle / Write: 2 cycles
;// <1=> Read: 2 cycle / Write: 3 cycles
;// <2=> Read: 3 cycle / Write: 4 cycles
;// <3=> Read: 4 cycle / Write: 4 cycles
;// </e>
EFC0_SETUP EQU 1
EFC0_FMR_Val EQU 0x00320100
;// <e> Embedded Flash Controller 1 (EFC1)
;// <o1.16..23> FMCN: Flash Microsecond Cycle Number <0-255>
;// <i> Number of Master Clock Cycles in 1us
;// <o1.8..9> FWS: Flash Wait State
;// <0=> Read: 1 cycle / Write: 2 cycles
;// <1=> Read: 2 cycle / Write: 3 cycles
;// <2=> Read: 3 cycle / Write: 4 cycles
;// <3=> Read: 4 cycle / Write: 4 cycles
;// </e>
EFC1_SETUP EQU 0
EFC1_FMR_Val EQU 0x00320100
; Watchdog Timer (WDT) definitions
WDT_BASE EQU 0xFFFFFD40 ; WDT Base Address
WDT_MR EQU 0x04 ; WDT_MR Offset
;// <e> Watchdog Timer (WDT)
;// <o1.0..11> WDV: Watchdog Counter Value <0-4095>
;// <o1.16..27> WDD: Watchdog Delta Value <0-4095>
;// <o1.12> WDFIEN: Watchdog Fault Interrupt Enable
;// <o1.13> WDRSTEN: Watchdog Reset Enable
;// <o1.14> WDRPROC: Watchdog Reset Processor
;// <o1.28> WDDBGHLT: Watchdog Debug Halt
;// <o1.29> WDIDLEHLT: Watchdog Idle Halt
;// <o1.15> WDDIS: Watchdog Disable
;// </e>
WDT_SETUP EQU 1
WDT_MR_Val EQU 0x00008000
; Power Management Controller (PMC) definitions
PMC_BASE EQU 0xFFFFFC00 ; PMC Base Address
PMC_MOR EQU 0x20 ; PMC_MOR Offset
PMC_MCFR EQU 0x24 ; PMC_MCFR Offset
PMC_PLLR EQU 0x2C ; PMC_PLLR Offset
PMC_MCKR EQU 0x30 ; PMC_MCKR Offset
PMC_SR EQU 0x68 ; PMC_SR Offset
PMC_MOSCEN EQU (1<<0) ; Main Oscillator Enable
PMC_OSCBYPASS EQU (1<<1) ; Main Oscillator Bypass
PMC_OSCOUNT EQU (0xFF<<8) ; Main OScillator Start-up Time
PMC_DIV EQU (0xFF<<0) ; PLL Divider
PMC_PLLCOUNT EQU (0x3F<<8) ; PLL Lock Counter
PMC_OUT EQU (0x03<<14) ; PLL Clock Frequency Range
PMC_MUL EQU (0x7FF<<16) ; PLL Multiplier
PMC_USBDIV EQU (0x03<<28) ; USB Clock Divider
PMC_CSS EQU (3<<0) ; Clock Source Selection
PMC_PRES EQU (7<<2) ; Prescaler Selection
PMC_MOSCS EQU (1<<0) ; Main Oscillator Stable
PMC_LOCK EQU (1<<2) ; PLL Lock Status
PMC_MCKRDY EQU (1<<3) ; Master Clock Status
;// <e> Power Management Controller (PMC)
;// <h> Main Oscillator
;// <o1.0> MOSCEN: Main Oscillator Enable
;// <o1.1> OSCBYPASS: Oscillator Bypass
;// <o1.8..15> OSCCOUNT: Main Oscillator Startup Time <0-255>
;// </h>
;// <h> Phase Locked Loop (PLL)
;// <o2.0..7> DIV: PLL Divider <0-255>
;// <o2.16..26> MUL: PLL Multiplier <0-2047>
;// <i> PLL Output is multiplied by MUL+1
;// <o2.14..15> OUT: PLL Clock Frequency Range
;// <0=> 80..160MHz <1=> Reserved
;// <2=> 150..220MHz <3=> Reserved
;// <o2.8..13> PLLCOUNT: PLL Lock Counter <0-63>
;// <o2.28..29> USBDIV: USB Clock Divider
;// <0=> None <1=> 2 <2=> 4 <3=> Reserved
;// </h>
;// <o3.0..1> CSS: Clock Source Selection
;// <0=> Slow Clock
;// <1=> Main Clock
;// <2=> Reserved
;// <3=> PLL Clock
;// <o3.2..4> PRES: Prescaler
;// <0=> None
;// <1=> Clock / 2 <2=> Clock / 4
;// <3=> Clock / 8 <4=> Clock / 16
;// <5=> Clock / 32 <6=> Clock / 64
;// <7=> Reserved
;// </e>
PMC_SETUP EQU 1
PMC_MOR_Val EQU 0x00000601
PMC_PLLR_Val EQU 0x00191C05
PMC_MCKR_Val EQU 0x00000007
PRESERVE8
; Area Definition and Entry Point
; Startup Code must be linked first at Address at which it expects to run.
AREA RESET, CODE, READONLY
ARM
; Exception Vectors
; Mapped to Address 0.
; Absolute addressing mode must be used.
; Dummy Handlers are implemented as infinite loops which can be modified.
; Exception vector table: each slot loads PC from the literal table below
; so any 32-bit handler address is reachable (a plain B has limited range).
Vectors LDR PC,Reset_Addr
LDR PC,Undef_Addr
LDR PC,SWI_Addr
LDR PC,PAbt_Addr
LDR PC,DAbt_Addr
NOP ; Reserved Vector
LDR PC,IRQ_Addr
LDR PC,FIQ_Addr
; Literal pool holding the handler entry points.
Reset_Addr DCD Reset_Handler
Undef_Addr DCD Undef_Handler
SWI_Addr DCD SWI_Handler
PAbt_Addr DCD PAbt_Handler
DAbt_Addr DCD DAbt_Handler
DCD 0 ; Reserved Address
IRQ_Addr DCD IRQ_Handler
FIQ_Addr DCD FIQ_Handler
; Undefined/SWI/FIQ spin in place; prefetch and data aborts are both
; routed to the common Abort_Handler below.
Undef_Handler B Undef_Handler
SWI_Handler B SWI_Handler
PAbt_Handler B Abort_Handler
DAbt_Handler B Abort_Handler
FIQ_Handler B FIQ_Handler
; Reset Handler
; Reset handler: program the reset controller, flash wait states, watchdog
; and clocks (oscillator -> PLL -> master clock), optionally copy/remap the
; vector table to RAM, set up a stack pointer for every ARM mode, and jump
; to the C library entry point __main.
EXPORT Reset_Handler
Reset_Handler
; Setup RSTC
IF RSTC_SETUP != 0
LDR R0, =RSTC_BASE
LDR R1, =RSTC_MR_Val
STR R1, [R0, #RSTC_MR]
ENDIF
; Setup EFC0
IF EFC0_SETUP != 0
LDR R0, =EFC_BASE
LDR R1, =EFC0_FMR_Val
STR R1, [R0, #EFC0_FMR]
ENDIF
; Setup EFC1
IF EFC1_SETUP != 0
LDR R0, =EFC_BASE
LDR R1, =EFC1_FMR_Val
STR R1, [R0, #EFC1_FMR]
ENDIF
; Setup WDT
IF WDT_SETUP != 0
LDR R0, =WDT_BASE
LDR R1, =WDT_MR_Val
STR R1, [R0, #WDT_MR]
ENDIF
; Setup PMC
IF PMC_SETUP != 0
LDR R0, =PMC_BASE
; Setup Main Oscillator
LDR R1, =PMC_MOR_Val
STR R1, [R0, #PMC_MOR]
; Wait until Main Oscillator is stabilized
IF (PMC_MOR_Val:AND:PMC_MOSCEN) != 0
MOSCS_Loop LDR R2, [R0, #PMC_SR]
ANDS R2, R2, #PMC_MOSCS
BEQ MOSCS_Loop
ENDIF
; Setup the PLL
IF (PMC_PLLR_Val:AND:PMC_MUL) != 0
LDR R1, =PMC_PLLR_Val
STR R1, [R0, #PMC_PLLR]
; Wait until PLL is stabilized
PLL_Loop LDR R2, [R0, #PMC_SR]
ANDS R2, R2, #PMC_LOCK
BEQ PLL_Loop
ENDIF
; Select Clock.  The master clock register is written in two steps
; (one field at a time), polling MCKRDY after each write.
IF (PMC_MCKR_Val:AND:PMC_CSS) == 1 ; Main Clock Selected
LDR R1, =PMC_MCKR_Val
AND R1, #PMC_CSS
STR R1, [R0, #PMC_MCKR]
WAIT_Rdy1 LDR R2, [R0, #PMC_SR]
ANDS R2, R2, #PMC_MCKRDY
BEQ WAIT_Rdy1
LDR R1, =PMC_MCKR_Val
STR R1, [R0, #PMC_MCKR]
WAIT_Rdy2 LDR R2, [R0, #PMC_SR]
ANDS R2, R2, #PMC_MCKRDY
BEQ WAIT_Rdy2
ELIF (PMC_MCKR_Val:AND:PMC_CSS) == 3 ; PLL Clock Selected
LDR R1, =PMC_MCKR_Val
AND R1, #PMC_PRES
STR R1, [R0, #PMC_MCKR]
WAIT_Rdy1 LDR R2, [R0, #PMC_SR]
ANDS R2, R2, #PMC_MCKRDY
BEQ WAIT_Rdy1
LDR R1, =PMC_MCKR_Val
STR R1, [R0, #PMC_MCKR]
WAIT_Rdy2 LDR R2, [R0, #PMC_SR]
ANDS R2, R2, #PMC_MCKRDY
BEQ WAIT_Rdy2
ENDIF ; Select Clock
ENDIF ; PMC_SETUP
; Copy Exception Vectors to Internal RAM (8 vectors + 8 literal entries)
IF :DEF:RAM_INTVEC
ADR R8, Vectors ; Source
LDR R9, =RAM_BASE ; Destination
LDMIA R8!, {R0-R7} ; Load Vectors
STMIA R9!, {R0-R7} ; Store Vectors
LDMIA R8!, {R0-R7} ; Load Handler Addresses
STMIA R9!, {R0-R7} ; Store Handler Addresses
ENDIF
; Remap on-chip RAM to address 0
MC_BASE EQU 0xFFFFFF00 ; MC Base Address
MC_RCR EQU 0x00 ; MC_RCR Offset
IF :DEF:REMAP
LDR R0, =MC_BASE
MOV R1, #1
STR R1, [R0, #MC_RCR] ; Remap
ENDIF
; Setup Stack for each mode.  All non-zero-sized stacks share the region
; below Stack_Top; the SUBs are commented out because only the IRQ stack
; has a non-zero size in this configuration.
LDR R0, =Stack_Top
; Enter Undefined Instruction Mode and set its Stack Pointer
MSR CPSR_c, #Mode_UND:OR:I_Bit:OR:F_Bit
MOV SP, R0
;SUB R0, R0, #UND_Stack_Size
; Enter Abort Mode and set its Stack Pointer
MSR CPSR_c, #Mode_ABT:OR:I_Bit:OR:F_Bit
MOV SP, R0
;SUB R0, R0, #ABT_Stack_Size
; Enter FIQ Mode and set its Stack Pointer
MSR CPSR_c, #Mode_FIQ:OR:I_Bit:OR:F_Bit
MOV SP, R0
;SUB R0, R0, #FIQ_Stack_Size
; Enter IRQ Mode and set its Stack Pointer
MSR CPSR_c, #Mode_IRQ:OR:I_Bit:OR:F_Bit
MOV SP, R0
;SUB R0, R0, #IRQ_Stack_Size
; Enter Supervisor Mode and set its Stack Pointer
MSR CPSR_c, #Mode_SVC:OR:I_Bit:OR:F_Bit
MOV SP, R0
; SUB R0, R0, #SVC_Stack_Size
; Enter User Mode and set its Stack Pointer
; MSR CPSR_c, #Mode_USR
IF :DEF:__MICROLIB
EXPORT __initial_sp
ELSE
; No usr mode stack here.
;MOV SP, R0
;SUB SL, SP, #USR_Stack_Size
ENDIF
; Enter the C code (still in SVC mode with IRQ/FIQ masked)
IMPORT __main
LDR R0, =__main
BX R0
IMPORT rt_interrupt_enter
IMPORT rt_interrupt_leave
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
IMPORT rt_hw_trap_irq
IMPORT rt_hw_trap_abort
IMPORT rt_interrupt_nest
; Common prefetch/data abort handler.  If the abort happened while an
; interrupt was already being serviced (rt_interrupt_nest > 0) the system
; cannot be recovered, so it halts in DeadLoop.  Otherwise the abort is
; handled like an interrupt (rt_hw_trap_abort suspends the faulting
; thread) and control falls into the SWITCH path of IRQ_Handler.
Abort_Handler PROC
EXPORT Abort_Handler
stmfd sp!, {r0-r12,lr}
LDR r0, =rt_interrupt_nest
LDR r1, [r0]
CMP r1, #0
DeadLoop BHI DeadLoop ; Abort happened in irq mode, halt system.
bl rt_interrupt_enter
bl rt_hw_trap_abort
bl rt_interrupt_leave
b SWITCH
ENDP
; IRQ entry: save the interrupted context, dispatch via rt_hw_trap_irq,
; then either return to the interrupted code or perform the context
; switch requested inside the ISR.  The SWITCH label is also the common
; exit path used by Abort_Handler above.
IRQ_Handler PROC
EXPORT IRQ_Handler
STMFD sp!, {r0-r12,lr}
BL rt_interrupt_enter
BL rt_hw_trap_irq
BL rt_interrupt_leave
; if rt_thread_switch_interrupt_flag set, jump to
; rt_hw_context_switch_interrupt_do and don't return
SWITCH LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CMP r1, #1
BEQ rt_hw_context_switch_interrupt_do
LDMFD sp!, {r0-r12,lr}
SUBS pc, lr, #4 ; IRQ return: lr-4, restores CPSR from SPSR
ENDP
; /*
; * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
; */
; Perform the context switch requested during an IRQ/abort.  Entered with
; r0 = &rt_thread_switch_interrupt_flag and the r0-r12,lr frame still on
; the exception stack.  Rebuilds the preempted thread's full frame
; (spsr, cpsr, r0-r12, lr, pc) on the thread's own SVC stack, stores sp
; into the "from" TCB, then restores the "to" thread.  Never returns.
rt_hw_context_switch_interrupt_do PROC
EXPORT rt_hw_context_switch_interrupt_do
MOV r1, #0 ; clear flag
STR r1, [r0]
LDMFD sp!, {r0-r12,lr}; reload saved registers
STMFD sp!, {r0-r3} ; save r0-r3
MOV r1, sp
ADD sp, sp, #16 ; restore sp
SUB r2, lr, #4 ; save old task's pc to r2
MRS r3, spsr ; get cpsr of interrupt thread
; switch to SVC mode and no interrupt
MSR cpsr_c, #I_Bit|F_Bit|Mode_SVC
STMFD sp!, {r2} ; push old task's pc
STMFD sp!, {r4-r12,lr}; push old task's lr,r12-r4
MOV r4, r1 ; Special optimised code below
MOV r5, r3
LDMFD r4!, {r0-r3} ; fetch r0-r3 back from the exception stack copy
STMFD sp!, {r0-r3} ; push old task's r3-r0
STMFD sp!, {r5} ; push old task's cpsr
MRS r4, spsr
STMFD sp!, {r4} ; push old task's spsr
LDR r4, =rt_interrupt_from_thread
LDR r5, [r4]
STR sp, [r5] ; store sp in preempted tasks's TCB
LDR r6, =rt_interrupt_to_thread
LDR r6, [r6]
LDR sp, [r6] ; get new task's stack pointer
LDMFD sp!, {r4} ; pop new task's spsr
MSR spsr_cxsf, r4
LDMFD sp!, {r4} ; pop new task's psr
MSR cpsr_cxsf, r4
LDMFD sp!, {r0-r12,lr,pc} ; pop new task's r0-r12,lr & pc
ENDP
; C library stack/heap setup.  With MicroLIB only the initial SP and heap
; symbols are exported; otherwise the standard-library two-region model
; callback returns heap base (R0), stack base (R1), heap limit (R2) and
; stack limit (R3).
IF :DEF:__MICROLIB
EXPORT __heap_base
EXPORT __heap_limit
ELSE
; User Initial Stack & Heap
AREA |.text|, CODE, READONLY
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, = (Stack_Mem + IRQ_Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ENDIF
END

View File

@ -0,0 +1,49 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-08-25 Bernard first version
*/
#include <rtthread.h>
#include <rthw.h>
#include "AT91SAM7X256.h"
/**
* @addtogroup AT91SAM7
*/
/*@{*/
/**
 * IRQ trap handler, called from the assembly IRQ vector.
 * Reads the active interrupt number from the AIC interrupt status
 * register, dispatches to the handler registered in irq_desc[], and
 * finally writes the end-of-interrupt command register to re-arm the
 * controller.
 */
void rt_hw_trap_irq(void)
{
int irqno;
extern struct rt_irq_desc irq_desc[];
/* get interrupt number */
irqno = AT91C_BASE_AIC->AIC_ISR;
/* invoke isr with parameters */
irq_desc[irqno].handler(irqno, irq_desc[irqno].param);
/* end of interrupt */
AT91C_BASE_AIC->AIC_EOICR = 0;
}
/**
 * FIQ trap handler, called from the assembly FIQ vector.
 * This port does not use FIQ; the handler only logs that one occurred.
 */
void rt_hw_trap_fiq(void)
{
rt_kprintf("fast interrupt request\n");
}
extern struct rt_thread* rt_current_thread;

/**
 * Abort (prefetch/data abort) trap handler, called from the assembly
 * abort vector.  Instead of halting the whole system, it reports the
 * fault, suspends the offending thread and re-schedules so the rest of
 * the system keeps running.
 */
void rt_hw_trap_abort(void)
{
    /* fixed typo in user-visible message: "occured" -> "occurred" */
    rt_kprintf("Abort occurred!!! Thread [%s] suspended.\n", rt_current_thread->parent.name);
    rt_thread_suspend(rt_current_thread);
    rt_schedule();
}
/*@}*/

View File

@ -0,0 +1,18 @@
# RT-Thread building script for bridge
import os
from building import *
Import('rtconfig')

cwd = GetCurrentDir()
# entries of this directory; renamed from `list`, which shadowed the builtin
cpu_dirs = os.listdir(cwd)

# add common code files
group = SConscript(os.path.join('common', 'SConscript'))

# cpu porting code files: only descend if a sub-directory matches the CPU
if rtconfig.CPU in cpu_dirs:
    group = group + SConscript(os.path.join(rtconfig.CPU, 'SConscript'))

Return('group')

View File

@ -0,0 +1,23 @@
# RT-Thread building script for component
from building import *
Import('rtconfig')

cwd = GetCurrentDir()
sources = Glob('*.c') + Glob('*.cpp')
inc_paths = [cwd]

# pick the toolchain-specific assembly sources; the platform lists are
# mutually exclusive, so an elif chain is equivalent to separate ifs
platform = rtconfig.PLATFORM
if platform in ['armcc', 'armclang']:
    sources += Glob('*_rvds.S')
elif platform in ['gcc']:
    sources += Glob('*_init.S') + Glob('*_gcc.S')
elif platform in ['iccarm']:
    sources += Glob('*_iar.S')

group = DefineGroup('libcpu', sources, depend = [''], CPPPATH = inc_paths)
Return('group')

View File

@ -0,0 +1,354 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef __AM33XX_H__
#define __AM33XX_H__
#define REG32(x) (*((volatile unsigned int *)(x)))
#define REG16(x) (*((volatile unsigned short *)(x)))
/** Cache Line size in ARM Cortex-A8. */
#define AM33XX_CACHELINE_SIZE (64)
/** @brief Base address of AINTC memory mapped registers */
#define AM33XX_AINTC_REGS (0x48200000)
/** @brief Base addresses of control module registers */
#define AM33XX_CTLM_REGS (0x44e10000)
/** @brief Base addresses of USB memory mapped registers */
#define AM33XX_USB_0_BASE (0x47401400)
#define AM33XX_USB_1_BASE (0x47401C00)
/** @brief Base addresses of SPI memory mapped registers */
#define AM33XX_SPI_0_REGS (0x48030000)
#define AM33XX_SPI_1_REGS (0x481A0000)
/** @brief Base addresses of GPIO memory mapped registers */
#define AM33XX_GPIO_0_REGS (0x44E07000)
#define AM33XX_GPIO_1_REGS (0x4804C000)
#define AM33XX_GPIO_2_REGS (0x481AC000)
#define AM33XX_GPIO_3_REGS (0x481AE000)
/** @brief Base addresses of DMTIMER memory mapped registers */
#define AM33XX_DMTIMER_0_REGS (0x44E05000)
#define AM33XX_DMTIMER_1_REGS (0x44E31000)
#define AM33XX_DMTIMER_2_REGS (0x48040000)
#define AM33XX_DMTIMER_3_REGS (0x48042000)
#define AM33XX_DMTIMER_4_REGS (0x48044000)
#define AM33XX_DMTIMER_5_REGS (0x48046000)
#define AM33XX_DMTIMER_6_REGS (0x48048000)
#define AM33XX_DMTIMER_7_REGS (0x4804A000)
/** @brief Base address of MMC memory mapped registers */
#define AM33XX_MMCHS_0_REGS (0x48060000)
#define AM33XX_MMCHS_1_REGS (0x481D8000)
#define AM33XX_MMCHS_2_REGS (0x47810000)
/** @brief Base address of GPMC memory mapped registers */
#define AM33XX_GPMC_0_REGS (0x50000000)
/** @brief Base address of GPMC memory mapped registers */
#define AM33XX_ELM_0_REGS (0x48080000)
/** @brief Base address of I2C memory mapped registers */
#define AM33XX_I2C_0_REGS (0x44E0B000)
#define AM33XX_I2C_1_REGS (0x4802A000)
#define AM33XX_I2C_2_REGS (0x4819C000)
/** @brief Base address of WDT memory mapped registers */
#define AM33XX_WDT_0_REGS (0x44E33000)
#define AM33XX_WDT_1_REGS (0x44E35000)
/** @brief Base address of WDT memory mapped registers */
#define AM33XX_CPSW_SS_REGS (0x4A100000)
#define AM33XX_CPSW_MDIO_REGS (0x4A101000)
#define AM33XX_CPSW_WR_REGS (0x4A101200)
#define AM33XX_CPSW_CPDMA_REGS (0x4A100800)
#define AM33XX_CPSW_ALE_REGS (0x4A100D00)
#define AM33XX_CPSW_STAT_REGS (0x4A100900)
#define AM33XX_CPSW_PORT_0_REGS (0x4A100100)
#define AM33XX_CPSW_PORT_1_REGS (0x4A100200)
#define AM33XX_CPSW_SLIVER_1_REGS (0x4A100D80)
#define AM33XX_CPSW_PORT_2_REGS (0x4A100300)
#define AM33XX_CPSW_SLIVER_2_REGS (0x4A100DC0)
#define AM33XX_CPSW_CPPI_RAM_REGS (0x4A102000)
/** @brief Base address of McASP memory mapped registers */
#define AM33XX_MCASP_1_CTRL_REGS (0x4803C000)
#define AM33XX_MCASP_1_FIFO_REGS (AM33XX_MCASP_1_CTRL_REGS + 0x1000)
#define AM33XX_MCASP_1_DATA_REGS (0x46400000)
/** @brief Base address of EMIF memory mapped registers */
#define AM33XX_EMIF_0_REGS (0x4C000000)
/** @brief Base addresses of RTC memory mapped registers */
#define AM33XX_RTC_0_REGS (0x44E3E000)
#define CM_PER(base) ((base) + 0)
#define CM_PER_L4LS_CLKSTCTRL(base) (CM_PER(base) + 0)
#define CM_PER_UART1_CLKCTRL(base) (CM_PER(base) + 0x6C)
#define CM_PER_UART2_CLKCTRL(base) (CM_PER(base) + 0x70)
#define CM_PER_UART3_CLKCTRL(base) (CM_PER(base) + 0x74)
#define CM_PER_UART4_CLKCTRL(base) (CM_PER(base) + 0x78)
#define CM_PER_UART5_CLKCTRL(base) (CM_PER(base) + 0x38)
#define CM_WKUP(base) ((base) + 0x400)
#define CM_WKUP_CLKSTCTRL(base) (CM_WKUP(base) + 0)
#define CM_WKUP_UART0_CLKCTRL(base) (CM_WKUP(base) + 0xB4)
#define CM_DPLL(base) ((base) + 0x500)
#define CM_MPU(base) ((base) + 0x600)
#define CM_DEVICE(base) ((base) + 0x700)
#define CM_RTC(base) ((base) + 0x800)
#define CM_GFX(base) ((base) + 0x900)
#define CM_CEFUSE(base) ((base) + 0xA00)
#define OCP_AM33XXKET_RAM(base) ((base) + 0xB00)
#define PRM_PER(base) ((base) + 0xC00)
#define PRM_PER_PWRSTST(base) (PRM_PER(base) + 0x008)
#define PRM_PER_PWRSTCTRL(base) (PRM_PER(base) + 0x00C)
#define PRM_WKUP(base) ((base) + 0xD00)
#define PRM_MPU(base) ((base) + 0xE00)
#define PRM_DEVICE(base) ((base) + 0xF00)
#define PRM_RTC(base) ((base) + 0x1000)
#define PRM_GFX(base) ((base) + 0x1100)
#define PRM_CEFUSE(base) ((base) + 0x1200)
/** @brief Base addresses of PRCM memory mapped registers */
#define AM33XX_PRCM_REGS (0x44E00000)
#define AM33XX_CM_PER_REGS CM_PER(AM33XX_PRCM_REGS)
#define AM33XX_CM_WKUP_REGS CM_WKUP(AM33XX_PRCM_REGS)
#define AM33XX_CM_DPLL_REGS CM_DPLL(AM33XX_PRCM_REGS)
#define AM33XX_CM_MPU_REGS CM_MPU(AM33XX_PRCM_REGS)
#define AM33XX_CM_DEVICE_REGS CM_DEVICE(AM33XX_PRCM_REGS)
#define AM33XX_CM_RTC_REGS CM_RTC(AM33XX_PRCM_REGS)
#define AM33XX_CM_GFX_REGS CM_GFX(AM33XX_PRCM_REGS)
#define AM33XX_CM_CEFUSE_REGS CM_CEFUSE(AM33XX_PRCM_REGS)
#define AM33XX_OCP_AM33XXKET_RAM_REGS OCP_AM33XXKET_RAM(AM33XX_PRCM_REGS)
#define AM33XX_PRM_PER_REGS PRM_PER(AM33XX_PRCM_REGS)
#define AM33XX_PRM_WKUP_REGS PRM_WKUP(AM33XX_PRCM_REGS)
#define AM33XX_PRM_MPU_REGS PRM_MPU(AM33XX_PRCM_REGS)
#define AM33XX_PRM_DEVICE_REGS PRM_DEVICE(AM33XX_PRCM_REGS)
#define AM33XX_PRM_RTC_REGS PRM_RTC(AM33XX_PRCM_REGS)
#define AM33XX_PRM_GFX_REGS PRM_GFX(AM33XX_PRCM_REGS)
#define AM33XX_PRM_CEFUSE_REGS PRM_CEFUSE(AM33XX_PRCM_REGS)
/** @brief Base address of control module memory mapped registers */
#define AM33XX_CONTROL_REGS (0x44E10000)
/** @brief Base address of Channel controller memory mapped registers */
#define AM33XX_EDMA30CC_0_REGS (0x49000000)
/** @brief Base address of DCAN module memory mapped registers */
#define AM33XX_DCAN_0_REGS (0x481CC000)
#define AM33XX_DCAN_1_REGS (0x481D0000)
/******************************************************************************\
* Parameterizable Configuration:- These are fed directly from the RTL
* parameters for the given AM33XX
\******************************************************************************/
#define TPCC_MUX(n) 0xF90 + ((n) * 4)
#define AM33XX_LCDC_0_REGS 0x4830E000
#define AM33XX_ADC_TSC_0_REGS 0x44E0D000
/** @brief Base addresses of PWMSS memory mapped registers. */
#define AM33XX_PWMSS0_REGS (0x48300000)
#define AM33XX_PWMSS1_REGS (0x48302000)
#define AM33XX_PWMSS2_REGS (0x48304000)
#define AM33XX_ECAP_REGS (0x00000100)
#define AM33XX_EQEP_REGS (0x00000180)
#define AM33XX_EPWM_REGS (0x00000200)
#define AM33XX_ECAP_0_REGS (AM33XX_PWMSS0_REGS + AM33XX_ECAP_REGS)
#define AM33XX_ECAP_1_REGS (AM33XX_PWMSS1_REGS + AM33XX_ECAP_REGS)
#define AM33XX_ECAP_2_REGS (AM33XX_PWMSS2_REGS + AM33XX_ECAP_REGS)
#define AM33XX_EQEP_0_REGS (AM33XX_PWMSS0_REGS + AM33XX_EQEP_REGS)
#define AM33XX_EQEP_1_REGS (AM33XX_PWMSS1_REGS + AM33XX_EQEP_REGS)
#define AM33XX_EQEP_2_REGS (AM33XX_PWMSS2_REGS + AM33XX_EQEP_REGS)
#define AM33XX_EPWM_0_REGS (AM33XX_PWMSS0_REGS + AM33XX_EPWM_REGS)
#define AM33XX_EPWM_1_REGS (AM33XX_PWMSS1_REGS + AM33XX_EPWM_REGS)
#define AM33XX_EPWM_2_REGS (AM33XX_PWMSS2_REGS + AM33XX_EPWM_REGS)
#define AM33XX_EPWM_MODULE_FREQ 100
/* PRCM registers */
#define CM_PER_L4LS_CLKSTCTRL_REG(base) REG32((base) + 0x0)
#define CM_PER_UART1_CLKCTRL_REG(base) REG32(CM_PER_UART1_CLKCTRL(base))
#define CM_PER_UART2_CLKCTRL_REG(base) REG32(CM_PER_UART2_CLKCTRL(base))
#define CM_PER_UART3_CLKCTRL_REG(base) REG32(CM_PER_UART3_CLKCTRL(base))
#define CM_PER_UART4_CLKCTRL_REG(base) REG32(CM_PER_UART4_CLKCTRL(base))
#define CM_PER_UART5_CLKCTRL_REG(base) REG32(CM_PER_UART5_CLKCTRL(base))
#define CM_PER_TIMER7_CLKCTRL(base) REG32((base) + 0x7C)
#define CM_PER_TIMER2_CLKCTRL(base) REG32((base) + 0x80)
#define PRM_PER_PWRSTST_REG(base) REG32(PRM_PER_PWRSTST(base))
#define PRM_PER_PWRSTCTRL_REG(base) REG32(PRM_PER_PWRSTCTRL(base))
#define CM_WKUP_CLKSTCTRL_REG(base) REG32(CM_WKUP_CLKSTCTRL(base))
#define CM_WKUP_UART0_CLKCTRL_REG(base) REG32(CM_WKUP_UART0_CLKCTRL(base))
#define CM_DPLL_CLKSEL_TIMER7_CLK(base) REG32(CM_DPLL(base) + 0x4)
#define CM_DPLL_CLKSEL_TIMER2_CLK(base) REG32(CM_DPLL(base) + 0x8)
/* timer registers */
#define DMTIMER_TIDR(base) REG32(base + 0x0)
#define DMTIMER_TIOCP_CFG(base) REG32(base + 0x10)
#define DMTIMER_IRQ_EOI(base) REG32(base + 0x20)
#define DMTIMER_IRQSTATUS_RAW(base) REG32(base + 0x24)
#define DMTIMER_IRQSTATUS(base) REG32(base + 0x28)
#define DMTIMER_IRQENABLE_SET(base) REG32(base + 0x2C)
#define DMTIMER_IRQENABLE_CLR(base) REG32(base + 0x30)
#define DMTIMER_IRQWAKEEN(base) REG32(base + 0x34)
#define DMTIMER_TCLR(base) REG32(base + 0x38)
#define DMTIMER_TCRR(base) REG32(base + 0x3C)
#define DMTIMER_TLDR(base) REG32(base + 0x40)
#define DMTIMER_TTGR(base) REG32(base + 0x44)
#define DMTIMER_TWPS(base) REG32(base + 0x48)
#define DMTIMER_TMAR(base) REG32(base + 0x4C)
#define DMTIMER_TCAR(base, n) REG32(base + 0x50 + (((n) - 1) * 8))
#define DMTIMER_TSICR(base) REG32(base + 0x54)
#define EMU_INT 0
#define COMMTX_INT 1
#define COMMRX_INT 2
#define BENCH_INT 3
#define ELM_IRQ_INT 4
#define NMI_INT 7
#define L3DEBUG_INT 9
#define L3APP_INT 10
#define PRCM_INT 11
#define EDMACOMP_INT 12
#define EDMAMPERR_INT 13
#define EDMAERR_INT 14
#define ADC_TSC_GEN_INT 16
#define USBSS_INT 17
#define USB_INT0 18
#define USB_INT1 19
#define PRU_ICSS_EVTOUT0_INT 20
#define PRU_ICSS_EVTOUT1_INT 21
#define PRU_ICSS_EVTOUT2_INT 22
#define PRU_ICSS_EVTOUT3_INT 23
#define PRU_ICSS_EVTOUT4_INT 24
#define PRU_ICSS_EVTOUT5_INT 25
#define PRU_ICSS_EVTOUT6_INT 26
#define PRU_ICSS_EVTOUT7_INT 27
#define MMCSD1_INT 28
#define MMCSD2_INT 29
#define I2C2_INT 30
#define ECAP0_INT 31
#define GPIO_INT2A 32
#define GPIO_INT2B 33
#define USBWAKEUP_INT 34
#define LCDC_INT 36
#define GFX_INT 37
#define EPWM2_INT 39
#define CPSW_RXTHR0_INT 40
#define CPSW_RX_INT0 41
#define CPSW_TX_INT0 42
#define CPSW_MISC0_INT 43
#define UART3_INT 44
#define UART4_INT 45
#define UART5_INT 46
#define ECAP1_INT 47
#define DCAN0_INT0 52
#define DCAN0_INT1 53
#define DCAN0_PARITY 54
#define DCAN1_INT0 55
#define DCAN1_INT1 56
#define DCAN1_PARITY 57
#define EPWM0_TZINT 58
#define EPWM1_TZINT 59
#define EPWM2_TZINT 60
#define ECAP2_INT 61
#define GPIO_INT3A 62
#define GPIO_INT3B 63
#define MMCSD0_INT 64
#define MCSPI0_INT 65
#define TINT0 66
#define TINT1_1MS 67
#define TINT2 68
#define TINT3 69
#define I2C0_INT 70
#define I2C1_INT 71
#define UART0_INT 72
#define UART1_INT 73
#define UART2_INT 74
#define RTC_INT 75
#define RTC_ALARM_INT 76
#define MB_INT0 77
#define M3_TXEV 78
#define EQEP0_INT 79
#define MACTX_INT0 80
#define MCARX_INT0 81
#define MCATX_INT1 82
#define MCARX_INT1 83
#define EPWM0_INT 86
#define EPWM1_INT 87
#define EQEP1_INT 88
#define EQEP2_INT 89
#define DMA_INTR_PIN2 90
#define WDT1_INT 91
#define TINT4 92
#define TINT5 93
#define TINT6 94
#define TINT7 95
#define GPIO_INT0A 96
#define GPIO_INT0B 97
#define GPIO_INT1A 98
#define GPIO_INT1B 99
#define GPMC_INT 100
#define DDRERR0 101
#define TCERR_INT0 112
#define TCERR_INT1 113
#define TCERR_INT2 114
#define ADC_TSC_PEN_INT 115
#define SMRFLX_MPU 120
#define SMRFLX_CORE 121
#define DMA_INTR_PIN0 123
#define DMA_INTR_PIN1 124
#define MCSPI1_INT 125
/*
 * Snapshot of the ARM register file as saved on exception entry.
 * Field order matches the push order used by the exception stubs.
 */
struct rt_hw_register
{
unsigned long r0;
unsigned long r1;
unsigned long r2;
unsigned long r3;
unsigned long r4;
unsigned long r5;
unsigned long r6;
unsigned long r7;
unsigned long r8;
unsigned long r9;
unsigned long r10;
unsigned long fp;   /* r11, frame pointer */
unsigned long ip;   /* r12, intra-procedure scratch */
unsigned long sp;   /* r13, stack pointer */
unsigned long lr;   /* r14, link register */
unsigned long pc;   /* r15, program counter */
unsigned long cpsr; /* current program status register */
unsigned long ORIG_r0; /* NOTE(review): presumably original r0 preserved for restart semantics — confirm */
};
/* ARM processor mode values (CPSR[4:0]) and interrupt mask bits */
#define USERMODE 0x10
#define FIQMODE 0x11
#define IRQMODE 0x12
#define SVCMODE 0x13
#define ABORTMODE 0x17
#define UNDEFMODE 0x1b
#define MODEMASK 0x1f
#define NOINT 0xc0 /* I and F bits set: both IRQ and FIQ masked */
#endif

View File

@ -0,0 +1,88 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-05 Bernard the first version
*/
/*
* rt_base_t rt_hw_interrupt_disable();
*/
/* Return the current CPSR in r0, then mask both IRQ and FIQ. */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
mrs r0, cpsr        @ old status returned to the caller
cpsid if            @ set I and F bits (mask IRQ + FIQ)
bx lr
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
/* Restore the CPSR control field from the level saved by the matching
   rt_hw_interrupt_disable() call. */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
msr cpsr_c, r0      @ write control field only (mode + I/F bits)
bx lr
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
* r0 --> from
* r1 --> to
*/
/* Thread-to-thread switch (called outside interrupt context):
   push cpsr + full register file of the current thread, save sp into
   the "from" TCB (*r0), then restore the "to" thread from *r1. */
.globl rt_hw_context_switch
rt_hw_context_switch:
stmfd sp!, {lr} @ push pc (lr should be pushed in place of PC)
stmfd sp!, {r0-r12, lr} @ push lr & register file
mrs r4, cpsr
tst lr, #0x01
orrne r4, r4, #0x20 @ it's thumb code
stmfd sp!, {r4} @ push cpsr
str sp, [r0] @ store sp in preempted tasks TCB
ldr sp, [r1] @ get new task stack pointer
ldmfd sp!, {r4} @ pop new task cpsr to spsr
msr spsr_cxsf, r4
_do_switch:
ldmfd sp!, {r0-r12, lr, pc}^ @ pop new task r0-r12, lr & pc, copy spsr to cpsr
/*
* void rt_hw_context_switch_to(rt_uint32 to);
* r0 --> to
*/
/* First switch at scheduler start: no "from" thread to save, just
   restore the target thread's frame from *r0. */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
ldr sp, [r0] @ get new task stack pointer
ldmfd sp!, {r4} @ pop new task spsr
msr spsr_cxsf, r4
bic r4, r4, #0x20 @ must be ARM mode
msr cpsr_cxsf, r4
ldmfd sp!, {r0-r12, lr, pc}^ @ pop new task r0-r12, lr & pc
/*
* void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
*/
/* Request a context switch from interrupt context.  The switch is
   deferred: only the flag and the from/to thread pointers are recorded
   here, and the IRQ exit path performs the actual switch.  If a switch
   is already pending, only the destination thread is updated. */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
ldr r2, =rt_thread_switch_interrupt_flag
ldr r3, [r2]
cmp r3, #1
beq _reswitch
mov r3, #1 @ set rt_thread_switch_interrupt_flag to 1
str r3, [r2]
ldr r2, =rt_interrupt_from_thread @ set rt_interrupt_from_thread
str r0, [r2]
_reswitch:
ldr r2, =rt_interrupt_to_thread @ set rt_interrupt_to_thread
str r1, [r2]
bx lr

View File

@ -0,0 +1,86 @@
;/*
; * Copyright (c) 2006-2021, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2011-08-14 weety copy from mini2440
; * 2015-04-15 ArdaFu convert from context_gcc.s
; */
#define NOINT 0xc0
SECTION .text:CODE(6)
/*
* rt_base_t rt_hw_interrupt_disable();
*/
; Return the current CPSR in R0, then mask both IRQ and FIQ (NOINT = I|F).
PUBLIC rt_hw_interrupt_disable
rt_hw_interrupt_disable:
MRS R0, CPSR
ORR R1, R0, #NOINT
MSR CPSR_C, R1
MOV PC, LR
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 */
; Restore the CPSR from the level previously returned by
; rt_hw_interrupt_disable.
PUBLIC rt_hw_interrupt_enable
rt_hw_interrupt_enable:
MSR CPSR_CXSF, R0
MOV PC, LR
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
* r0 --> from
* r1 --> to
*/
; Thread-to-thread switch: push cpsr + full register file of the current
; thread, save SP into the "from" TCB (*R0), then restore the "to"
; thread's frame from *R1.
PUBLIC rt_hw_context_switch
rt_hw_context_switch:
STMFD SP!, {LR} ; push pc (lr should be pushed in place of PC)
STMFD SP!, {R0-R12, LR} ; push lr & register file
MRS R4, CPSR
TST LR, #0x01
ORRNE R4, R4, #0x20 ; it's thumb code
STMFD SP!, {R4} ; push cpsr
STR SP, [R0] ; store sp in preempted tasks TCB
LDR SP, [R1] ; get new task stack pointer
LDMFD SP!, {R4} ; pop new task spsr
MSR SPSR_cxsf, R4
LDMFD SP!, {R0-R12, LR, PC}^ ; pop new task r0-r12, lr & pc
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to
 */
; First switch at scheduler start: nothing to save, just restore the
; target thread's frame from *R0.
PUBLIC rt_hw_context_switch_to
rt_hw_context_switch_to:
LDR SP, [R0] ; get new task stack pointer
LDMFD SP!, {R4} ; pop new task spsr
MSR SPSR_cxsf, R4
BIC R4, R4, #0x20 ; must be ARM mode
MSR CPSR_CXSF, R4
LDMFD SP!, {R0-R12, LR, PC}^ ; pop new task r0-r12, lr & pc
/*
* void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
*/
; Request a context switch from interrupt context.  The switch is
; deferred: only the flag and from/to thread pointers are recorded, and
; the IRQ exit path performs the actual switch.  If one is already
; pending, only the destination thread is updated.
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
PUBLIC rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
LDR R2, =rt_thread_switch_interrupt_flag
LDR R3, [R2]
CMP R3, #1
BEQ _reswitch
MOV R3, #1 ; set flag to 1
STR R3, [R2]
LDR R2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR R0, [R2]
_reswitch:
LDR R2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR R1, [R2]
MOV PC, LR
END
END

View File

@ -0,0 +1,130 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-05 Bernard the first version
*/
/* Write the vector base address register (VBAR, CP15 c12) from r0. */
.globl rt_cpu_vector_set_base
rt_cpu_vector_set_base:
mcr p15, #0, r0, c12, c0, #0
dsb                 @ ensure the write is visible before returning
bx lr
/* Read the vector base address register into r0. */
.globl rt_cpu_vector_get_base
rt_cpu_vector_get_base:
mrc p15, #0, r0, c12, c0, #0
bx lr
/* Read the system control register (SCTLR, CP15 c1) into r0. */
.globl rt_cpu_get_sctlr
rt_cpu_get_sctlr:
mrc p15, #0, r0, c1, c0, #0
bx lr
/* Enable the data cache: set SCTLR.C (bit 2). */
.globl rt_cpu_dcache_enable
rt_cpu_dcache_enable:
mrc p15, #0, r0, c1, c0, #0
orr r0, r0, #0x00000004
mcr p15, #0, r0, c1, c0, #0
bx lr
/* Enable the instruction cache: set SCTLR.I (bit 12). */
.globl rt_cpu_icache_enable
rt_cpu_icache_enable:
mrc p15, #0, r0, c1, c0, #0
orr r0, r0, #0x00001000
mcr p15, #0, r0, c1, c0, #0
bx lr
/* Masks for extracting the maximum way and set indices from CCSIDR. */
_FLD_MAX_WAY:
.word 0x3ff
_FLD_MAX_IDX:
.word 0x7ff
/* Clean and invalidate the entire data cache by set/way.
   Walks every cache level reported by CLIDR up to the level of
   coherency; for each level it selects the cache (CSSELR), reads its
   geometry (CCSIDR), then issues DCCISW for every way/set combination. */
.globl rt_cpu_dcache_clean_flush
rt_cpu_dcache_clean_flush:
push {r4-r11}
dmb
mrc p15, #1, r0, c0, c0, #1 @ read clid register
ands r3, r0, #0x7000000 @ get level of coherency
mov r3, r3, lsr #23
beq finished
mov r10, #0             @ r10 = current cache level << 1
loop1:
add r2, r10, r10, lsr #1
mov r1, r0, lsr r2
and r1, r1, #7          @ cache type for this level
cmp r1, #2
blt skip                @ no data cache at this level
mcr p15, #2, r10, c0, c0, #0 @ select cache level (CSSELR)
isb
mrc p15, #1, r1, c0, c0, #0  @ read geometry (CCSIDR)
and r2, r1, #7
add r2, r2, #4          @ line length (log2 bytes)
ldr r4, _FLD_MAX_WAY
ands r4, r4, r1, lsr #3 @ max way number
clz r5, r4              @ way field bit position
ldr r7, _FLD_MAX_IDX
ands r7, r7, r1, lsr #13 @ max set number
loop2:
mov r9, r4
loop3:
orr r11, r10, r9, lsl r5
orr r11, r11, r7, lsl r2
mcr p15, #0, r11, c7, c14, #2 @ clean+invalidate by set/way (DCCISW)
subs r9, r9, #1
bge loop3
subs r7, r7, #1
bge loop2
skip:
add r10, r10, #2        @ next cache level
cmp r3, r10
bgt loop1
finished:
dsb
isb
pop {r4-r11}
bx lr
/* Disable the data cache (clear SCTLR.C) and then clean+invalidate it so
   no dirty lines are lost. */
.globl rt_cpu_dcache_disable
rt_cpu_dcache_disable:
push {r4-r11, lr}
mrc p15, #0, r0, c1, c0, #0
bic r0, r0, #0x00000004
mcr p15, #0, r0, c1, c0, #0
bl rt_cpu_dcache_clean_flush
pop {r4-r11, lr}
bx lr
/* Disable the instruction cache: clear SCTLR.I (bit 12). */
.globl rt_cpu_icache_disable
rt_cpu_icache_disable:
mrc p15, #0, r0, c1, c0, #0
bic r0, r0, #0x00001000
mcr p15, #0, r0, c1, c0, #0
bx lr
/* Invalidate the TLB, then disable the MMU (clear SCTLR.M). */
.globl rt_cpu_mmu_disable
rt_cpu_mmu_disable:
mcr p15, #0, r0, c8, c7, #0 @ invalidate tlb
mrc p15, #0, r0, c1, c0, #0
bic r0, r0, #1
mcr p15, #0, r0, c1, c0, #0 @ clear mmu bit
dsb
bx lr
/* Enable the MMU: set SCTLR.M. */
.globl rt_cpu_mmu_enable
rt_cpu_mmu_enable:
mrc p15, #0, r0, c1, c0, #0
orr r0, r0, #0x001
mcr p15, #0, r0, c1, c0, #0 @ set mmu enable bit
dsb
bx lr
/* Load the translation table base register (TTBR0, CP15 c2) from r0. */
.globl rt_cpu_tlb_set
rt_cpu_tlb_set:
mcr p15, #0, r0, c2, c0, #0
dmb
bx lr

View File

@ -0,0 +1,139 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2015-04-06 zchong change to iar compiler from convert from cp15_gcc.S
*/
SECTION .text:CODE:NOROOT(2)
ARM
EXPORT rt_cpu_vector_set_base
; void rt_cpu_vector_set_base(unsigned int addr);
; Program VBAR (vector base address register, CP15 c12) with r0.
rt_cpu_vector_set_base:
MCR p15, #0, r0, c12, c0, #0
DSB
BX lr
EXPORT rt_cpu_vector_get_base
; unsigned int rt_cpu_vector_get_base(void); returns VBAR in r0.
rt_cpu_vector_get_base:
MRC p15, #0, r0, c12, c0, #0
BX lr
EXPORT rt_cpu_get_sctlr
; unsigned int rt_cpu_get_sctlr(void); returns SCTLR in r0.
rt_cpu_get_sctlr:
MRC p15, #0, r0, c1, c0, #0
BX lr
EXPORT rt_cpu_dcache_enable
; void rt_cpu_dcache_enable(void); sets SCTLR.C (bit 2).
rt_cpu_dcache_enable:
MRC p15, #0, r0, c1, c0, #0
ORR r0, r0, #0x00000004
MCR p15, #0, r0, c1, c0, #0
BX lr
EXPORT rt_cpu_icache_enable
; void rt_cpu_icache_enable(void); sets SCTLR.I (bit 12).
rt_cpu_icache_enable:
MRC p15, #0, r0, c1, c0, #0
ORR r0, r0, #0x00001000
MCR p15, #0, r0, c1, c0, #0
BX lr
;_FLD_MAX_WAY DEFINE 0x3ff
;_FLD_MAX_IDX DEFINE 0x7ff
EXPORT rt_cpu_dcache_clean_flush
; void rt_cpu_dcache_clean_flush(void);
; Clean + invalidate the entire data/unified cache by set/way, walking
; every cache level reported by CLIDR up to the level of coherency.
rt_cpu_dcache_clean_flush:
PUSH {r4-r11}
DMB
MRC p15, #1, r0, c0, c0, #1 ; read clid register
ANDS r3, r0, #0x7000000 ; get level of coherency
MOV r3, r3, lsr #23 ; r3 = LoC * 2 (LoC is CLIDR bits [26:24])
BEQ finished ; nothing to do when LoC == 0
MOV r10, #0 ; r10 = (current level) << 1
loop1:
ADD r2, r10, r10, lsr #1 ; r2 = level * 3: bit offset of this level's CLIDR field
MOV r1, r0, lsr r2 ; extract this level's cache-type field
AND r1, r1, #7
CMP r1, #2 ; type < 2 means no data cache at this level
BLT skip
MCR p15, #2, r10, c0, c0, #0 ; select level via CSSELR
ISB
MRC p15, #1, r1, c0, c0, #0 ; read CCSIDR for the selected level
AND r2, r1, #7 ; line-size field
ADD r2, r2, #4 ; r2 = log2(line bytes) = set-index shift
;LDR r4, _FLD_MAX_WAY
LDR r4, =0x3FF
ANDS r4, r4, r1, lsr #3 ; r4 = number of ways - 1
CLZ r5, r4 ; r5 = shift placing the way index at bit 31
;LDR r7, _FLD_MAX_IDX
LDR r7, =0x7FF
ANDS r7, r7, r1, lsr #13 ; r7 = number of sets - 1
loop2:
MOV r9, r4 ; restart way counter for each set
loop3:
ORR r11, r10, r9, lsl r5 ; compose level | way
ORR r11, r11, r7, lsl r2 ; ... | set
MCR p15, #0, r11, c7, c14, #2 ; DCCISW: clean+invalidate by set/way
SUBS r9, r9, #1
BGE loop3
SUBS r7, r7, #1
BGE loop2
skip:
ADD r10, r10, #2 ; advance to the next cache level
CMP r3, r10
BGT loop1
finished:
DSB
ISB
POP {r4-r11}
BX lr
EXPORT rt_cpu_dcache_disable
; void rt_cpu_dcache_disable(void);
; Clear SCTLR.C (bit 2), then clean+invalidate the whole cache so no
; dirty line is lost once caching is off.
rt_cpu_dcache_disable:
PUSH {r4-r11, lr}
MRC p15, #0, r0, c1, c0, #0
BIC r0, r0, #0x00000004
MCR p15, #0, r0, c1, c0, #0
BL rt_cpu_dcache_clean_flush
POP {r4-r11, lr}
BX lr
EXPORT rt_cpu_icache_disable
; void rt_cpu_icache_disable(void); clears SCTLR.I (bit 12).
rt_cpu_icache_disable:
MRC p15, #0, r0, c1, c0, #0
BIC r0, r0, #0x00001000
MCR p15, #0, r0, c1, c0, #0
BX lr
EXPORT rt_cpu_mmu_disable
; void rt_cpu_mmu_disable(void); invalidate all TLBs, then clear SCTLR.M.
rt_cpu_mmu_disable:
MCR p15, #0, r0, c8, c7, #0 ; invalidate tlb
MRC p15, #0, r0, c1, c0, #0
BIC r0, r0, #1
MCR p15, #0, r0, c1, c0, #0 ; clear mmu bit
DSB
BX lr
EXPORT rt_cpu_mmu_enable
; void rt_cpu_mmu_enable(void); set SCTLR.M to turn the MMU on.
rt_cpu_mmu_enable:
MRC p15, #0, r0, c1, c0, #0
ORR r0, r0, #0x001
MCR p15, #0, r0, c1, c0, #0 ; set mmu enable bit
DSB
BX lr
EXPORT rt_cpu_tlb_set
; void rt_cpu_tlb_set(rt_uint32_t ttb); write r0 to TTBR0 (CP15 c2).
rt_cpu_tlb_set:
MCR p15, #0, r0, c2, c0, #0
DMB
BX lr
END

View File

@ -0,0 +1,197 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-09-15 Bernard first version
* 2022-09-20 YangZhongQing
* add IAR assembler
*/
#include <rthw.h>
#include <rtthread.h>
#include "am33xx.h"
/**
* @addtogroup AM33xx
*/
/*@{*/
/* SCTLR bit masks: I bit (12) = instruction cache, C bit (2) = data cache */
#define ICACHE_MASK (rt_uint32_t)(1 << 12)
#define DCACHE_MASK (rt_uint32_t)(1 << 2)
#if defined(__CC_ARM)
/* Read the CP15 control register (SCTLR, c1) -- ARMCC embedded assembler. */
rt_inline rt_uint32_t cp15_rd(void)
{
    rt_uint32_t i;
    __asm
    {
        mrc p15, 0, i, c1, c0, 0
    }
    return i;
}
/* Set the given bit mask in SCTLR (read-modify-write). */
rt_inline void cache_enable(rt_uint32_t bit)
{
    rt_uint32_t value;
    __asm
    {
        mrc p15, 0, value, c1, c0, 0
        orr value, value, bit
        mcr p15, 0, value, c1, c0, 0
    }
}
/* Clear the given bit mask in SCTLR (read-modify-write). */
rt_inline void cache_disable(rt_uint32_t bit)
{
    rt_uint32_t value;
    __asm
    {
        mrc p15, 0, value, c1, c0, 0
        bic value, value, bit
        mcr p15, 0, value, c1, c0, 0
    }
}
#elif defined(__GNUC__)
/* Read the CP15 control register (SCTLR, c1). */
rt_inline rt_uint32_t cp15_rd(void)
{
    rt_uint32_t i;

    asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
    return i;
}
/*
 * Set the given bit mask in SCTLR (read-modify-write).
 *
 * Fix: the original hard-coded r0 inside the asm template without listing
 * it in the clobber list, so the compiler was free to keep `bit` (or any
 * live value) in r0 and have it silently corrupted. Use a compiler-chosen
 * early-clobber scratch register instead, mirroring the __ICCARM__ variant.
 */
rt_inline void cache_enable(rt_uint32_t bit)
{
    rt_uint32_t tmp;

    __asm__ __volatile__( \
        "mrc p15,0,%0,c1,c0,0\n\t" \
        "orr %0,%0,%1\n\t" \
        "mcr p15,0,%0,c1,c0,0" \
        :"=&r"(tmp) \
        :"r" (bit) \
        :"memory");
}
/* Clear the given bit mask in SCTLR (read-modify-write); same scratch-
 * register fix as cache_enable(). */
rt_inline void cache_disable(rt_uint32_t bit)
{
    rt_uint32_t tmp;

    __asm__ __volatile__( \
        "mrc p15,0,%0,c1,c0,0\n\t" \
        "bic %0,%0,%1\n\t" \
        "mcr p15,0,%0,c1,c0,0" \
        :"=&r"(tmp) \
        :"r" (bit) \
        :"memory");
}
#elif defined(__ICCARM__)
/* Read the CP15 control register (SCTLR, c1). */
rt_inline rt_uint32_t cp15_rd(void)
{
    rt_uint32_t i;
    __asm volatile("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
    return i;
}
/* Set the given bit mask in SCTLR (read-modify-write).
 * NOTE(review): "+r" reads `tmp` uninitialized before the mrc overwrites
 * it; "=&r" would express the intent more precisely -- confirm. */
rt_inline void cache_enable(rt_uint32_t bit)
{
    rt_uint32_t tmp;
    __asm volatile( \
        "mrc p15,0,%0,c1,c0,0\n\t" \
        "orr %0,%0,%1\n\t" \
        "mcr p15,0,%0,c1,c0,0" \
        :"+r"(tmp) \
        :"r"(bit) \
        :"memory");
}
/* Clear the given bit mask in SCTLR (read-modify-write). */
rt_inline void cache_disable(rt_uint32_t bit)
{
    rt_uint32_t tmp;
    __asm volatile( \
        "mrc p15,0,%0,c1,c0,0\n\t" \
        "bic %0,%0,%1\n\t" \
        "mcr p15,0,%0,c1,c0,0" \
        :"+r"(tmp) \
        :"r"(bit) \
        :"memory");
}
#endif
/**
 * enable I-Cache
 *
 * Sets the I bit in SCTLR via the compiler-specific helper.
 */
void rt_hw_cpu_icache_enable(void)   /* was `()`: unprototyped pre-C23 */
{
    cache_enable(ICACHE_MASK);
}
/**
 * disable I-Cache
 */
void rt_hw_cpu_icache_disable(void)
{
    cache_disable(ICACHE_MASK);
}
/**
 * return the status of I-Cache
 *
 * @return nonzero (the I bit of SCTLR) when the I-cache is enabled, 0 otherwise
 */
rt_base_t rt_hw_cpu_icache_status(void)
{
    return (cp15_rd() & ICACHE_MASK);
}
/**
 * enable D-Cache
 *
 * Sets the C bit in SCTLR via the compiler-specific helper.
 */
void rt_hw_cpu_dcache_enable(void)   /* was `()`: unprototyped pre-C23 */
{
    cache_enable(DCACHE_MASK);
}
/**
 * disable D-Cache
 */
void rt_hw_cpu_dcache_disable(void)
{
    cache_disable(DCACHE_MASK);
}
/**
 * return the status of D-Cache
 *
 * @return nonzero (the C bit of SCTLR) when the D-cache is enabled, 0 otherwise
 */
rt_base_t rt_hw_cpu_dcache_status(void)
{
    return (cp15_rd() & DCACHE_MASK);
}
/**
 * shutdown CPU
 *
 * Disables interrupts and spins forever on RT_ASSERT(0).
 * NOTE(review): the loop body only runs while `level` (the saved interrupt
 * state) is nonzero, i.e. the function can return if interrupts were
 * already disabled -- confirm this is the intended behavior.
 */
void rt_hw_cpu_shutdown(void)
{
    rt_base_t level;    /* saved interrupt state */

    rt_kprintf("shutdown...\n");
    level = rt_hw_interrupt_disable();
    while (level)
    {
        RT_ASSERT(0);
    }
}
/*@}*/

View File

@ -0,0 +1,23 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef CPUPORT_H__
#define CPUPORT_H__
#ifdef RT_USING_SMP
/*
 * Ticket spinlock word: `next` is the ticket handed to the next waiter,
 * `owner` is the ticket currently being served. The union lets the whole
 * lock be read/written as a single word through `slock`.
 */
typedef union {
    unsigned long slock;
    struct __arch_tickets {
        unsigned short owner;
        unsigned short next;
    } tickets;
} rt_hw_spinlock_t;
#endif
#endif /*CPUPORT_H__*/

View File

@ -0,0 +1,223 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-06 Bernard first version
* 2015-11-06 zchong support iar compiler
*/
#include <rthw.h>
#include <rtthread.h>
#include "am33xx.h"
#include "interrupt.h"
#define AINTC_BASE AM33XX_AINTC_REGS
#define MAX_HANDLERS 128
extern volatile rt_atomic_t rt_interrupt_nest;
/* exception and interrupt handler table */
struct rt_irq_desc isr_table[MAX_HANDLERS];
rt_uint32_t rt_interrupt_from_thread, rt_interrupt_to_thread;
rt_uint32_t rt_thread_switch_interrupt_flag;
/**
* @addtogroup AM33xx
*/
/*@{*/
/* Dump the active IRQ number plus the raw MIR (mask) and ITR (raw status)
 * register banks of the AINTC to the console. */
void rt_dump_aintc(void)
{
    int bank;

    rt_kprintf("active irq %d", INTC_SIR_IRQ(AINTC_BASE));

    rt_kprintf("\n--- hw mask ---\n");
    for (bank = 0; bank < 4; bank++)
    {
        rt_kprintf("0x%08x, ", INTC_MIR(AINTC_BASE, bank));
    }

    rt_kprintf("\n--- hw itr ---\n");
    for (bank = 0; bank < 4; bank++)
    {
        rt_kprintf("0x%08x, ", INTC_ITR(AINTC_BASE, bank));
    }

    rt_kprintf("\n");
}
const unsigned int AM335X_VECTOR_BASE = 0x4030FC00;
extern void rt_cpu_vector_set_base(unsigned int addr);
#ifdef __ICCARM__
extern int __vector;
#else
extern int system_vectors;
#endif
/* Copy the 16-word exception vector image to the dedicated on-chip RAM
 * area and point the CPU's vector base register at it. */
static void rt_hw_vector_init(void)
{
    unsigned int *dest = (unsigned int *)AM335X_VECTOR_BASE;
#ifdef __ICCARM__
    unsigned int *src =  (unsigned int *)&__vector;         /* IAR linker symbol */
#else
    unsigned int *src =  (unsigned int *)&system_vectors;   /* GCC vector table symbol */
#endif
    /* 16 words = 64 bytes of vector image */
    rt_memcpy(dest, src, 16 * 4);
    rt_cpu_vector_set_base(AM335X_VECTOR_BASE);
}
/**
 * This function will initialize hardware interrupt
 *
 * Soft-resets the AM33xx interrupt controller (AINTC), opens the priority
 * threshold, installs the vector table, and clears the software ISR table
 * and context-switch bookkeeping.
 */
void rt_hw_interrupt_init(void)
{
    /* Reset the ARM interrupt controller */
    INTC_SYSCONFIG(AINTC_BASE) = INTC_SYSCONFIG_SOFTRESET;
    /* Wait for the reset to complete */
    while((INTC_SYSSTATUS(AINTC_BASE)
          & INTC_SYSSTATUS_RESETDONE) != INTC_SYSSTATUS_RESETDONE);
    /* Enable any interrupt generation by setting priority threshold */
    INTC_THRESHOLD(AINTC_BASE) = INTC_THRESHOLD_PRIORITYTHRESHOLD;
    /* initialize vector table */
    rt_hw_vector_init();
    /* init exceptions table */
    rt_memset(isr_table, 0x00, sizeof(isr_table));
    /* init interrupt nest, and context in thread sp */
    rt_interrupt_nest = 0;
    rt_interrupt_from_thread = 0;
    rt_interrupt_to_thread = 0;
    rt_thread_switch_interrupt_flag = 0;
}
/**
 * This function will mask a interrupt.
 * @param vector the interrupt number
 */
void rt_hw_interrupt_mask(int vector)
{
    int bank = vector >> 5;    /* 32 interrupt lines per MIR bank */

    INTC_MIR_SET(AINTC_BASE, bank) = 0x1 << (vector & 0x1f);
}
/**
 * This function will un-mask a interrupt.
 * @param vector the interrupt number
 */
void rt_hw_interrupt_umask(int vector)
{
    int bank = vector >> 5;    /* 32 interrupt lines per MIR bank */

    INTC_MIR_CLEAR(AINTC_BASE, bank) = 0x1 << (vector & 0x1f);
}
/**
 * This function will control the interrupt attribute.
 * @param vector   the interrupt number
 * @param priority the priority written to the ILR priority field
 * @param route    0 to route the line to IRQ, nonzero to route it to FIQ
 */
void rt_hw_interrupt_control(int vector, int priority, int route)
{
    int fiq = (route != 0) ? 1 : 0;

    INTC_ILR(AINTC_BASE, vector) = ((priority << 0x02) & 0x1FC) | fiq;
}
/**
 * Read the number of the interrupt currently being serviced.
 * @param fiq_irq INT_FIQ to query the active FIQ, anything else for IRQ
 * @return the active interrupt number (low 7 bits of the SIR register)
 */
int rt_hw_interrupt_get_active(int fiq_irq)
{
    int active;

    active = (fiq_irq == INT_FIQ) ? (INTC_SIR_FIQ(AINTC_BASE) & 0x7f)
                                  : (INTC_SIR_IRQ(AINTC_BASE) & 0x7f);
    return active;
}
/**
 * Acknowledge the current interrupt so the AINTC can generate the next
 * FIQ (CONTROL bit 1) or IRQ (CONTROL bit 0).
 * @param fiq_irq INT_FIQ to re-arm FIQ generation, anything else for IRQ
 */
void rt_hw_interrupt_ack(int fiq_irq)
{
    INTC_CONTROL(AINTC_BASE) |= (fiq_irq == INT_FIQ) ? 0x02 : 0x01;
}
/**
 * This function will install a interrupt service routine to a interrupt.
 * @param vector the interrupt number
 * @param handler the interrupt service routine to be installed
 * @param param argument passed to the handler when the interrupt fires
 * @param name human-readable name (used only with RT_USING_INTERRUPT_INFO;
 *             may be RT_NULL)
 * @return the previously installed handler, or RT_NULL if `vector` is
 *         out of range
 */
rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
        void *param, const char *name)
{
    rt_isr_handler_t old_handler = RT_NULL;

    /* reject out-of-range vectors: a negative value previously indexed
     * isr_table out of bounds */
    if (vector >= 0 && vector < MAX_HANDLERS)
    {
        old_handler = isr_table[vector].handler;
        if (handler != RT_NULL)
        {
#ifdef RT_USING_INTERRUPT_INFO
            /* guard against a NULL name reaching rt_strncpy */
            if (name != RT_NULL)
            {
                rt_strncpy(isr_table[vector].name, name, RT_NAME_MAX);
            }
#endif /* RT_USING_INTERRUPT_INFO */
            isr_table[vector].handler = handler;
            isr_table[vector].param = param;
        }
    }
    return old_handler;
}
/**
 * This function will trigger an interrupt.
 * @param vector the interrupt number
 */
void rt_hw_interrupt_trigger(int vector)
{
    int bank = vector >> 5;    /* 32 lines per ISR_SET bank */

    INTC_ISR_SET(AINTC_BASE, bank) = 1 << (vector & 0x1f);
}
/**
 * Clear a previously software-triggered interrupt.
 * @param vector the interrupt number
 */
void rt_hw_interrupt_clear(int vector)
{
    int bank = vector >> 5;    /* 32 lines per ISR_CLEAR bank */

    INTC_ISR_CLEAR(AINTC_BASE, bank) = 1 << (vector & 0x1f);
}
/* Print every entry of the software ISR table to the console. */
void rt_dump_isr_table(void)
{
    int idx;

    for(idx = 0; idx < MAX_HANDLERS; idx++)
    {
#ifdef RT_USING_INTERRUPT_INFO
        /*
         * Fix: the original "%*.s" has an empty precision (i.e. 0), which
         * prints RT_NAME_MAX spaces and never shows the name. "%-*.*s"
         * left-justifies the name in an RT_NAME_MAX-wide field and bounds
         * the read to RT_NAME_MAX chars (the name array may not be
         * NUL-terminated after a truncating rt_strncpy).
         */
        rt_kprintf("nr:%4d, name: %-*.*s, handler: 0x%p, param: 0x%08x\r\n",
                   idx, RT_NAME_MAX, RT_NAME_MAX, isr_table[idx].name,
                   isr_table[idx].handler, isr_table[idx].param);
#else
        rt_kprintf("nr:%4d, handler: 0x%p, param: 0x%08x\r\n",
                   idx, isr_table[idx].handler, isr_table[idx].param);
#endif
    }
}
/*@}*/

View File

@ -0,0 +1,255 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-06 Bernard first version
*/
#ifndef __INTERRUPT_H__
#define __INTERRUPT_H__
#define INT_IRQ 0x00
#define INT_FIQ 0x01
/*************************************************************************\
* Registers Definition
\*************************************************************************/
#define INTC_REVISION(hw_base) REG32((hw_base) + 0x0)
#define INTC_SYSCONFIG(hw_base) REG32((hw_base) + 0x10)
#define INTC_SYSSTATUS(hw_base) REG32((hw_base) + 0x14)
#define INTC_SIR_IRQ(hw_base) REG32((hw_base) + 0x40)
#define INTC_SIR_FIQ(hw_base) REG32((hw_base) + 0x44)
#define INTC_CONTROL(hw_base) REG32((hw_base) + 0x48)
#define INTC_PROTECTION(hw_base) REG32((hw_base) + 0x4c)
#define INTC_IDLE(hw_base) REG32((hw_base) + 0x50)
#define INTC_IRQ_PRIORITY(hw_base) REG32((hw_base) + 0x60)
#define INTC_FIQ_PRIORITY(hw_base) REG32((hw_base) + 0x64)
#define INTC_THRESHOLD(hw_base) REG32((hw_base) + 0x68)
#define INTC_SICR(hw_base) REG32((hw_base) + 0x6c)
#define INTC_SCR(hw_base, n) REG32((hw_base) + 0x70 + ((n) * 0x04))
#define INTC_ITR(hw_base, n) REG32((hw_base) + 0x80 + ((n) * 0x20))
#define INTC_MIR(hw_base, n) REG32((hw_base) + 0x84 + ((n) * 0x20))
#define INTC_MIR_CLEAR(hw_base, n) REG32((hw_base) + 0x88 + ((n) * 0x20))
#define INTC_MIR_SET(hw_base, n) REG32((hw_base) + 0x8c + ((n) * 0x20))
#define INTC_ISR_SET(hw_base, n) REG32((hw_base) + 0x90 + ((n) * 0x20))
#define INTC_ISR_CLEAR(hw_base, n) REG32((hw_base) + 0x94 + ((n) * 0x20))
#define INTC_PENDING_IRQ(hw_base, n) REG32((hw_base) + 0x98 + ((n) * 0x20))
#define INTC_PENDING_FIQ(hw_base, n) REG32((hw_base) + 0x9c + ((n) * 0x20))
#define INTC_ILR(hw_base, n) REG32((hw_base) + 0x100 + ((n) * 0x04))
/**************************************************************************\
* Field Definition Macros
\**************************************************************************/
/* REVISION */
#define INTC_REVISION_REV (0x000000FFu)
#define INTC_REVISION_REV_SHIFT (0x00000000u)
/* SYSCONFIG */
#define INTC_SYSCONFIG_SOFTRESET (0x00000002u)
#define INTC_SYSCONFIG_SOFTRESET_SHIFT (0x00000001u)
#define INTC_SYSCONFIG_AUTOIDLE (0x00000001u)
#define INTC_SYSCONFIG_AUTOIDLE_SHIFT (0x00000000u)
/* SYSSTATUS */
#define INTC_SYSSTATUS_RESETDONE (0x00000001u)
#define INTC_SYSSTATUS_RESETDONE_SHIFT (0x00000000u)
/* SIR_IRQ */
#define INTC_SIR_IRQ_SPURIOUSIRQ (0xFFFFFF80u)
#define INTC_SIR_IRQ_SPURIOUSIRQ_SHIFT (0x00000007u)
#define INTC_SIR_IRQ_ACTIVEIRQ (0x0000007F)
#define INTC_SIR_IRQ_ACTIVEIRQ_SHIFT (0x00000000)
/* SIR_FIQ */
#define INTC_SIR_FIQ_SPURIOUSFIQ (0xFFFFFF80)
#define INTC_SIR_FIQ_SPURIOUSFIQ_SHIFT (0x00000007)
#define INTC_SIR_FIQ_ACTIVEFIQ (0x0000007F)
#define INTC_SIR_FIQ_ACTIVEFIQ_SHIFT (0x00000000)
/* CONTROL */
#define INTC_CONTROL_NEWFIQAGR (0x00000002)
#define INTC_CONTROL_NEWFIQAGR_SHIFT (0x00000001)
#define INTC_CONTROL_NEWIRQAGR (0x00000001)
#define INTC_CONTROL_NEWIRQAGR_SHIFT (0x00000000)
/* PROTECTION */
#define INTC_PROTECTION_PROTECTION (0x00000001u)
#define INTC_PROTECTION_PROTECTION_SHIFT (0x00000000u)
/* IDLE */
#define INTC_IDLE_TURBO (0x00000002u)
#define INTC_IDLE_TURBO_SHIFT (0x00000001u)
#define INTC_IDLE_FUNCIDLE (0x00000001u)
#define INTC_IDLE_FUNCIDLE_SHIFT (0x00000000u)
/* IRQ_PRIORITY */
#define INTC_IRQ_PRIORITY_SPURIOUSIRQFLAG (0xFFFFFFC0u)
#define INTC_IRQ_PRIORITY_SPURIOUSIRQFLAG_SHIFT (0x00000006u)
#define INTC_IRQ_PRIORITY_IRQPRIORITY (0x0000003Fu)
#define INTC_IRQ_PRIORITY_IRQPRIORITY_SHIFT (0x00000000u)
/* FIQ_PRIORITY */
#define INTC_FIQ_PRIORITY_SPURIOUSFIQFLAG (0xFFFFFFC0u)
#define INTC_FIQ_PRIORITY_SPURIOUSFIQFLAG_SHIFT (0x00000006u)
#define INTC_FIQ_PRIORITY_FIQPRIORITY (0x0000003Fu)
#define INTC_FIQ_PRIORITY_FIQPRIORITY_SHIFT (0x00000000u)
/* THRESHOLD */
#define INTC_THRESHOLD_PRIORITYTHRESHOLD (0x000000FFu)
#define INTC_THRESHOLD_PRIORITYTHRESHOLD_SHIFT (0x00000000u)
/* SICR */
#define INTC_SICR_GLOBALMASK (0x00000040u)
#define INTC_SICR_GLOBALMASK_SHIFT (0x00000006u)
#define INTC_SICR_SOFTRESETINH (0x00000020u)
#define INTC_SICR_SOFTRESETINH_SHIFT (0x00000005u)
#define INTC_SICR_PUBLICMASKFEEDBACK (0x00000010u)
#define INTC_SICR_PUBLICMASKFEEDBACK_SHIFT (0x00000004u)
#define INTC_SICR_PUBLICINHIBIT (0x00000008u)
#define INTC_SICR_PUBLICINHIBIT_SHIFT (0x00000003u)
#define INTC_SICR_AUTOINHIBIT (0x00000004u)
#define INTC_SICR_AUTOINHIBIT_SHIFT (0x00000002u)
#define INTC_SICR_SSMFIQENABLE (0x00000002u)
#define INTC_SICR_SSMFIQENABLE_SHIFT (0x00000001u)
#define INTC_SICR_SSMFIQSTATUS (0x00000001u)
#define INTC_SICR_SSMFIQSTATUS_SHIFT (0x00000000u)
/* SCR0 */
#define INTC_SCR0_SECUREENABLE (0xFFFFFFFFu)
#define INTC_SCR0_SECUREENABLE_SHIFT (0x00000000u)
/* SCR1 */
#define INTC_SCR1_SECUREENABLE (0xFFFFFFFFu)
#define INTC_SCR1_SECUREENABLE_SHIFT (0x00000000u)
/* SCR2 */
#define INTC_SCR2_SECUREENABLE (0xFFFFFFFFu)
#define INTC_SCR2_SECUREENABLE_SHIFT (0x00000000u)
/* ITR0 */
#define INTC_ITR0_ITR (0xFFFFFFFFu)
#define INTC_ITR0_ITR_SHIFT (0x00000000u)
/* MIR0 */
#define INTC_MIR0_MIR (0xFFFFFFFFu)
#define INTC_MIR0_MIR_SHIFT (0x00000000u)
/* MIR_CLEAR0 */
#define INTC_MIR_CLEAR0_MIRCLEAR (0xFFFFFFFFu)
#define INTC_MIR_CLEAR0_MIRCLEAR_SHIFT (0x00000000u)
/* MIR_SET0 */
#define INTC_MIR_SET0_MIRSET (0xFFFFFFFFu)
#define INTC_MIR_SET0_MIRSET_SHIFT (0x00000000u)
/* ISR_SET0 */
#define INTC_ISR_SET0_ISRSET (0xFFFFFFFFu)
#define INTC_ISR_SET0_ISRSET_SHIFT (0x00000000u)
/* ISR_CLEAR0 */
#define INTC_ISR_CLEAR0_ISRCLEAR (0xFFFFFFFFu)
#define INTC_ISR_CLEAR0_ISRCLEAR_SHIFT (0x00000000u)
/* PENDING_IRQ0 */
#define INTC_PENDING_IRQ0_PENDING_IRQ (0xFFFFFFFFu)
#define INTC_PENDING_IRQ0_PENDING_IRQ_SHIFT (0x00000000u)
/* PENDING_FIQ0 */
#define INTC_PENDING_FIQ0_PENDING_FIQ (0xFFFFFFFFu)
#define INTC_PENDING_FIQ0_PENDING_FIQ_SHIFT (0x00000000u)
/* ITR1 */
#define INTC_ITR1_ITR (0xFFFFFFFFu)
#define INTC_ITR1_ITR_SHIFT (0x00000000u)
/* MIR1 */
#define INTC_MIR1_MIR (0xFFFFFFFFu)
#define INTC_MIR1_MIR_SHIFT (0x00000000u)
/* MIR_CLEAR1 */
#define INTC_MIR_CLEAR1_MIRCLEAR (0xFFFFFFFFu)
#define INTC_MIR_CLEAR1_MIRCLEAR_SHIFT (0x00000000u)
/* MIR_SET1 */
#define INTC_MIR_SET1_MIRSET (0xFFFFFFFFu)
#define INTC_MIR_SET1_MIRSET_SHIFT (0x00000000u)
/* ISR_SET1 */
#define INTC_ISR_SET1_ISRSET (0xFFFFFFFFu)
#define INTC_ISR_SET1_ISRSET_SHIFT (0x00000000u)
/* ISR_CLEAR1 */
#define INTC_ISR_CLEAR1_ISRCLEAR (0xFFFFFFFFu)
#define INTC_ISR_CLEAR1_ISRCLEAR_SHIFT (0x00000000u)
/* PENDING_IRQ1 */
#define INTC_PENDING_IRQ1_PENDING_IRQ (0xFFFFFFFFu)
#define INTC_PENDING_IRQ1_PENDING_IRQ_SHIFT (0x00000000u)
/* PENDING_FIQ1 */
#define INTC_PENDING_FIQ1_PENDING_FIQ (0xFFFFFFFFu)
#define INTC_PENDING_FIQ1_PENDING_FIQ_SHIFT (0x00000000u)
/* ITR2 */
#define INTC_ITR2_ITR (0xFFFFFFFFu)
#define INTC_ITR2_ITR_SHIFT (0x00000000u)
/* MIR2 */
#define INTC_MIR2_MIR (0xFFFFFFFFu)
#define INTC_MIR2_MIR_SHIFT (0x00000000u)
/* MIR_CLEAR2 */
#define INTC_MIR_CLEAR2_MIRCLEAR (0xFFFFFFFFu)
#define INTC_MIR_CLEAR2_MIRCLEAR_SHIFT (0x00000000u)
/* MIR_SET2 */
#define INTC_MIR_SET2_MIRSET (0xFFFFFFFFu)
#define INTC_MIR_SET2_MIRSET_SHIFT (0x00000000u)
/* ISR_SET2 */
#define INTC_ISR_SET2_ISRSET (0xFFFFFFFFu)
#define INTC_ISR_SET2_ISRSET_SHIFT (0x00000000u)
/* ISR_CLEAR2 */
#define INTC_ISR_CLEAR2_ISRCLEAR (0xFFFFFFFFu)
#define INTC_ISR_CLEAR2_ISRCLEAR_SHIFT (0x00000000u)
/* PENDING_IRQ2 */
#define INTC_PENDING_IRQ2_PENDING_IRQ (0xFFFFFFFFu)
#define INTC_PENDING_IRQ2_PENDING_IRQ_SHIFT (0x00000000u)
/* PENDING_FIQ2 */
#define INTC_PENDING_FIQ2_PENDING_FIQ (0xFFFFFFFFu)
#define INTC_PENDING_FIQ2_PENDING_FIQ_SHIFT (0x00000000u)
/* ILR */
#define INTC_ILR_PRIORITY (0x000001FCu)
#define INTC_ILR_PRIORITY_SHIFT (0x00000002u)
#define INTC_ILR_FIQNIRQ (0x00000001u)
#define INTC_ILR_FIQNIRQ_SHIFT (0x00000000u)
void rt_hw_interrupt_control(int vector, int priority, int route);
int rt_hw_interrupt_get_active(int fiq_irq);
void rt_hw_interrupt_ack(int fiq_irq);
void rt_hw_interrupt_trigger(int vector);
void rt_hw_interrupt_clear(int vector);
#endif

View File

@ -0,0 +1,189 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-01-10 bernard porting to AM1808
*/
#include <rtthread.h>
#include "am33xx.h"
#include <mmu.h>
extern void rt_cpu_dcache_disable(void);
extern void rt_hw_cpu_dcache_enable(void);
extern void rt_cpu_icache_disable(void);
extern void rt_hw_cpu_icache_enable(void);
extern void rt_cpu_mmu_disable(void);
extern void rt_cpu_mmu_enable(void);
extern void rt_cpu_tlb_set(register rt_uint32_t i);
/* Thin C wrappers around the assembly cache/MMU helpers, so the MMU
 * setup code below reads uniformly. `(void)` prototypes replace the
 * original unprototyped `()` declarators. */
void mmu_disable_dcache(void)
{
    rt_cpu_dcache_disable();
}
void mmu_enable_dcache(void)
{
    rt_hw_cpu_dcache_enable();
}
void mmu_disable_icache(void)
{
    rt_cpu_icache_disable();
}
void mmu_enable_icache(void)
{
    rt_hw_cpu_icache_enable();
}
void mmu_disable(void)
{
    rt_cpu_mmu_disable();
}
void mmu_enable(void)
{
    rt_cpu_mmu_enable();
}
/* Install the level-1 translation table: invalidate all TLBs, set every
 * domain to "client" (accesses checked against page-table permissions),
 * then write the table base via the assembly TTBR helper.
 * @param i physical/flat address of the level-1 page table */
void mmu_setttbase(register rt_uint32_t i)
{
    register rt_uint32_t value;

    /* Invalidates all TLBs. Domain access is selected as
     * client by configuring domain access register,
     * in that case access controlled by permission value
     * set by page table entry
     */
    value = 0;
    asm volatile ("mcr p15, 0, %0, c8, c7, 0"::"r"(value));    /* invalidate TLBs */
    value = 0x55555555;    /* 0b01 (client) for all 16 domains */
    asm volatile ("mcr p15, 0, %0, c3, c0, 0"::"r"(value));    /* domain access register */
    rt_cpu_tlb_set(i);
}
/* Write the raw value `i` into the CP15 domain access control register. */
void mmu_set_domain(register rt_uint32_t i)
{
    asm volatile ("mcr p15,0, %0, c3, c0,  0": :"r" (i));
}
/* Enable alignment-fault checking: set bit 1 (A) of the CP15 control
 * register. `(void)` replaces the original unprototyped `()`. */
void mmu_enable_alignfault(void)
{
    register rt_uint32_t i;

    /* read control register */
    asm volatile ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
    i |= (1 << 1);
    /* write back to control register */
    asm volatile ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
}
/* Disable alignment-fault checking: clear bit 1 (A) of the CP15 control
 * register. */
void mmu_disable_alignfault(void)
{
    register rt_uint32_t i;

    /* read control register */
    asm volatile ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
    i &= ~(1 << 1);
    /* write back to control register */
    asm volatile ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
}
/* Clean and invalidate one data-cache line by set/way (CP15 c7,c14,2).
 * @param index pre-composed set/way/level operand for the operation */
void mmu_clean_invalidated_cache_index(int index)
{
    asm volatile ("mcr p15, 0, %0, c7, c14, 2": :"r" (index));
}
/* Clean (write back) the data-cache lines covering [buffer, buffer+size).
 * The start address is rounded down to a 32-byte cache-line boundary.
 * @param buffer start address of the region
 * @param size   length of the region in bytes */
void mmu_clean_dcache(rt_uint32_t buffer, rt_uint32_t size)
{
    unsigned int ptr = buffer & ~0x1f;         /* align down to a cache line */
    unsigned int end = buffer + size;          /* hoisted loop-invariant bound */

    while (ptr < end)
    {
        /* clean one line by MVA (CP15 c7,c10,1) */
        asm volatile ("mcr p15, 0, %0, c7, c10, 1": :"r" (ptr));
        ptr += 32;
    }
}
/* Invalidate (discard) the data-cache lines covering [buffer, buffer+size).
 * The start address is rounded down to a 32-byte cache-line boundary.
 * @param buffer start address of the region
 * @param size   length of the region in bytes */
void mmu_invalidate_dcache(rt_uint32_t buffer, rt_uint32_t size)
{
    unsigned int ptr = buffer & ~0x1f;         /* align down to a cache line */
    unsigned int end = buffer + size;          /* hoisted loop-invariant bound */

    while (ptr < end)
    {
        /* invalidate one line by MVA (CP15 c7,c6,1) */
        asm volatile ("mcr p15, 0, %0, c7, c6, 1": :"r" (ptr));
        ptr += 32;
    }
}
/* Invalidate all TLB entries (CP15 c8,c7,0). `(void)` replaces the
 * original unprototyped `()`. */
void mmu_invalidate_tlb(void)
{
    asm volatile ("mcr p15, 0, %0, c8, c7, 0": :"r" (0));
}
/* Invalidate the entire instruction cache (CP15 c7,c5,0). */
void mmu_invalidate_icache(void)
{
    asm volatile ("mcr p15, 0, %0, c7, c5, 0": :"r" (0));
}
/* level1 page table */
static volatile unsigned int _page_table[4*1024] __attribute__((aligned(16*1024)));
/* Fill level-1 section entries mapping [vaddrStart, vaddrEnd] (inclusive,
 * in 1MB sections) onto physical memory starting at paddrStart, each entry
 * carrying the given attribute bits. */
void mmu_setmtt(rt_uint32_t vaddrStart, rt_uint32_t vaddrEnd, rt_uint32_t paddrStart, rt_uint32_t attr)
{
    volatile rt_uint32_t *entry;
    int sec;
    int num_sections;

    entry = (rt_uint32_t *)_page_table + (vaddrStart >> 20);
    num_sections = (int)((vaddrEnd >> 20) - (vaddrStart >> 20));

    for (sec = 0; sec <= num_sections; sec++)
    {
        *entry++ = attr | (((paddrStart >> 20) + sec) << 20);
    }
}
/* set page table: default AM33xx section mappings (weak; a BSP may override) */
rt_weak void mmu_setmtts(void)
{
    mmu_setmtt(0x00000000, 0xFFFFFFFF, 0x00000000, RW_NCNB);   /* default: whole 4G non-cached */
    mmu_setmtt(0x80200000, 0x80800000 - 1, 0x80200000, RW_CB);   /* 6M cached DDR memory (0x600000 bytes) */
    mmu_setmtt(0x80000000, 0x80200000 - 1, 0x80000000, RW_NCNB); /* 2M none-cached DDR memory */
    mmu_setmtt(0x402F0000, 0x40300000 - 1, 0x402F0000, RW_CB);   /* 64K cached on-chip memory */
}
/* Build the level-1 page table and bring up the MMU and both caches.
 * Order matters: caches and MMU are switched off before the table is
 * written, and re-enabled only after the table base is installed. */
void rt_hw_mmu_init(void)
{
    /* disable I/D cache */
    mmu_disable_dcache();
    mmu_disable_icache();
    mmu_disable();
    mmu_invalidate_tlb();
    /* fill in the section mappings (weak hook, see mmu_setmtts) */
    mmu_setmtts();
    /* set MMU table address */
    mmu_setttbase((rt_uint32_t)_page_table);
    /* enables MMU */
    mmu_enable();
    /* enable Instruction Cache */
    mmu_enable_icache();
    /* enable Data Cache */
    mmu_enable_dcache();
}

View File

@ -0,0 +1,40 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-01-10 bernard porting to AM1808
*/
#ifndef __MMU_H__
#define __MMU_H__
#include <rtthread.h>
/* Level-1 section descriptor fields (1MB sections) */
#define DESC_SEC       (0x2)           /* descriptor type: section */
#define CB             (3<<2)          /* cacheable, write-back */
#define CNB            (2<<2)          /* cacheable, write-through */
#define NCB            (1<<2)          /* non-cacheable, write buffer on */
#define NCNB           (0<<2)          /* non-cacheable, write buffer off */
#define AP_RW          (3<<10)         /* access: supervisor=RW, user=RW */
#define AP_RO          (2<<10)         /* access: supervisor=RW, user=RO */
/* Domain access control values */
#define DOMAIN_FAULT   (0x0)           /* all accesses fault */
#define DOMAIN_CHK     (0x1)           /* client: check page-table permissions */
#define DOMAIN_NOTCHK  (0x3)           /* manager: no permission checks */
#define DOMAIN0        (0x0<<5)        /* descriptor domain field */
#define DOMAIN1        (0x1<<5)
#define DOMAIN0_ATTR   (DOMAIN_CHK<<0)
#define DOMAIN1_ATTR   (DOMAIN_FAULT<<2)
/* Composite attributes used by mmu_setmtt() */
#define RW_CB          (AP_RW|DOMAIN0|CB|DESC_SEC)     /* Read/Write, cache, write back */
#define RW_CNB         (AP_RW|DOMAIN0|CNB|DESC_SEC)    /* Read/Write, cache, write through */
#define RW_NCNB        (AP_RW|DOMAIN0|NCNB|DESC_SEC)   /* Read/Write without cache and write buffer */
#define RW_FAULT       (AP_RW|DOMAIN1|NCNB|DESC_SEC)   /* Read/Write without cache and write buffer */
void rt_hw_mmu_init(void);
#endif

View File

@ -0,0 +1,64 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-09-23 Bernard the first version
* 2011-10-05 Bernard add thumb mode
*/
#include <rtthread.h>
#include "am33xx.h"
/**
* @addtogroup AM33xx
*/
/*@{*/
/**
 * This function will initialize thread stack
 *
 * Lays down an initial exception-return frame so the scheduler can
 * "resume" the new thread: cpsr, r0-r12, lr(=texit), pc(=tentry),
 * with r0 carrying the thread argument.
 *
 * @param tentry the entry of thread
 * @param parameter the parameter of entry
 * @param stack_addr the beginning stack address
 * @param texit the function will be called when thread exit
 *
 * @return stack address
 */
rt_uint8_t *rt_hw_stack_init(void *tentry, void *parameter,
        rt_uint8_t *stack_addr, void *texit)
{
    rt_uint32_t *stk;

    /* make the top word inclusive, then 8-byte align downward
     * (presumably for AAPCS stack alignment -- TODO confirm) */
    stack_addr += sizeof(rt_uint32_t);
    stack_addr  = (rt_uint8_t *)RT_ALIGN_DOWN((rt_uint32_t)stack_addr, 8);
    stk      = (rt_uint32_t *)stack_addr;
    *(--stk) = (rt_uint32_t)tentry;        /* entry point */
    *(--stk) = (rt_uint32_t)texit;         /* lr */
    *(--stk) = 0xdeadbeef;                 /* r12 */
    *(--stk) = 0xdeadbeef;                 /* r11 */
    *(--stk) = 0xdeadbeef;                 /* r10 */
    *(--stk) = 0xdeadbeef;                 /* r9 */
    *(--stk) = 0xdeadbeef;                 /* r8 */
    *(--stk) = 0xdeadbeef;                 /* r7 */
    *(--stk) = 0xdeadbeef;                 /* r6 */
    *(--stk) = 0xdeadbeef;                 /* r5 */
    *(--stk) = 0xdeadbeef;                 /* r4 */
    *(--stk) = 0xdeadbeef;                 /* r3 */
    *(--stk) = 0xdeadbeef;                 /* r2 */
    *(--stk) = 0xdeadbeef;                 /* r1 */
    *(--stk) = (rt_uint32_t)parameter;     /* r0 : argument */
    /* cpsr: Thumb bit (0x20) set when the entry address has bit 0 set */
    if ((rt_uint32_t)tentry & 0x01)
        *(--stk) = SVCMODE | 0x20;         /* thumb mode */
    else
        *(--stk) = SVCMODE;                /* arm mode */
    /* return task's current stack address */
    return (rt_uint8_t *)stk;
}
/*@}*/

Some files were not shown because too many files have changed in this diff Show More