原始版本
This commit is contained in:
40
RT_Thread/libcpu/aarch64/common/SConscript
Normal file
40
RT_Thread/libcpu/aarch64/common/SConscript
Normal file
@ -0,0 +1,40 @@
|
||||
# RT-Thread building script for component

import os

from building import *

Import('rtconfig')

cwd = GetCurrentDir()
src = Glob('*.c') + Glob('*.cpp') + Glob('*.S')
CPPPATH = [cwd, cwd + '/include']

# Pick the SMP ('mp') or single-core ('up') context-switch sources.
core_model = 'mp' if GetDepend('RT_USING_SMP') else 'up'
src += Glob(core_model + '/*.S')

# Without OFW the PSCI / setup glue cannot be built.
if not GetDepend('RT_USING_OFW'):
    SrcRemove(src, ['setup.c', 'cpu_psci.c', 'psci.c'])

# The PIC framework ships its own GIC / timer / interrupt drivers.
if GetDepend('RT_USING_PIC'):
    SrcRemove(src, ['gicv3.c', 'gic.c', 'gtimer.c', 'interrupt.c'])

if GetDepend('RT_HWTIMER_ARM_ARCH'):
    SrcRemove(src, ['gtimer.c'])

group = DefineGroup('libcpu', src, depend = [''], CPPPATH = CPPPATH)

# build for sub-directory
# BUGFIX: the original used `list` as a variable name, shadowing the
# builtin, and relied on `os` leaking from `from building import *`.
objs = []
for entry in os.listdir(cwd):
    path = os.path.join(cwd, entry)
    if os.path.isfile(os.path.join(path, 'SConscript')):
        objs = objs + SConscript(os.path.join(entry, 'SConscript'))
group = group + objs

Return('group')
|
||||
112
RT_Thread/libcpu/aarch64/common/atomic_aarch64.c
Normal file
112
RT_Thread/libcpu/aarch64/common/atomic_aarch64.c
Normal file
@ -0,0 +1,112 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2023, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2023-05-18 GuEe-GUI first version
|
||||
*/
|
||||
|
||||
#include <rthw.h>
|
||||
#include <rtatomic.h>
|
||||
|
||||
/**
 * Atomically read *ptr; the trailing dmb gives the load full-barrier
 * semantics with respect to later accesses.
 */
rt_atomic_t rt_hw_atomic_load(volatile rt_atomic_t *ptr)
{
    rt_atomic_t result;

    __asm__ volatile (
        "   ldr %0, %1\n"
        "   dmb ish"
        : "=r" (result)
        : "Q" (*ptr)
        : "memory");

    return result;
}
|
||||
|
||||
/**
 * Atomically write @val to *ptr, then order it against later accesses
 * with a full barrier.
 */
void rt_hw_atomic_store(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    __asm__ volatile (
        "   str %1, %0\n"
        "   dmb ish"
        : "=Q" (*ptr)
        : "r" (val)
        : "memory");
}
|
||||
|
||||
#define AARCH64_ATOMIC_OP_RETURN(op, ins, constraint) \
|
||||
rt_atomic_t rt_hw_atomic_##op(volatile rt_atomic_t *ptr, rt_atomic_t in_val) \
|
||||
{ \
|
||||
rt_atomic_t tmp, val, result; \
|
||||
\
|
||||
__asm__ volatile ( \
|
||||
" prfm pstl1strm, %3\n" \
|
||||
"1: ldxr %0, %3\n" \
|
||||
" "#ins " %1, %0, %4\n" \
|
||||
" stlxr %w2, %1, %3\n" \
|
||||
" cbnz %w2, 1b\n" \
|
||||
" dmb ish" \
|
||||
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (*ptr) \
|
||||
: __RT_STRINGIFY(constraint) "r" (in_val) \
|
||||
: "memory"); \
|
||||
\
|
||||
return result; \
|
||||
}
|
||||
|
||||
AARCH64_ATOMIC_OP_RETURN(add, add, I)
|
||||
AARCH64_ATOMIC_OP_RETURN(sub, sub, J)
|
||||
AARCH64_ATOMIC_OP_RETURN(and, and, K)
|
||||
AARCH64_ATOMIC_OP_RETURN(or, orr, K)
|
||||
AARCH64_ATOMIC_OP_RETURN(xor, eor, K)
|
||||
|
||||
/**
 * Atomically swap *ptr with @val and return the previous contents.
 */
rt_atomic_t rt_hw_atomic_exchange(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t previous, status;

    __asm__ volatile (
        "   prfm    pstl1strm, %2\n"
        "1: ldxr    %0, %2\n"
        "   stlxr   %w1, %3, %2\n"
        "   cbnz    %w1, 1b\n"      /* lost exclusivity - retry */
        "   dmb     ish"
        : "=&r" (previous), "=&r" (status), "+Q" (*ptr)
        : "r" (val)
        : "memory");

    return previous;
}
|
||||
|
||||
/* Atomically clear the flag (AND with 0 zeroes every bit). */
void rt_hw_atomic_flag_clear(volatile rt_atomic_t *ptr)
{
    rt_hw_atomic_and(ptr, 0);
}
|
||||
|
||||
/* Atomically set bit 0 and report the flag's previous value. */
rt_atomic_t rt_hw_atomic_flag_test_and_set(volatile rt_atomic_t *ptr)
{
    return rt_hw_atomic_or(ptr, 1);
}
|
||||
|
||||
/**
 * Strong compare-and-swap.
 *
 * Stores @new into *ptr when *ptr equals *old; on failure the value
 * actually observed is written back through @old.
 *
 * @return 1 when the exchange happened, 0 otherwise.
 */
rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_atomic_t *old, rt_atomic_t new)
{
    rt_atomic_t observed, status;

    __asm__ volatile (
        "   prfm    pstl1strm, %2\n"
        "1: ldxr    %0, %2\n"
        "   eor     %1, %0, %3\n"   /* zero iff observed == expected */
        "   cbnz    %1, 2f\n"
        "   stlxr   %w1, %4, %2\n"
        "   cbnz    %w1, 1b\n"      /* lost exclusivity - retry */
        "   dmb     ish\n"
        "   mov     %w1, #1\n"
        "   b       3f\n"
        "2: str     %0, [%5]\n"     /* report what we saw */
        "   mov     %w1, #0\n"
        "3:"
        : "=&r" (observed), "=&r" (status), "+Q" (*ptr)
        : "Kr" (*old), "r" (new), "r" (old)
        : "memory");

    return status;
}
|
||||
|
||||
138
RT_Thread/libcpu/aarch64/common/backtrace.c
Normal file
138
RT_Thread/libcpu/aarch64/common/backtrace.c
Normal file
@ -0,0 +1,138 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2022, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2022-06-02 Jesven the first version
|
||||
* 2023-06-24 WangXiaoyao Support backtrace for non-active thread
|
||||
* 2023-10-16 Shell Support a new backtrace framework
|
||||
*/
|
||||
|
||||
|
||||
#include <rtthread.h>
|
||||
#include <rthw.h>
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "mm_aspace.h"
|
||||
#include "mmu.h"
|
||||
|
||||
#define INST_WORD_BYTES 4
|
||||
#define WORD sizeof(rt_base_t)
|
||||
#define ARCH_CONTEXT_FETCH(pctx, id) (*(((unsigned long *)pctx) + (id)))
|
||||
#define PTR_NORMALIZE(ptr) (ptr = rt_backtrace_ptr_normalize(ptr))
|
||||
|
||||
/* Hook for translating a frame pointer before it is dereferenced;
 * the default is the identity mapping and BSPs may override (rt_weak). */
rt_weak void *rt_backtrace_ptr_normalize(void *ptr)
{
    return ptr;
}
|
||||
|
||||
/* Unwind one kernel-space frame: read the saved (fp, lr) pair at @fp
 * into @frame. Returns -RT_ERROR when the frame points at itself,
 * which terminates the walk. */
rt_inline rt_err_t _bt_kaddr(rt_ubase_t *fp, struct rt_hw_backtrace_frame *frame)
{
    PTR_NORMALIZE(fp);

    frame->fp = *fp;
    /* the saved lr points after the call; step back to the call site */
    frame->pc = *(fp + 1) - INST_WORD_BYTES;

    return ((rt_ubase_t)fp == frame->fp) ? -RT_ERROR : RT_EOK;
}
|
||||
|
||||
#ifdef RT_USING_SMART
#include <lwp_user_mm.h>
/* Unwind one user-space frame. All reads go through lwp_data_get() so a
 * bad user pointer surfaces as -RT_EFAULT instead of faulting the kernel. */
rt_inline rt_err_t _bt_uaddr(rt_lwp_t lwp, rt_ubase_t *fp, struct rt_hw_backtrace_frame *frame)
{
    if (lwp_data_get(lwp, &frame->fp, fp, WORD) != WORD ||
        lwp_data_get(lwp, &frame->pc, fp + 1, WORD) != WORD)
    {
        return -RT_EFAULT;
    }

    if ((rt_base_t)fp == frame->fp)
    {
        /* self-referencing frame terminates the walk */
        return -RT_ERROR;
    }

    /* saved lr points after the call; step back to the call site */
    frame->pc -= INST_WORD_BYTES;
    return RT_EOK;
}
#endif /* RT_USING_SMART */
|
||||
|
||||
/* Step @frame one level up the call chain of @thread, choosing the
 * kernel- or user-space reader based on where the frame pointer lives. */
rt_err_t rt_hw_backtrace_frame_unwind(rt_thread_t thread, struct rt_hw_backtrace_frame *frame)
{
    rt_err_t rc;
    rt_ubase_t *fp = (rt_ubase_t *)frame->fp;

    /* the saved fp must be present and 8-byte aligned to be walkable */
    if (!fp || ((long)fp & 0x7))
    {
        return -RT_EFAULT;
    }

#ifdef RT_USING_SMART
#define IN_USER_SPACE(addr) ((rt_ubase_t)(addr) >= USER_VADDR_START && (rt_ubase_t)(addr) < USER_VADDR_TOP)
    if (thread && thread->lwp && rt_scheduler_is_available())
    {
        rt_lwp_t lwp = thread->lwp;
        void *this_lwp = lwp_self();

        /* kernel addresses (or our own process) can be read directly
         * once the mapping is verified */
        if ((!IN_USER_SPACE(fp) || this_lwp == lwp) && rt_kmem_v2p(fp) != ARCH_MAP_FAILED)
        {
            rc = _bt_kaddr(fp, frame);
        }
        else if (lwp_user_accessible_ext(lwp, fp, sizeof(rt_base_t)))
        {
            rc = _bt_uaddr(lwp, fp, frame);
        }
        else
        {
            rc = -RT_EFAULT;
        }
    }
    else
#endif
    if (rt_kmem_v2p(fp) != ARCH_MAP_FAILED)
    {
        rc = _bt_kaddr(fp, frame);
    }
    else
    {
        rc = -RT_EFAULT;
    }

    return rc;
}
|
||||
|
||||
/* Seed @frame with the pc/fp saved in @thread's switch context.
 * NOTE(review): slots 0 and 4 of the saved context are assumed to hold
 * pc and fp respectively - confirm against the context layout in
 * context_gcc.S. */
rt_err_t rt_hw_backtrace_frame_get(rt_thread_t thread, struct rt_hw_backtrace_frame *frame)
{
    if (!thread || !frame)
    {
        return -RT_EINVAL;
    }

    frame->pc = ARCH_CONTEXT_FETCH(thread->sp, 0);
    frame->fp = ARCH_CONTEXT_FETCH(thread->sp, 4);
    return RT_EOK;
}
|
||||
204
RT_Thread/libcpu/aarch64/common/cache.S
Normal file
204
RT_Thread/libcpu/aarch64/common/cache.S
Normal file
@ -0,0 +1,204 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2020-03-17 bigmagic first version
|
||||
*/
|
||||
|
||||
/*
|
||||
* void __asm_dcache_level(level)
|
||||
*
|
||||
* flush or invalidate one level cache.
|
||||
*
|
||||
* x0: cache level
|
||||
* x1: 0 clean & invalidate, 1 invalidate only
|
||||
* x2~x9: clobbered
|
||||
*/
|
||||
.globl __asm_dcache_level
|
||||
__asm_dcache_level:
|
||||
lsl x12, x0, #1
|
||||
msr csselr_el1, x12 /* select cache level */
|
||||
isb /* sync change of cssidr_el1 */
|
||||
mrs x6, ccsidr_el1 /* read the new cssidr_el1 */
|
||||
and x2, x6, #7 /* x2 <- log2(cache line size)-4 */
|
||||
add x2, x2, #4 /* x2 <- log2(cache line size) */
|
||||
mov x3, #0x3ff
|
||||
and x3, x3, x6, lsr #3 /* x3 <- max number of #ways */
|
||||
clz w5, w3 /* bit position of #ways */
|
||||
mov x4, #0x7fff
|
||||
and x4, x4, x6, lsr #13 /* x4 <- max number of #sets */
|
||||
/* x12 <- cache level << 1 */
|
||||
/* x2 <- line length offset */
|
||||
/* x3 <- number of cache ways - 1 */
|
||||
/* x4 <- number of cache sets - 1 */
|
||||
/* x5 <- bit position of #ways */
|
||||
|
||||
loop_set:
|
||||
mov x6, x3 /* x6 <- working copy of #ways */
|
||||
loop_way:
|
||||
lsl x7, x6, x5
|
||||
orr x9, x12, x7 /* map way and level to cisw value */
|
||||
lsl x7, x4, x2
|
||||
orr x9, x9, x7 /* map set number to cisw value */
|
||||
tbz w1, #0, 1f
|
||||
dc isw, x9
|
||||
b 2f
|
||||
1: dc cisw, x9 /* clean & invalidate by set/way */
|
||||
2: subs x6, x6, #1 /* decrement the way */
|
||||
b.ge loop_way
|
||||
subs x4, x4, #1 /* decrement the set */
|
||||
b.ge loop_set
|
||||
|
||||
ret
|
||||
|
||||
/*
|
||||
* void __asm_flush_dcache_all(int invalidate_only)
|
||||
*
|
||||
* x0: 0 clean & invalidate, 1 invalidate only
|
||||
*
|
||||
* flush or invalidate all data cache by SET/WAY.
|
||||
*/
|
||||
.globl __asm_dcache_all
|
||||
__asm_dcache_all:
|
||||
mov x1, x0
|
||||
dsb sy
|
||||
mrs x10, clidr_el1 /* read clidr_el1 */
|
||||
lsr x11, x10, #24
|
||||
and x11, x11, #0x7 /* x11 <- loc */
|
||||
cbz x11, finished /* if loc is 0, exit */
|
||||
mov x15, lr
|
||||
mov x0, #0 /* start flush at cache level 0 */
|
||||
/* x0 <- cache level */
|
||||
/* x10 <- clidr_el1 */
|
||||
/* x11 <- loc */
|
||||
/* x15 <- return address */
|
||||
|
||||
loop_level:
|
||||
lsl x12, x0, #1
|
||||
add x12, x12, x0 /* x0 <- tripled cache level */
|
||||
lsr x12, x10, x12
|
||||
and x12, x12, #7 /* x12 <- cache type */
|
||||
cmp x12, #2
|
||||
b.lt skip /* skip if no cache or icache */
|
||||
bl __asm_dcache_level /* x1 = 0 flush, 1 invalidate */
|
||||
skip:
|
||||
add x0, x0, #1 /* increment cache level */
|
||||
cmp x11, x0
|
||||
b.gt loop_level
|
||||
|
||||
mov x0, #0
|
||||
msr csselr_el1, x0 /* restore csselr_el1 */
|
||||
dsb sy
|
||||
isb
|
||||
mov lr, x15
|
||||
|
||||
finished:
|
||||
ret
|
||||
|
||||
.globl __asm_flush_dcache_all
|
||||
__asm_flush_dcache_all:
|
||||
mov x0, #0
|
||||
b __asm_dcache_all
|
||||
|
||||
.globl __asm_invalidate_dcache_all
|
||||
__asm_invalidate_dcache_all:
|
||||
mov x0, #0x1
|
||||
b __asm_dcache_all
|
||||
|
||||
/*
|
||||
* void __asm_flush_dcache_range(start, end)
|
||||
*
|
||||
* clean & invalidate data cache in the range
|
||||
*
|
||||
* x0: start address
|
||||
* x1: end address
|
||||
*/
|
||||
.globl __asm_flush_dcache_range
|
||||
__asm_flush_dcache_range:
|
||||
mrs x3, ctr_el0
|
||||
lsr x3, x3, #16
|
||||
and x3, x3, #0xf
|
||||
mov x2, #4
|
||||
lsl x2, x2, x3 /* cache line size */
|
||||
|
||||
/* x2 <- minimal cache line size in cache system */
|
||||
sub x3, x2, #1
|
||||
bic x0, x0, x3
|
||||
|
||||
1: dc civac, x0 /* clean & invalidate data or unified cache */
|
||||
add x0, x0, x2
|
||||
cmp x0, x1
|
||||
b.lo 1b
|
||||
dsb sy
|
||||
ret
|
||||
|
||||
/* void __asm_invalidate_dcache_range(start, end)
|
||||
*
|
||||
* invalidate data cache in the range
|
||||
*
|
||||
* x0: start address
|
||||
* x1: end address
|
||||
*/
|
||||
.globl __asm_invalidate_dcache_range
|
||||
__asm_invalidate_dcache_range:
|
||||
mrs x3, ctr_el0
|
||||
lsr x3, x3, #16
|
||||
and x3, x3, #0xf
|
||||
mov x2, #4
|
||||
lsl x2, x2, x3 /* cache line size */
|
||||
|
||||
/* x2 <- minimal cache line size in cache system */
|
||||
sub x3, x2, #1
|
||||
bic x0, x0, x3
|
||||
|
||||
1: dc ivac, x0 /* invalidate data or unified cache */
|
||||
add x0, x0, x2
|
||||
cmp x0, x1
|
||||
b.lo 1b
|
||||
dsb sy
|
||||
ret
|
||||
|
||||
/* void __asm_invalidate_icache_range(start, end)
|
||||
*
|
||||
* invalidate icache in the range
|
||||
*
|
||||
* x0: start address
|
||||
* x1: end address
|
||||
*/
|
||||
.globl __asm_invalidate_icache_range
|
||||
__asm_invalidate_icache_range:
|
||||
mrs x3, ctr_el0
|
||||
and x3, x3, #0xf
|
||||
mov x2, #4
|
||||
lsl x2, x2, x3 /* cache line size */
|
||||
|
||||
/* x2 <- minimal cache line size in cache system */
|
||||
sub x3, x2, #1
|
||||
bic x0, x0, x3
|
||||
|
||||
1: ic ivau, x0 /* invalidate instruction or unified cache */
|
||||
add x0, x0, x2
|
||||
cmp x0, x1
|
||||
b.lo 1b
|
||||
dsb sy
|
||||
ret
|
||||
|
||||
/*
|
||||
* void __asm_invalidate_icache_all(void)
|
||||
*
|
||||
* invalidate all tlb entries.
|
||||
*/
|
||||
.globl __asm_invalidate_icache_all
|
||||
__asm_invalidate_icache_all:
|
||||
dsb sy
|
||||
ic ialluis
|
||||
isb sy
|
||||
ret
|
||||
|
||||
.globl __asm_flush_l3_cache
|
||||
__asm_flush_l3_cache:
|
||||
mov x0, #0 /* return status as success */
|
||||
ret
|
||||
79
RT_Thread/libcpu/aarch64/common/cache_ops.c
Normal file
79
RT_Thread/libcpu/aarch64/common/cache_ops.c
Normal file
@ -0,0 +1,79 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2018, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2019-03-29 quanzhao the first version
|
||||
*/
|
||||
#include <rthw.h>
|
||||
#include <rtdef.h>
|
||||
|
||||
void __asm_invalidate_icache_all(void);
|
||||
void __asm_flush_dcache_all(void);
|
||||
void __asm_flush_dcache_range(rt_size_t start, rt_size_t end);
|
||||
void __asm_invalidate_dcache_range(rt_size_t start, rt_size_t end);
|
||||
void __asm_invalidate_icache_range(rt_size_t start, rt_size_t end);
|
||||
void __asm_invalidate_dcache_all(void);
|
||||
void __asm_invalidate_icache_all(void);
|
||||
|
||||
/* NOTE(review): returns 0 instead of probing CTR_EL0 - confirm callers
 * treat 0 as "unknown line size". */
rt_inline rt_uint32_t rt_cpu_icache_line_size(void)
{
    return 0;
}
|
||||
|
||||
/* NOTE(review): returns 0 instead of probing CTR_EL0 - confirm callers
 * treat 0 as "unknown line size". */
rt_inline rt_uint32_t rt_cpu_dcache_line_size(void)
{
    return 0;
}
|
||||
|
||||
/* Invalidate the instruction cache for [addr, addr + size). */
void rt_hw_cpu_icache_invalidate(void *addr, rt_size_t size)
{
    rt_size_t begin = (rt_size_t)addr;

    __asm_invalidate_icache_range(begin, begin + size);
}
|
||||
|
||||
/* Invalidate (discard) the data cache for [addr, addr + size). */
void rt_hw_cpu_dcache_invalidate(void *addr, rt_size_t size)
{
    rt_size_t begin = (rt_size_t)addr;

    __asm_invalidate_dcache_range(begin, begin + size);
}
|
||||
|
||||
/* Write back the data cache for [addr, addr + size).
 * The underlying asm routine uses `dc civac`, so lines are also
 * invalidated after being cleaned. */
void rt_hw_cpu_dcache_clean(void *addr, rt_size_t size)
{
    rt_size_t begin = (rt_size_t)addr;

    __asm_flush_dcache_range(begin, begin + size);
}
|
||||
|
||||
/* Clean and invalidate the data cache for [addr, addr + size). */
void rt_hw_cpu_dcache_clean_and_invalidate(void *addr, rt_size_t size)
{
    rt_size_t begin = (rt_size_t)addr;

    __asm_flush_dcache_range(begin, begin + size);
}
|
||||
|
||||
void rt_hw_cpu_icache_ops(int ops, void *addr, int size)
|
||||
{
|
||||
if (ops == RT_HW_CACHE_INVALIDATE)
|
||||
{
|
||||
rt_hw_cpu_icache_invalidate(addr, size);
|
||||
}
|
||||
}
|
||||
|
||||
void rt_hw_cpu_dcache_ops(int ops, void *addr, int size)
|
||||
{
|
||||
if (ops == RT_HW_CACHE_FLUSH)
|
||||
{
|
||||
rt_hw_cpu_dcache_clean(addr, size);
|
||||
}
|
||||
else if (ops == RT_HW_CACHE_INVALIDATE)
|
||||
{
|
||||
rt_hw_cpu_dcache_invalidate(addr, size);
|
||||
}
|
||||
}
|
||||
|
||||
/* Icache status reporting is not implemented on this port; always 0. */
rt_base_t rt_hw_cpu_icache_status(void)
{
    return 0;
}
|
||||
|
||||
/* Dcache status reporting is not implemented on this port; always 0. */
rt_base_t rt_hw_cpu_dcache_status(void)
{
    return 0;
}
|
||||
243
RT_Thread/libcpu/aarch64/common/cpu.c
Normal file
243
RT_Thread/libcpu/aarch64/common/cpu.c
Normal file
@ -0,0 +1,243 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2023, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2011-09-15 Bernard first version
|
||||
* 2019-07-28 zdzn add smp support
|
||||
* 2023-02-21 GuEe-GUI mov cpu ofw init to setup
|
||||
* 2024-04-29 Shell Add generic ticket spinlock using C11 atomic
|
||||
*/
|
||||
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
#include <rtdevice.h>
|
||||
#include <cpu.h>
|
||||
|
||||
#define DBG_TAG "libcpu.aarch64.cpu"
|
||||
#define DBG_LVL DBG_INFO
|
||||
#include <rtdbg.h>
|
||||
|
||||
#ifdef RT_USING_SMP
|
||||
|
||||
#define REPORT_ERR(retval) LOG_E("got error code %d in %s(), %s:%d", (retval), __func__, __FILE__, __LINE__)
|
||||
#define CHECK_RETVAL(retval) if (retval) {REPORT_ERR(retval);}
|
||||
#define cpuid_to_hwid(cpuid) \
|
||||
((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? rt_cpu_mpidr_early[cpuid] : ID_ERROR)
|
||||
#define set_hwid(cpuid, hwid) \
|
||||
((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? (rt_cpu_mpidr_early[cpuid] = (hwid)) : ID_ERROR)
|
||||
#define get_cpu_node(cpuid) \
|
||||
((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? _cpu_node[cpuid] : NULL)
|
||||
#define set_cpu_node(cpuid, node) \
|
||||
((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? (_cpu_node[cpuid] = node) : NULL)
|
||||
|
||||
typedef rt_hw_spinlock_t arch_spinlock_t;
|
||||
struct cpu_ops_t *cpu_ops_tbl[RT_CPUS_NR];
|
||||
|
||||
#ifdef RT_USING_SMART
|
||||
// _id_to_mpidr is a table translate logical id to mpid, which is a 64-bit value
|
||||
rt_uint64_t rt_cpu_mpidr_early[RT_CPUS_NR] rt_weak = {[0 ... RT_CPUS_NR - 1] = ID_ERROR};
|
||||
#else
|
||||
/* The more common mpidr_el1 table, redefine it in BSP if it is in other cases */
|
||||
rt_weak rt_uint64_t rt_cpu_mpidr_early[] =
|
||||
{
|
||||
[0] = 0x80000000,
|
||||
[1] = 0x80000001,
|
||||
[2] = 0x80000002,
|
||||
[3] = 0x80000003,
|
||||
[4] = 0x80000004,
|
||||
[5] = 0x80000005,
|
||||
[6] = 0x80000006,
|
||||
[7] = 0x80000007,
|
||||
[RT_CPUS_NR] = 0
|
||||
};
|
||||
#endif /* RT_USING_SMART */
|
||||
|
||||
/* in support of C11 atomic */
|
||||
#if __STDC_VERSION__ >= 201112L
|
||||
#include <stdatomic.h>
|
||||
|
||||
union _spinlock
|
||||
{
|
||||
_Atomic(rt_uint32_t) _value;
|
||||
struct
|
||||
{
|
||||
_Atomic(rt_uint16_t) owner;
|
||||
_Atomic(rt_uint16_t) next;
|
||||
} ticket;
|
||||
};
|
||||
|
||||
/* Reset a ticket spinlock to the released state (owner == next == 0). */
void rt_hw_spin_lock_init(rt_hw_spinlock_t *_lock)
{
    union _spinlock *lock = (void *)_lock;

    /* A plain store would suffice on arm64; the atomic API just keeps
     * the intent explicit. */
    atomic_store_explicit(&lock->_value, 0, memory_order_relaxed);
}
|
||||
|
||||
/* Try to take the ticket lock without waiting.
 * Succeeds only when the lock is currently free AND nobody races the
 * ticket update between our snapshot and the CAS. */
rt_bool_t rt_hw_spin_trylock(rt_hw_spinlock_t *_lock)
{
    rt_uint32_t snapshot;
    union _spinlock desired;
    union _spinlock *lock = (void *)_lock;

    snapshot = atomic_load_explicit(&lock->_value, memory_order_acquire);
    desired._value = snapshot;

    /* owner != next means a ticket is outstanding: lock is held */
    if (desired.ticket.owner != desired.ticket.next)
    {
        return RT_FALSE;
    }

    /* claim the next ticket; the CAS fails if anyone raced us */
    desired.ticket.next += 1;
    return atomic_compare_exchange_strong_explicit(
        &lock->_value, &snapshot, desired._value,
        memory_order_acquire, memory_order_relaxed);
}
|
||||
|
||||
/* Load-acquire-exclusive of a halfword. The exclusive monitor is
 * armed by ldaxrh, so a later wfe wakes when the location is written. */
rt_inline rt_base_t _load_acq_exclusive(_Atomic(rt_uint16_t) *halfword)
{
    rt_uint32_t value;

    __asm__ volatile("ldaxrh %w0, [%1]"
                     : "=&r"(value)
                     : "r"(halfword)
                     : "memory");
    return value;
}
|
||||
|
||||
/* sevl: arm the local event register so the first wfe falls through. */
rt_inline void _send_event_local(void)
{
    __asm__ volatile("sevl");
}
|
||||
|
||||
/* wfe: sleep until an event (or exclusive-monitor clear) arrives. */
rt_inline void _wait_for_event(void)
{
    __asm__ volatile("wfe" ::: "memory");
}
|
||||
|
||||
/* Take the ticket lock, sleeping with wfe while waiting our turn. */
void rt_hw_spin_lock(rt_hw_spinlock_t *_lock)
{
    union _spinlock *lock = (void *)_lock;
    /* draw a ticket; relaxed is fine, the acquire happens below */
    rt_uint16_t ticket =
        atomic_fetch_add_explicit(&lock->ticket.next, 1, memory_order_relaxed);

    if (atomic_load_explicit(&lock->ticket.owner, memory_order_acquire) ==
        ticket)
    {
        return;
    }

    /* sevl pre-arms the event register so the first wfe cannot stall;
     * ldaxrh re-arms the monitor so the unlocker's store wakes us */
    _send_event_local();
    do
    {
        _wait_for_event();
    } while (_load_acq_exclusive(&lock->ticket.owner) != ticket);
}
|
||||
|
||||
/* Pass ownership to the next ticket holder. Release ordering makes the
 * critical section visible before the owner bump. */
void rt_hw_spin_unlock(rt_hw_spinlock_t *_lock)
{
    union _spinlock *lock = (void *)_lock;

    atomic_fetch_add_explicit(&lock->ticket.owner, 1, memory_order_release);
}
|
||||
|
||||
#endif
|
||||
|
||||
/*
 * Record the hardware id and cpu_ops of each logical cpu into
 * rt_cpu_mpidr_early (via set_hwid) and cpu_ops_tbl. Excess entries
 * beyond RT_CPUS_NR are dropped with a warning.
 */
static int _cpus_init_data_hardcoded(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
{
    if (num_cpus > RT_CPUS_NR)
    {
        /* consistency fix: dropped the trailing '\n' - no other LOG_*
         * call in this file carries one */
        LOG_W("num_cpus (%d) greater than RT_CPUS_NR (%d)", num_cpus, RT_CPUS_NR);
        num_cpus = RT_CPUS_NR;
    }

    for (int i = 0; i < num_cpus; i++)
    {
        set_hwid(i, cpu_hw_ids[i]);
        cpu_ops_tbl[i] = cpu_ops[i];
    }
    return 0;
}
|
||||
|
||||
/** init cpu with hardcoded infomation or parsing from FDT */
|
||||
static int _cpus_init(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
|
||||
{
|
||||
int retval;
|
||||
|
||||
// first setup cpu_ops_tbl and cpuid_to_hwid
|
||||
if (num_cpus > 0)
|
||||
retval = _cpus_init_data_hardcoded(num_cpus, cpu_hw_ids, cpu_ops);
|
||||
else
|
||||
{
|
||||
retval = -1;
|
||||
}
|
||||
|
||||
if (retval)
|
||||
return retval;
|
||||
|
||||
// using cpuid_to_hwid and cpu_ops_tbl to call method_init and cpu_init
|
||||
// assuming that cpuid 0 has already init
|
||||
for (int i = 1; i < RT_CPUS_NR; i++)
|
||||
{
|
||||
if (rt_cpu_mpidr_early[i] == ID_ERROR)
|
||||
{
|
||||
LOG_E("Failed to find hardware id of CPU %d", i);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (cpu_ops_tbl[i] && cpu_ops_tbl[i]->cpu_init)
|
||||
{
|
||||
retval = cpu_ops_tbl[i]->cpu_init(i, RT_NULL);
|
||||
CHECK_RETVAL(retval);
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG_E("Failed to find cpu_init for cpu %d with cpu_ops[%p], cpu_ops->cpu_init[%p]"
|
||||
, rt_cpu_mpidr_early[i], cpu_ops_tbl[i], cpu_ops_tbl[i] ? cpu_ops_tbl[i]->cpu_init : NULL);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief boot cpu with hardcoded data
|
||||
*
|
||||
* @param num_cpus number of cpus
|
||||
* @param cpu_hw_ids each element represents a hwid of cpu[i]
|
||||
* @param cpu_ops each element represents a pointer to cpu_ops of cpu[i]
|
||||
* @return int 0 on success,
|
||||
*/
|
||||
int rt_hw_cpu_boot_secondary(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
|
||||
{
|
||||
int retval = 0;
|
||||
if (num_cpus < 1 || !cpu_hw_ids || !cpu_ops)
|
||||
return -1;
|
||||
|
||||
retval = _cpus_init(num_cpus, cpu_hw_ids, cpu_ops);
|
||||
CHECK_RETVAL(retval);
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
||||
#endif /*RT_USING_SMP*/
|
||||
|
||||
/**
|
||||
* @addtogroup ARM CPU
|
||||
*/
|
||||
/*@{*/
|
||||
|
||||
/**
 * @brief Name of the CPU architecture this port targets.
 * @return the constant string "aarch64"
 */
const char *rt_hw_cpu_arch(void)
{
    static const char arch_name[] = "aarch64";

    return arch_name;
}
|
||||
|
||||
/*@}*/
|
||||
338
RT_Thread/libcpu/aarch64/common/cpu_gcc.S
Normal file
338
RT_Thread/libcpu/aarch64/common/cpu_gcc.S
Normal file
@ -0,0 +1,338 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2024, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Date Author Notes
|
||||
* 2018-10-06 ZhaoXiaowei the first version (cpu_gcc.S)
|
||||
* 2021-05-18 Jesven the first version (context_gcc.S)
|
||||
* 2024-01-06 Shell Fix barrier on irq_disable/enable
|
||||
* 2024-01-18 Shell fix implicit dependency of cpuid management
|
||||
* 2024-03-28 Shell Move cpu codes from context_gcc.S
|
||||
*/
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#define __ASSEMBLY__
|
||||
#endif
|
||||
|
||||
#include "rtconfig.h"
|
||||
#include "asm-generic.h"
|
||||
#include "asm-fpu.h"
|
||||
#include "armv8.h"
|
||||
|
||||
#ifdef RT_USING_SMP
|
||||
#define rt_hw_interrupt_disable rt_hw_local_irq_disable
|
||||
#define rt_hw_interrupt_enable rt_hw_local_irq_enable
|
||||
#endif /* RT_USING_SMP */
|
||||
|
||||
.text
|
||||
|
||||
/**
|
||||
* #ifdef RT_USING_OFW
|
||||
* void rt_hw_cpu_id_set(long cpuid)
|
||||
* #else
|
||||
* void rt_hw_cpu_id_set(void)
|
||||
* #endif
|
||||
*/
|
||||
.type rt_hw_cpu_id_set, @function
|
||||
rt_hw_cpu_id_set:
|
||||
#ifdef ARCH_USING_GENERIC_CPUID
|
||||
.globl rt_hw_cpu_id_set
|
||||
#else /* !ARCH_USING_GENERIC_CPUID */
|
||||
.weak rt_hw_cpu_id_set
|
||||
#endif /* ARCH_USING_GENERIC_CPUID */
|
||||
|
||||
#ifndef RT_USING_OFW
|
||||
mrs x0, mpidr_el1 /* MPIDR_EL1: Multi-Processor Affinity Register */
|
||||
#ifdef ARCH_ARM_CORTEX_A55
|
||||
lsr x0, x0, #8
|
||||
#endif /* ARCH_ARM_CORTEX_A55 */
|
||||
and x0, x0, #15
|
||||
#endif /* !RT_USING_OFW */
|
||||
|
||||
#ifdef ARCH_USING_HW_THREAD_SELF
|
||||
msr tpidrro_el0, x0
|
||||
#else /* !ARCH_USING_HW_THREAD_SELF */
|
||||
msr tpidr_el1, x0
|
||||
#endif /* ARCH_USING_HW_THREAD_SELF */
|
||||
ret
|
||||
|
||||
/*
|
||||
int rt_hw_cpu_id(void)
|
||||
*/
|
||||
.type rt_hw_cpu_id, @function
|
||||
rt_hw_cpu_id:
|
||||
#ifdef ARCH_USING_GENERIC_CPUID
|
||||
.globl rt_hw_cpu_id
|
||||
#else /* !ARCH_USING_GENERIC_CPUID */
|
||||
.weak rt_hw_cpu_id
|
||||
#endif /* ARCH_USING_GENERIC_CPUID */
|
||||
|
||||
#if RT_CPUS_NR > 1
|
||||
#ifdef ARCH_USING_GENERIC_CPUID
|
||||
mrs x0, tpidrro_el0
|
||||
#else /* !ARCH_USING_GENERIC_CPUID */
|
||||
mrs x0, tpidr_el1
|
||||
#endif /* ARCH_USING_GENERIC_CPUID */
|
||||
#else /* RT_CPUS_NR == 1 */
|
||||
mov x0, xzr
|
||||
#endif
|
||||
ret
|
||||
|
||||
/*
|
||||
void rt_hw_set_process_id(size_t id)
|
||||
*/
|
||||
.global rt_hw_set_process_id
|
||||
rt_hw_set_process_id:
|
||||
msr CONTEXTIDR_EL1, x0
|
||||
ret
|
||||
|
||||
/*
|
||||
*enable gtimer
|
||||
*/
|
||||
.globl rt_hw_gtimer_enable
|
||||
rt_hw_gtimer_enable:
|
||||
mov x0, #1
|
||||
msr CNTP_CTL_EL0, x0
|
||||
ret
|
||||
|
||||
/*
|
||||
*set gtimer CNTP_TVAL_EL0 value
|
||||
*/
|
||||
.globl rt_hw_set_gtimer_val
|
||||
rt_hw_set_gtimer_val:
|
||||
msr CNTP_TVAL_EL0, x0
|
||||
ret
|
||||
|
||||
/*
|
||||
*get gtimer CNTP_TVAL_EL0 value
|
||||
*/
|
||||
.globl rt_hw_get_gtimer_val
|
||||
rt_hw_get_gtimer_val:
|
||||
mrs x0, CNTP_TVAL_EL0
|
||||
ret
|
||||
|
||||
|
||||
.globl rt_hw_get_cntpct_val
|
||||
rt_hw_get_cntpct_val:
|
||||
mrs x0, CNTPCT_EL0
|
||||
ret
|
||||
|
||||
/*
|
||||
*get gtimer frq value
|
||||
*/
|
||||
.globl rt_hw_get_gtimer_frq
|
||||
rt_hw_get_gtimer_frq:
|
||||
mrs x0, CNTFRQ_EL0
|
||||
ret
|
||||
|
||||
.global rt_hw_interrupt_is_disabled
|
||||
rt_hw_interrupt_is_disabled:
|
||||
mrs x0, DAIF
|
||||
tst x0, #0xc0
|
||||
cset x0, NE
|
||||
ret
|
||||
|
||||
/*
|
||||
* rt_base_t rt_hw_interrupt_disable();
|
||||
*/
|
||||
.globl rt_hw_interrupt_disable
|
||||
rt_hw_interrupt_disable:
|
||||
mrs x0, DAIF
|
||||
and x0, x0, #0xc0
|
||||
cmp x0, #0xc0
|
||||
/* branch if bits not both set(zero) */
|
||||
bne 1f
|
||||
ret
|
||||
1:
|
||||
msr DAIFSet, #3
|
||||
dsb nsh
|
||||
isb
|
||||
ret
|
||||
|
||||
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 *
 * Restore the IRQ/FIQ mask bits saved by rt_hw_interrupt_disable().
 * If both bits were masked in `level`, interrupts stay off and no
 * barriers are needed.
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    and     x0, x0, #0xc0
    cmp     x0, #0xc0
    /* branch if one of the bits not set (zero) */
    bne     1f
    ret
1:
    isb
    dsb     nsh
    /* BUGFIX: removed a redundant `and x0, x0, #0xc0` here - x0 has
     * been unchanged since the identical mask at entry */
    mrs     x1, DAIF
    bic     x1, x1, #0xc0
    orr     x0, x0, x1
    msr     DAIF, x0
    ret
|
||||
|
||||
.globl rt_hw_get_current_el
|
||||
rt_hw_get_current_el:
|
||||
mrs x0, CurrentEL
|
||||
cmp x0, 0xc
|
||||
b.eq 3f
|
||||
cmp x0, 0x8
|
||||
b.eq 2f
|
||||
cmp x0, 0x4
|
||||
b.eq 1f
|
||||
|
||||
ldr x0, =0
|
||||
b 0f
|
||||
3:
|
||||
ldr x0, =3
|
||||
b 0f
|
||||
2:
|
||||
ldr x0, =2
|
||||
b 0f
|
||||
1:
|
||||
ldr x0, =1
|
||||
b 0f
|
||||
0:
|
||||
ret
|
||||
|
||||
|
||||
.globl rt_hw_set_current_vbar
|
||||
rt_hw_set_current_vbar:
|
||||
mrs x1, CurrentEL
|
||||
cmp x1, 0xc
|
||||
b.eq 3f
|
||||
cmp x1, 0x8
|
||||
b.eq 2f
|
||||
cmp x1, 0x4
|
||||
b.eq 1f
|
||||
b 0f
|
||||
3:
|
||||
msr VBAR_EL3,x0
|
||||
b 0f
|
||||
2:
|
||||
msr VBAR_EL2,x0
|
||||
b 0f
|
||||
1:
|
||||
msr VBAR_EL1,x0
|
||||
b 0f
|
||||
0:
|
||||
ret
|
||||
|
||||
.globl rt_hw_set_elx_env
|
||||
rt_hw_set_elx_env:
|
||||
mrs x1, CurrentEL
|
||||
cmp x1, 0xc
|
||||
b.eq 3f
|
||||
cmp x1, 0x8
|
||||
b.eq 2f
|
||||
cmp x1, 0x4
|
||||
b.eq 1f
|
||||
b 0f
|
||||
3:
|
||||
mrs x0, SCR_EL3
|
||||
orr x0, x0, #0xf /* SCR_EL3.NS|IRQ|FIQ|EA */
|
||||
msr SCR_EL3, x0
|
||||
b 0f
|
||||
2:
|
||||
mrs x0, HCR_EL2
|
||||
orr x0, x0, #0x38
|
||||
msr HCR_EL2, x0
|
||||
b 0f
|
||||
1:
|
||||
b 0f
|
||||
0:
|
||||
ret
|
||||
|
||||
.globl rt_cpu_vector_set_base
|
||||
rt_cpu_vector_set_base:
|
||||
msr VBAR_EL1, x0
|
||||
ret
|
||||
|
||||
|
||||
/**
 * unsigned long rt_hw_ffz(unsigned long x)
 *
 * Computes 63 - clz(~x): the bit index of the most-significant zero
 * bit of x.
 * NOTE(review): the name suggests "find first zero" (least-significant
 * bit); confirm callers expect the MSB-side result produced here.
 */
.globl rt_hw_ffz
rt_hw_ffz:
    mvn     x1, x0          /* x1 = ~x: zero bits become ones */
    clz     x0, x1          /* count leading zeros of ~x */
    mov     x1, #0x3f
    sub     x0, x1, x0      /* 63 - clz(~x) */
    ret
|
||||
|
||||
.globl rt_hw_clz
|
||||
rt_hw_clz:
|
||||
clz x0, x0
|
||||
ret
|
||||
|
||||
/**
|
||||
* Spinlock (fallback implementation)
|
||||
*/
|
||||
|
||||
rt_hw_spin_lock_init:
|
||||
.weak rt_hw_spin_lock_init
|
||||
stlr wzr, [x0]
|
||||
ret
|
||||
|
||||
rt_hw_spin_trylock:
|
||||
.weak rt_hw_spin_trylock
|
||||
sub sp, sp, #16
|
||||
ldar w2, [x0]
|
||||
add x1, sp, 8
|
||||
stlr w2, [x1]
|
||||
ldarh w1, [x1]
|
||||
and w1, w1, 65535
|
||||
add x3, sp, 10
|
||||
ldarh w3, [x3]
|
||||
cmp w1, w3, uxth
|
||||
beq 1f
|
||||
mov w0, 0
|
||||
add sp, sp, 16
|
||||
ret
|
||||
1:
|
||||
add x1, sp, 10
|
||||
2:
|
||||
ldaxrh w3, [x1]
|
||||
add w3, w3, 1
|
||||
stlxrh w4, w3, [x1]
|
||||
cbnz w4, 2b
|
||||
add x1, sp, 8
|
||||
ldar w1, [x1]
|
||||
3:
|
||||
ldaxr w3, [x0]
|
||||
cmp w3, w2
|
||||
bne 4f
|
||||
stxr w4, w1, [x0]
|
||||
cbnz w4, 3b
|
||||
4:
|
||||
cset w0, eq
|
||||
add sp, sp, 16
|
||||
ret
|
||||
|
||||
rt_hw_spin_lock:
|
||||
.weak rt_hw_spin_lock
|
||||
add x1, x0, 2
|
||||
1:
|
||||
ldxrh w2, [x1]
|
||||
add w3, w2, 1
|
||||
stxrh w4, w3, [x1]
|
||||
cbnz w4, 1b
|
||||
and w2, w2, 65535
|
||||
ldarh w1, [x0]
|
||||
cmp w2, w1, uxth
|
||||
beq 3f
|
||||
sevl
|
||||
2:
|
||||
wfe
|
||||
ldaxrh w1, [x0]
|
||||
cmp w2, w1
|
||||
bne 2b
|
||||
3:
|
||||
ret
|
||||
|
||||
rt_hw_spin_unlock:
|
||||
.weak rt_hw_spin_unlock
|
||||
ldxrh w1, [x0]
|
||||
add w1, w1, 1
|
||||
stlxrh w2, w1, [x0]
|
||||
cbnz w2, rt_hw_spin_unlock
|
||||
ret
|
||||
40
RT_Thread/libcpu/aarch64/common/cpu_psci.c
Normal file
40
RT_Thread/libcpu/aarch64/common/cpu_psci.c
Normal file
@ -0,0 +1,40 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2019, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2023-02-21 GuEe-GUI replace with drivers/psci
|
||||
*/
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
|
||||
#define DBG_TAG "cpu.aa64"
|
||||
#define DBG_LVL DBG_INFO
|
||||
#include <rtdbg.h>
|
||||
|
||||
#include <cpu.h>
|
||||
#include <cpuport.h>
|
||||
#include <psci.h>
|
||||
|
||||
/* Ask the PSCI firmware to power on @cpuid starting at @entry. */
static int psci_cpu_boot(rt_uint32_t cpuid, rt_uint64_t entry)
{
    return rt_psci_cpu_on(cpuid, entry);
}
|
||||
|
||||
/* Power down the calling CPU through PSCI CPU_OFF.
 * Builds a core-level power-down state value; does not return on success. */
static void psci_cpu_shutdown(void)
{
    rt_uint32_t state, state_id = PSCI_POWER_STATE_ID(0, 0, 0, PSCI_POWER_STATE_ID_POWERDOWN);

    state = PSCI_POWER_STATE(PSCI_POWER_STATE_LEVEL_CORES, PSCI_POWER_STATE_TYPE_STANDBY, state_id);

    rt_psci_cpu_off(state);
}
|
||||
|
||||
/* CPU bring-up/teardown operations selected when the devicetree enable-method
 * of a cpu node is "psci". No cpu_init hook is needed for PSCI. */
struct cpu_ops_t cpu_psci_ops =
{
    .method = "psci",
    .cpu_boot = psci_cpu_boot,
    .cpu_shutdown = psci_cpu_shutdown,
};
|
||||
65
RT_Thread/libcpu/aarch64/common/cpu_spin_table.c
Normal file
65
RT_Thread/libcpu/aarch64/common/cpu_spin_table.c
Normal file
@ -0,0 +1,65 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2022, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2023-02-21 GuEe-GUI replace with ofw
|
||||
*/
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
|
||||
#define DBG_TAG "cpu.aa64"
|
||||
#define DBG_LVL DBG_INFO
|
||||
#include <rtdbg.h>
|
||||
|
||||
#include <cpu.h>
|
||||
#include <cpuport.h>
|
||||
|
||||
#include <ioremap.h>
|
||||
#include <drivers/core/dm.h>
|
||||
|
||||
#ifdef RT_USING_OFW
|
||||
|
||||
static rt_uint64_t cpu_release_addr[RT_CPUS_NR];
|
||||
|
||||
/* Read the "cpu-release-addr" property from the cpu devicetree node and
 * cache it for the later boot call. 'param' is the cpu's ofw node.
 * Always returns 0; if the property is absent the cached address stays 0.
 * NOTE(review): LOG_D formats a rt_uint64_t with %p — likely fine on LP64,
 * but confirm against the logging format conventions. */
static int spin_table_cpu_init(rt_uint32_t cpuid, void *param)
{
    struct rt_ofw_node *cpu_np = param;

    rt_ofw_prop_read_u64(cpu_np, "cpu-release-addr", &cpu_release_addr[cpuid]);

    LOG_D("Using release address 0x%p for CPU %d", cpu_release_addr[cpuid], cpuid);

    return 0;
}
|
||||
|
||||
/* Release a spin-table-parked secondary CPU: map its release mailbox, write
 * the kernel entry point there, ensure the store is visible (dsb sy), then
 * SEV to wake the core out of WFE. Returns 0 on success, -1 if the mailbox
 * could not be ioremapped. */
static int spin_table_cpu_boot(rt_uint32_t cpuid, rt_uint64_t entry)
{
    void *cpu_release_vaddr;

    cpu_release_vaddr = rt_ioremap((void *)cpu_release_addr[cpuid], sizeof(cpu_release_addr[0]));

    if (!cpu_release_vaddr)
    {
        LOG_E("IO remap failing");
        return -1;
    }

    /* "rZ" lets the compiler use xzr if entry were 0 */
    __asm__ volatile ("str %0, [%1]" ::"rZ"(entry), "r"(cpu_release_vaddr));
    rt_hw_barrier(dsb, sy);   /* make the mailbox write visible to the parked CPU */
    rt_hw_sev();              /* wake it from WFE */

    rt_iounmap(cpu_release_vaddr);

    return 0;
}
|
||||
|
||||
/* CPU operations for the devicetree "spin-table" enable-method.
 * No cpu_shutdown: spin-table CPUs cannot be powered off. */
struct cpu_ops_t cpu_spin_table_ops =
{
    .method = "spin-table",
    .cpu_init = spin_table_cpu_init,
    .cpu_boot = spin_table_cpu_boot,
};
|
||||
#endif
|
||||
74
RT_Thread/libcpu/aarch64/common/cpuport.c
Normal file
74
RT_Thread/libcpu/aarch64/common/cpuport.c
Normal file
@ -0,0 +1,74 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2024-06-21 Zhangyan first version
|
||||
*/
|
||||
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
#include <board.h>
|
||||
|
||||
#ifdef RT_USING_CPU_FFS
|
||||
/**
|
||||
* This function finds the first bit set (beginning with the least significant bit)
|
||||
* in value and return the index of that bit.
|
||||
*
|
||||
* Bits are numbered starting at 1 (the least significant bit). A return value of
|
||||
* zero from any of these functions means that the argument was zero.
|
||||
*
|
||||
* @return return the index of the first bit set. If value is 0, then this function
|
||||
* shall return 0.
|
||||
*/
|
||||
/**
 * This function finds the first bit set (beginning with the least significant bit)
 * in value and return the index of that bit.
 *
 * Bits are numbered starting at 1 (the least significant bit). A return value of
 * zero from any of these functions means that the argument was zero.
 *
 * @return return the index of the first bit set. If value is 0, then this function
 *         shall return 0.
 */
int __rt_ffs(int value)
{
#ifdef __GNUC__
    return __builtin_ffs(value);
#else
    /* rbit reverses the bits, so clz of the result is the zero-based index of
     * the lowest set bit; csinc returns 0 for value==0, else clz+1 (one-based). */
    __asm__ volatile (
            "rbit w1, %w0\n"
            "cmp %w0, 0\n"
            "clz w1, w1\n"
            "csinc %w0, wzr, w1, eq\n"
            : "=r"(value)
            : "0"(value)
        );
    return value;
#endif
}
|
||||
|
||||
/**
 * Find the first set bit (starting from the least significant bit) in a long
 * value and return its one-based index.
 *
 * @param value the word to scan.
 *
 * @return one-based index of the lowest set bit; 0 when value is 0
 *         (same contract as __builtin_ffsl).
 */
unsigned long __rt_ffsl(unsigned long value)
{
#ifdef __GNUC__
    return __builtin_ffsl(value);
#else
    if (!value)
    {
        return 0;
    }

    /* Reverse the bits so the lowest set bit becomes the highest. */
    __asm__ volatile ("rbit %0, %0" : "+r" (value));

    /* clz of the reversed word is the ZERO-based index of the original lowest
     * set bit; ffs semantics are one-based, so add 1 (the missing +1 here
     * previously made the fallback disagree with __builtin_ffsl). */
    return __rt_clz(value) + 1;
#endif
}
|
||||
|
||||
/**
 * Count the leading zero bits of a long value.
 *
 * @param value the word to scan.
 *
 * @return number of leading zeros; the full bit-width of unsigned long when
 *         value is 0 (matching what the AArch64 `clz` instruction returns).
 */
unsigned long __rt_clz(unsigned long value)
{
#ifdef __GNUC__
    /* The hardware clz of 0 is the register width, but __builtin_clzl(0) is
     * undefined behaviour, so handle 0 explicitly. */
    if (!value)
    {
        return sizeof(value) * 8;
    }

    /* Must be the 'l' variant: __builtin_clz() takes unsigned int and would
     * return a count 32 too small for 64-bit unsigned long values. */
    return __builtin_clzl(value);
#else
    unsigned long val;

    __asm__ volatile ("clz %0, %1"
                      :"=r"(val)
                      :"r"(value));

    return val;
#endif
}
|
||||
|
||||
#endif /* RT_USING_CPU_FFS */
|
||||
249
RT_Thread/libcpu/aarch64/common/exception.c
Normal file
249
RT_Thread/libcpu/aarch64/common/exception.c
Normal file
@ -0,0 +1,249 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2023, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2023-02-08 RT-Thread the first version
|
||||
*/
|
||||
#include "rtthread.h"
|
||||
|
||||
/* Decode and print a data-abort: the faulting address (FAR) plus the
 * Data Fault Status Code held in ESR_ELx.ISS bits [5:0]; bit 6 (WnR)
 * distinguishes a faulting write from a faulting read. */
static void data_abort(unsigned long far, unsigned long iss)
{
    rt_kprintf("fault addr = 0x%016lx\n", far);
    if (iss & 0x40)   /* ISS.WnR: set when the abort was caused by a write */
    {
        rt_kprintf("abort caused by write instruction\n");
    }
    else
    {
        rt_kprintf("abort caused by read instruction\n");
    }
    switch (iss & 0x3f)   /* DFSC: data fault status code */
    {
    case 0b000000:
        rt_kprintf("Address size fault, zeroth level of translation or translation table base register\n");
        break;

    case 0b000001:
        rt_kprintf("Address size fault, first level\n");
        break;

    case 0b000010:
        rt_kprintf("Address size fault, second level\n");
        break;

    case 0b000011:
        rt_kprintf("Address size fault, third level\n");
        break;

    case 0b000100:
        rt_kprintf("Translation fault, zeroth level\n");
        break;

    case 0b000101:
        rt_kprintf("Translation fault, first level\n");
        break;

    case 0b000110:
        rt_kprintf("Translation fault, second level\n");
        break;

    case 0b000111:
        rt_kprintf("Translation fault, third level\n");
        break;

    case 0b001000:
        rt_kprintf("Access flag fault, zeroth level\n");
        break;

    case 0b001001:
        rt_kprintf("Access flag fault, first level\n");
        break;

    case 0b001010:
        rt_kprintf("Access flag fault, second level\n");
        break;

    case 0b001011:
        rt_kprintf("Access flag fault, third level\n");
        break;

    case 0b001100:
        rt_kprintf("Permission fault, zeroth level\n");
        break;

    case 0b001101:
        rt_kprintf("Permission fault, first level\n");
        break;

    case 0b001110:
        rt_kprintf("Permission fault, second level\n");
        break;

    case 0b001111:
        rt_kprintf("Permission fault, third level\n");
        break;

    case 0b010000:
        rt_kprintf("Synchronous external abort, not on translation table walk\n");
        break;

    case 0b011000:
        rt_kprintf("Synchronous parity or ECC error on memory access, not on translation table walk\n");
        break;

    case 0b010100:
        rt_kprintf("Synchronous external abort on translation table walk, zeroth level\n");
        break;

    case 0b010101:
        rt_kprintf("Synchronous external abort on translation table walk, first level\n");
        break;

    case 0b010110:
        rt_kprintf("Synchronous external abort on translation table walk, second level\n");
        break;

    case 0b010111:
        rt_kprintf("Synchronous external abort on translation table walk, third level\n");
        break;

    case 0b011100:
        rt_kprintf("Synchronous parity or ECC error on memory access on translation table walk, zeroth level\n");
        break;

    case 0b011101:
        rt_kprintf("Synchronous parity or ECC error on memory access on translation table walk, first level\n");
        break;

    case 0b011110:
        rt_kprintf("Synchronous parity or ECC error on memory access on translation table walk, second level\n");
        break;

    case 0b011111:
        rt_kprintf("Synchronous parity or ECC error on memory access on translation table walk, third level\n");
        break;

    case 0b100001:
        rt_kprintf("Alignment fault\n");
        break;

    case 0b110000:
        rt_kprintf("TLB conflict abort\n");
        break;

    case 0b110100:
        rt_kprintf("IMPLEMENTATION DEFINED fault (Lockdown fault)\n");
        break;

    case 0b110101:
        rt_kprintf("IMPLEMENTATION DEFINED fault (Unsupported Exclusive access fault)\n");
        break;

    case 0b111101:
        rt_kprintf("Section Domain Fault, used only for faults reported in the PAR_EL1\n");
        break;

    case 0b111110:
        rt_kprintf("Page Domain Fault, used only for faults reported in the PAR_EL1\n");
        break;

    default:
        rt_kprintf("unknow abort\n");
        break;
    }
}
|
||||
|
||||
/* Pretty-print an exception: split ESR_ELx into EC (class, bits [31:26]),
 * IL (instruction length, bit 25) and ISS (bits [24:0]), print them along
 * with the exception PC, and describe the class. For data aborts (EC 0x24/
 * 0x25) also read FAR_EL1 and decode the fault via data_abort(). */
void print_exception(unsigned long esr, unsigned long epc)
{
    rt_uint8_t ec;
    rt_uint32_t iss;
    unsigned long fault_addr;
    rt_kprintf("\nexception info:\n");
    ec = (unsigned char)((esr >> 26) & 0x3fU);
    iss = (unsigned int)(esr & 0x00ffffffU);
    rt_kprintf("esr.EC :0x%02x\n", ec);
    rt_kprintf("esr.IL :0x%02x\n", (unsigned char)((esr >> 25) & 0x01U));
    rt_kprintf("esr.ISS:0x%08x\n", iss);
    rt_kprintf("epc    :0x%016p\n", (void *)epc);
    switch (ec)
    {
    case 0x00:
        rt_kprintf("Exceptions with an unknow reason\n");
        break;

    case 0x01:
        rt_kprintf("Exceptions from an WFI or WFE instruction\n");
        break;

    case 0x03:
        rt_kprintf("Exceptions from an MCR or MRC access to CP15 from AArch32\n");
        break;

    case 0x04:
        rt_kprintf("Exceptions from an MCRR or MRRC access to CP15 from AArch32\n");
        break;

    case 0x05:
        rt_kprintf("Exceptions from an MCR or MRC access to CP14 from AArch32\n");
        break;

    case 0x06:
        rt_kprintf("Exceptions from an LDC or STC access to CP14 from AArch32\n");
        break;

    case 0x07:
        rt_kprintf("Exceptions from Access to Advanced SIMD or floating-point registers\n");
        break;

    case 0x08:
        rt_kprintf("Exceptions from an MRC (or VMRS) access to CP10 from AArch32\n");
        break;

    case 0x0c:
        rt_kprintf("Exceptions from an MCRR or MRRC access to CP14 from AArch32\n");
        break;

    case 0x0e:
        rt_kprintf("Exceptions that occur because ther value of PSTATE.IL is 1\n");
        break;

    case 0x11:
        rt_kprintf("SVC call from AArch32 state\n");
        break;

    case 0x15:
        rt_kprintf("SVC call from AArch64 state\n");
        break;

    case 0x20:
        rt_kprintf("Instruction abort from lower exception level\n");
        break;

    case 0x21:
        rt_kprintf("Instruction abort from current exception level\n");
        break;

    case 0x22:
        rt_kprintf("PC alignment fault\n");
        break;

    case 0x24:
        rt_kprintf("Data abort from a lower Exception level\n");
        __asm__ volatile("mrs %0, far_el1":"=r"(fault_addr));
        data_abort(fault_addr, iss);
        break;

    case 0x25:
        rt_kprintf("Data abort\n");
        __asm__ volatile("mrs %0, far_el1":"=r"(fault_addr));
        data_abort(fault_addr, iss);
        break;

    default:
        rt_kprintf("Other error\n");
        break;
    }
}
|
||||
519
RT_Thread/libcpu/aarch64/common/gic.c
Normal file
519
RT_Thread/libcpu/aarch64/common/gic.c
Normal file
@ -0,0 +1,519 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2013-07-20 Bernard first version
|
||||
* 2014-04-03 Grissiom many enhancements
|
||||
* 2018-11-22 Jesven add rt_hw_ipi_send()
|
||||
* add rt_hw_ipi_handler_install()
|
||||
*/
|
||||
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
|
||||
#if defined(BSP_USING_GIC) && defined(BSP_USING_GICV2)
|
||||
|
||||
#include "gic.h"
|
||||
#include "cp15.h"
|
||||
|
||||
struct arm_gic
|
||||
{
|
||||
rt_uint64_t offset; /* the first interrupt index in the vector table */
|
||||
|
||||
rt_uint64_t dist_hw_base; /* the base address of the gic distributor */
|
||||
rt_uint64_t cpu_hw_base; /* the base addrees of the gic cpu interface */
|
||||
};
|
||||
|
||||
/* 'ARM_GIC_MAX_NR' is the number of cores */
|
||||
static struct arm_gic _gic_table[ARM_GIC_MAX_NR];
|
||||
|
||||
/** Macro to access the Generic Interrupt Controller Interface (GICC)
|
||||
*/
|
||||
#define GIC_CPU_CTRL(hw_base) __REG32((hw_base) + 0x00U)
|
||||
#define GIC_CPU_PRIMASK(hw_base) __REG32((hw_base) + 0x04U)
|
||||
#define GIC_CPU_BINPOINT(hw_base) __REG32((hw_base) + 0x08U)
|
||||
#define GIC_CPU_INTACK(hw_base) __REG32((hw_base) + 0x0cU)
|
||||
#define GIC_CPU_EOI(hw_base) __REG32((hw_base) + 0x10U)
|
||||
#define GIC_CPU_RUNNINGPRI(hw_base) __REG32((hw_base) + 0x14U)
|
||||
#define GIC_CPU_HIGHPRI(hw_base) __REG32((hw_base) + 0x18U)
|
||||
#define GIC_CPU_IIDR(hw_base) __REG32((hw_base) + 0xFCU)
|
||||
|
||||
/** Macro to access the Generic Interrupt Controller Distributor (GICD)
|
||||
*/
|
||||
#define GIC_DIST_CTRL(hw_base) __REG32((hw_base) + 0x000U)
|
||||
#define GIC_DIST_TYPE(hw_base) __REG32((hw_base) + 0x004U)
|
||||
#define GIC_DIST_IGROUP(hw_base, n) __REG32((hw_base) + 0x080U + ((n)/32U) * 4U)
|
||||
#define GIC_DIST_ENABLE_SET(hw_base, n) __REG32((hw_base) + 0x100U + ((n)/32U) * 4U)
|
||||
#define GIC_DIST_ENABLE_CLEAR(hw_base, n) __REG32((hw_base) + 0x180U + ((n)/32U) * 4U)
|
||||
#define GIC_DIST_PENDING_SET(hw_base, n) __REG32((hw_base) + 0x200U + ((n)/32U) * 4U)
|
||||
#define GIC_DIST_PENDING_CLEAR(hw_base, n) __REG32((hw_base) + 0x280U + ((n)/32U) * 4U)
|
||||
#define GIC_DIST_ACTIVE_SET(hw_base, n) __REG32((hw_base) + 0x300U + ((n)/32U) * 4U)
|
||||
#define GIC_DIST_ACTIVE_CLEAR(hw_base, n) __REG32((hw_base) + 0x380U + ((n)/32U) * 4U)
|
||||
#define GIC_DIST_PRI(hw_base, n) __REG32((hw_base) + 0x400U + ((n)/4U) * 4U)
|
||||
#define GIC_DIST_TARGET(hw_base, n) __REG32((hw_base) + 0x800U + ((n)/4U) * 4U)
|
||||
#define GIC_DIST_CONFIG(hw_base, n) __REG32((hw_base) + 0xc00U + ((n)/16U) * 4U)
|
||||
#define GIC_DIST_SOFTINT(hw_base) __REG32((hw_base) + 0xf00U)
|
||||
#define GIC_DIST_CPENDSGI(hw_base, n) __REG32((hw_base) + 0xf10U + ((n)/4U) * 4U)
|
||||
#define GIC_DIST_SPENDSGI(hw_base, n) __REG32((hw_base) + 0xf20U + ((n)/4U) * 4U)
|
||||
#define GIC_DIST_ICPIDR2(hw_base) __REG32((hw_base) + 0xfe8U)
|
||||
|
||||
static unsigned int _gic_max_irq;
|
||||
|
||||
/* Acknowledge and return the highest-priority pending interrupt by reading
 * GICC_IAR, rebased into the vector table by the controller's irq offset. */
int arm_gic_get_active_irq(rt_uint64_t index)
{
    int irq;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = GIC_CPU_INTACK(_gic_table[index].cpu_hw_base);
    irq += _gic_table[index].offset;
    return irq;
}
|
||||
|
||||
/* Finish handling 'irq': clear its pending bit in the distributor and signal
 * end-of-interrupt to the CPU interface (GICC_EOIR). 'irq' is a vector-table
 * number; it is rebased to the hardware INTID first. */
void arm_gic_ack(rt_uint64_t index, int irq)
{
    /* note: mask is computed from the un-rebased irq; both differ only by
     * the controller offset, which is a multiple of 32 in practice */
    rt_uint64_t mask = 1U << (irq % 32U);

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);

    GIC_DIST_PENDING_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
    GIC_CPU_EOI(_gic_table[index].cpu_hw_base) = irq;
}
|
||||
|
||||
/* Disable (mask) an interrupt by writing its bit to GICD_ICENABLERn. */
void arm_gic_mask(rt_uint64_t index, int irq)
{
    rt_uint64_t mask = 1U << (irq % 32U);

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);

    GIC_DIST_ENABLE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
}
|
||||
|
||||
/* Enable (unmask) an interrupt by writing its bit to GICD_ISENABLERn. */
void arm_gic_umask(rt_uint64_t index, int irq)
{
    rt_uint64_t mask = 1U << (irq % 32U);

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);

    GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base, irq) = mask;
}
|
||||
|
||||
rt_uint64_t arm_gic_get_pending_irq(rt_uint64_t index, int irq)
|
||||
{
|
||||
rt_uint64_t pend;
|
||||
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
irq = irq - _gic_table[index].offset;
|
||||
RT_ASSERT(irq >= 0U);
|
||||
|
||||
if (irq >= 16U)
|
||||
{
|
||||
pend = (GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32U)) & 0x1UL;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* INTID 0-15 Software Generated Interrupt */
|
||||
pend = (GIC_DIST_SPENDSGI(_gic_table[index].dist_hw_base, irq) >> ((irq % 4U) * 8U)) & 0xFFUL;
|
||||
/* No CPU identification offered */
|
||||
if (pend != 0U)
|
||||
{
|
||||
pend = 1U;
|
||||
}
|
||||
else
|
||||
{
|
||||
pend = 0U;
|
||||
}
|
||||
}
|
||||
|
||||
return (pend);
|
||||
}
|
||||
|
||||
void arm_gic_set_pending_irq(rt_uint64_t index, int irq)
|
||||
{
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
irq = irq - _gic_table[index].offset;
|
||||
RT_ASSERT(irq >= 0U);
|
||||
|
||||
if (irq >= 16U)
|
||||
{
|
||||
GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) = 1U << (irq % 32U);
|
||||
}
|
||||
else
|
||||
{
|
||||
/* INTID 0-15 Software Generated Interrupt */
|
||||
/* Forward the interrupt to the CPU interface that requested it */
|
||||
GIC_DIST_SOFTINT(_gic_table[index].dist_hw_base) = (irq | 0x02000000U);
|
||||
}
|
||||
}
|
||||
|
||||
void arm_gic_clear_pending_irq(rt_uint64_t index, int irq)
|
||||
{
|
||||
rt_uint64_t mask;
|
||||
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
irq = irq - _gic_table[index].offset;
|
||||
RT_ASSERT(irq >= 0U);
|
||||
|
||||
if (irq >= 16U)
|
||||
{
|
||||
mask = 1U << (irq % 32U);
|
||||
GIC_DIST_PENDING_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
|
||||
}
|
||||
else
|
||||
{
|
||||
mask = 1U << ((irq % 4U) * 8U);
|
||||
GIC_DIST_CPENDSGI(_gic_table[index].dist_hw_base, irq) = mask;
|
||||
}
|
||||
}
|
||||
|
||||
/* Set the trigger configuration of an interrupt in GICD_ICFGRn.
 * Each interrupt owns a 2-bit field; bit1 selects edge (1) vs level (0).
 * NOTE(review): the value is shifted by (shift + 1), i.e. only into the
 * edge/level bit of the field — this assumes 'config' is 0 or 1; confirm
 * callers never pass a full 2-bit field value. */
void arm_gic_set_configuration(rt_uint64_t index, int irq, uint32_t config)
{
    rt_uint64_t icfgr;
    rt_uint64_t shift;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);

    icfgr = GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq);
    shift = (irq % 16U) << 1U;   /* 2 bits per interrupt, 16 per register */

    icfgr &= (~(3U << shift));
    icfgr |= (config << (shift + 1));

    GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq) = icfgr;
}
|
||||
|
||||
/* Read back the trigger configuration field of an interrupt from GICD_ICFGRn.
 * NOTE(review): the shift amount '((irq % 16U) >> 1U)' looks inconsistent
 * with the writer, which uses '(irq % 16U) << 1U' (2 bits per interrupt) —
 * verify against upstream before relying on the returned value. */
rt_uint64_t arm_gic_get_configuration(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0U);

    return (GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq) >> ((irq % 16U) >> 1U));
}
|
||||
|
||||
void arm_gic_clear_active(rt_uint64_t index, int irq)
|
||||
{
|
||||
rt_uint64_t mask = 1U << (irq % 32U);
|
||||
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
irq = irq - _gic_table[index].offset;
|
||||
RT_ASSERT(irq >= 0U);
|
||||
|
||||
GIC_DIST_ACTIVE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
|
||||
}
|
||||
|
||||
/* Set up the cpu mask for the specific interrupt */
|
||||
void arm_gic_set_cpu(rt_uint64_t index, int irq, unsigned int cpumask)
|
||||
{
|
||||
rt_uint64_t old_tgt;
|
||||
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
irq = irq - _gic_table[index].offset;
|
||||
RT_ASSERT(irq >= 0U);
|
||||
|
||||
old_tgt = GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq);
|
||||
|
||||
old_tgt &= ~(0x0FFUL << ((irq % 4U)*8U));
|
||||
old_tgt |= cpumask << ((irq % 4U)*8U);
|
||||
|
||||
GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq) = old_tgt;
|
||||
}
|
||||
|
||||
rt_uint64_t arm_gic_get_target_cpu(rt_uint64_t index, int irq)
|
||||
{
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
irq = irq - _gic_table[index].offset;
|
||||
RT_ASSERT(irq >= 0U);
|
||||
|
||||
return (GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq) >> ((irq % 4U) * 8U)) & 0xFFUL;
|
||||
}
|
||||
|
||||
void arm_gic_set_priority(rt_uint64_t index, int irq, rt_uint64_t priority)
|
||||
{
|
||||
rt_uint64_t mask;
|
||||
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
irq = irq - _gic_table[index].offset;
|
||||
RT_ASSERT(irq >= 0U);
|
||||
|
||||
mask = GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq);
|
||||
mask &= ~(0xFFUL << ((irq % 4U) * 8U));
|
||||
mask |= ((priority & 0xFFUL) << ((irq % 4U) * 8U));
|
||||
GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq) = mask;
|
||||
}
|
||||
|
||||
rt_uint64_t arm_gic_get_priority(rt_uint64_t index, int irq)
|
||||
{
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
irq = irq - _gic_table[index].offset;
|
||||
RT_ASSERT(irq >= 0U);
|
||||
|
||||
return (GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq) >> ((irq % 4U) * 8U)) & 0xFFUL;
|
||||
}
|
||||
|
||||
void arm_gic_set_interface_prior_mask(rt_uint64_t index, rt_uint64_t priority)
|
||||
{
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
/* set priority mask */
|
||||
GIC_CPU_PRIMASK(_gic_table[index].cpu_hw_base) = priority & 0xFFUL;
|
||||
}
|
||||
|
||||
rt_uint64_t arm_gic_get_interface_prior_mask(rt_uint64_t index)
|
||||
{
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
return GIC_CPU_PRIMASK(_gic_table[index].cpu_hw_base);
|
||||
}
|
||||
|
||||
void arm_gic_set_binary_point(rt_uint64_t index, rt_uint64_t binary_point)
|
||||
{
|
||||
GIC_CPU_BINPOINT(_gic_table[index].cpu_hw_base) = binary_point & 0x7U;
|
||||
}
|
||||
|
||||
rt_uint64_t arm_gic_get_binary_point(rt_uint64_t index)
|
||||
{
|
||||
return GIC_CPU_BINPOINT(_gic_table[index].cpu_hw_base);
|
||||
}
|
||||
|
||||
rt_uint64_t arm_gic_get_irq_status(rt_uint64_t index, int irq)
|
||||
{
|
||||
rt_uint64_t pending;
|
||||
rt_uint64_t active;
|
||||
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
irq = irq - _gic_table[index].offset;
|
||||
RT_ASSERT(irq >= 0U);
|
||||
|
||||
active = (GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32U)) & 0x1UL;
|
||||
pending = (GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32U)) & 0x1UL;
|
||||
|
||||
return ((active << 1U) | pending);
|
||||
}
|
||||
|
||||
void arm_gic_send_sgi(rt_uint64_t index, int irq, rt_uint64_t target_list, rt_uint64_t filter_list)
|
||||
{
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
irq = irq - _gic_table[index].offset;
|
||||
RT_ASSERT(irq >= 0U);
|
||||
|
||||
GIC_DIST_SOFTINT(_gic_table[index].dist_hw_base) =
|
||||
((filter_list & 0x3U) << 24U) | ((target_list & 0xFFUL) << 16U) | (irq & 0x0FUL);
|
||||
}
|
||||
|
||||
rt_uint64_t arm_gic_get_high_pending_irq(rt_uint64_t index)
|
||||
{
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
return GIC_CPU_HIGHPRI(_gic_table[index].cpu_hw_base);
|
||||
}
|
||||
|
||||
rt_uint64_t arm_gic_get_interface_id(rt_uint64_t index)
|
||||
{
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
return GIC_CPU_IIDR(_gic_table[index].cpu_hw_base);
|
||||
}
|
||||
|
||||
void arm_gic_set_group(rt_uint64_t index, int irq, rt_uint64_t group)
|
||||
{
|
||||
uint32_t igroupr;
|
||||
uint32_t shift;
|
||||
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
RT_ASSERT(group <= 1U);
|
||||
|
||||
irq = irq - _gic_table[index].offset;
|
||||
RT_ASSERT(irq >= 0U);
|
||||
|
||||
igroupr = GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq);
|
||||
shift = (irq % 32U);
|
||||
igroupr &= (~(1U << shift));
|
||||
igroupr |= ((group & 0x1U) << shift);
|
||||
|
||||
GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq) = igroupr;
|
||||
}
|
||||
|
||||
rt_uint64_t arm_gic_get_group(rt_uint64_t index, int irq)
|
||||
{
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
irq = irq - _gic_table[index].offset;
|
||||
RT_ASSERT(irq >= 0U);
|
||||
|
||||
return (GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq) >> (irq % 32U)) & 0x1UL;
|
||||
}
|
||||
|
||||
/* One-time GIC distributor initialisation.
 *
 * Discovers the number of supported INTIDs from GICD_TYPER, clamps it to the
 * architectural (1020) and platform (ARM_GIC_NR_IRQS) maxima, then — with
 * forwarding disabled — programs defaults for all SPIs: level-triggered,
 * targeted at CPU0, priority 0xa0, disabled, group 0. Finally re-enables
 * group forwarding via GICD_CTLR.
 *
 * @param index     controller index (< ARM_GIC_MAX_NR)
 * @param dist_base distributor MMIO base address
 * @param irq_start first vector-table number served by this controller
 * @return 0 always
 */
int arm_gic_dist_init(rt_uint64_t index, rt_uint64_t dist_base, int irq_start)
{
    unsigned int gic_type, i;
    rt_uint64_t cpumask = 1U << 0U;   /* target all SPIs at CPU interface 0 */

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    _gic_table[index].dist_hw_base = dist_base;
    _gic_table[index].offset = irq_start;

    /* Find out how many interrupts are supported. */
    gic_type = GIC_DIST_TYPE(dist_base);
    _gic_max_irq = ((gic_type & 0x1fU) + 1U) * 32U;

    /*
     * The GIC only supports up to 1020 interrupt sources.
     * Limit this to either the architected maximum, or the
     * platform maximum.
     */
    if (_gic_max_irq > 1020U)
    {
        _gic_max_irq = 1020U;
    }
    if (_gic_max_irq > ARM_GIC_NR_IRQS) /* the platform maximum interrupts */
    {
        _gic_max_irq = ARM_GIC_NR_IRQS;
    }

    /* replicate the one-byte CPU mask into all four target-byte lanes */
    cpumask |= cpumask << 8U;
    cpumask |= cpumask << 16U;
    cpumask |= cpumask << 24U;

    GIC_DIST_CTRL(dist_base) = 0x0U;   /* disable forwarding while configuring */

    /* Set all global interrupts to be level triggered, active low. */
    for (i = 32U; i < _gic_max_irq; i += 16U)
    {
        GIC_DIST_CONFIG(dist_base, i) = 0x0U;
    }

    /* Set all global interrupts to this CPU only. */
    for (i = 32U; i < _gic_max_irq; i += 4U)
    {
        GIC_DIST_TARGET(dist_base, i) = cpumask;
    }

    /* Set priority on all interrupts. */
    for (i = 0U; i < _gic_max_irq; i += 4U)
    {
        GIC_DIST_PRI(dist_base, i) = 0xa0a0a0a0U;
    }

    /* Disable all interrupts. */
    for (i = 0U; i < _gic_max_irq; i += 32U)
    {
        GIC_DIST_ENABLE_CLEAR(dist_base, i) = 0xffffffffU;
    }

    /* All interrupts defaults to IGROUP1(IRQ). */
    /*
    for (i = 0; i < _gic_max_irq; i += 32)
    {
        GIC_DIST_IGROUP(dist_base, i) = 0xffffffffU;
    }
    */
    for (i = 0U; i < _gic_max_irq; i += 32U)
    {
        GIC_DIST_IGROUP(dist_base, i) = 0U;
    }

    /* Enable group0 and group1 interrupt forwarding. */
    GIC_DIST_CTRL(dist_base) = 0x01U;

    return 0;
}
|
||||
|
||||
/* Per-CPU GIC CPU-interface initialisation: record the base on first call,
 * set the priority mask to 0xf0 (admit all but the lowest priorities), no
 * priority grouping (binary point 7), and enable interrupt signalling.
 * Returns 0 always. */
int arm_gic_cpu_init(rt_uint64_t index, rt_uint64_t cpu_base)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    if (!_gic_table[index].cpu_hw_base)
    {
        _gic_table[index].cpu_hw_base = cpu_base;
    }
    cpu_base = _gic_table[index].cpu_hw_base;

    GIC_CPU_PRIMASK(cpu_base) = 0xf0U;
    GIC_CPU_BINPOINT(cpu_base) = 0x7U;
    /* Enable CPU interrupt */
    GIC_CPU_CTRL(cpu_base) = 0x01U;

    return 0;
}
|
||||
|
||||
void arm_gic_dump_type(rt_uint64_t index)
|
||||
{
|
||||
unsigned int gic_type;
|
||||
|
||||
gic_type = GIC_DIST_TYPE(_gic_table[index].dist_hw_base);
|
||||
rt_kprintf("GICv%d on %p, max IRQs: %d, %s security extension(%08x)\n",
|
||||
(GIC_DIST_ICPIDR2(_gic_table[index].dist_hw_base) >> 4U) & 0xfUL,
|
||||
_gic_table[index].dist_hw_base,
|
||||
_gic_max_irq,
|
||||
gic_type & (1U << 10U) ? "has" : "no",
|
||||
gic_type);
|
||||
}
|
||||
|
||||
void arm_gic_dump(rt_uint64_t index)
|
||||
{
|
||||
unsigned int i, k;
|
||||
|
||||
k = GIC_CPU_HIGHPRI(_gic_table[index].cpu_hw_base);
|
||||
rt_kprintf("--- high pending priority: %d(%08x)\n", k, k);
|
||||
rt_kprintf("--- hw mask ---\n");
|
||||
for (i = 0U; i < _gic_max_irq / 32U; i++)
|
||||
{
|
||||
rt_kprintf("0x%08x, ",
|
||||
GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base,
|
||||
i * 32U));
|
||||
}
|
||||
rt_kprintf("\n--- hw pending ---\n");
|
||||
for (i = 0U; i < _gic_max_irq / 32U; i++)
|
||||
{
|
||||
rt_kprintf("0x%08x, ",
|
||||
GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base,
|
||||
i * 32U));
|
||||
}
|
||||
rt_kprintf("\n--- hw active ---\n");
|
||||
for (i = 0U; i < _gic_max_irq / 32U; i++)
|
||||
{
|
||||
rt_kprintf("0x%08x, ",
|
||||
GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base,
|
||||
i * 32U));
|
||||
}
|
||||
rt_kprintf("\n");
|
||||
}
|
||||
|
||||
/* MSH shell command: print GIC type info and the enable/pending/active
 * register state of controller 0. */
long gic_dump(void)
{
    arm_gic_dump_type(0);
    arm_gic_dump(0);

    return 0;
}
|
||||
MSH_CMD_EXPORT(gic_dump, show gic status);
|
||||
|
||||
#endif /* defined(BSP_USING_GIC) && defined(BSP_USING_GICV2) */
|
||||
856
RT_Thread/libcpu/aarch64/common/gicv3.c
Normal file
856
RT_Thread/libcpu/aarch64/common/gicv3.c
Normal file
@ -0,0 +1,856 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2022, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2013-07-20 Bernard first version
|
||||
* 2014-04-03 Grissiom many enhancements
|
||||
* 2018-11-22 Jesven add rt_hw_ipi_send()
|
||||
* add rt_hw_ipi_handler_install()
|
||||
* 2022-03-08 GuEe-GUI add BSP bind SPI CPU self support
|
||||
* add GICv3 AArch64 system register interface
|
||||
* modify arm_gic_redist_init() args
|
||||
* modify arm_gic_cpu_init() args
|
||||
* modify arm_gic_send_affinity_sgi() args
|
||||
* remove arm_gic_redist_address_set()
|
||||
* remove arm_gic_cpu_interface_address_set()
|
||||
* remove arm_gic_secondary_cpu_init()
|
||||
* remove get_main_cpu_affval()
|
||||
* remove arm_gic_cpumask_to_affval()
|
||||
*/
|
||||
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
|
||||
#if defined(BSP_USING_GIC) && defined(BSP_USING_GICV3)
|
||||
|
||||
#include <gicv3.h>
|
||||
#include <cp15.h>
|
||||
|
||||
#include <board.h>
|
||||
|
||||
#ifndef ARM_SPI_BIND_CPU_ID
|
||||
#define ARM_SPI_BIND_CPU_ID 0
|
||||
#endif
|
||||
|
||||
#if !defined(RT_USING_SMP) && !defined(RT_USING_AMP)
|
||||
#define RT_CPUS_NR 1
|
||||
#else
|
||||
extern rt_uint64_t rt_cpu_mpidr_early[];
|
||||
#endif /* RT_USING_SMP */
|
||||
|
||||
/* 'ARM_GIC_MAX_NR' is the number of cores */
|
||||
static struct arm_gic _gic_table[ARM_GIC_MAX_NR];
|
||||
static unsigned int _gic_max_irq;
|
||||
|
||||
int arm_gic_get_active_irq(rt_uint64_t index)
|
||||
{
|
||||
rt_base_t irq;
|
||||
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
GET_GICV3_REG(ICC_IAR1_EL1, irq);
|
||||
|
||||
irq = (irq & 0x1ffffff) + _gic_table[index].offset;
|
||||
return irq;
|
||||
}
|
||||
|
||||
void arm_gic_ack(rt_uint64_t index, int irq)
|
||||
{
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
RT_ASSERT(irq >= 0);
|
||||
|
||||
__DSB();
|
||||
SET_GICV3_REG(ICC_EOIR1_EL1, (rt_base_t)irq);
|
||||
}
|
||||
|
||||
void arm_gic_mask(rt_uint64_t index, int irq)
|
||||
{
|
||||
rt_uint64_t mask = 1 << (irq % 32);
|
||||
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
irq = irq - _gic_table[index].offset;
|
||||
RT_ASSERT(irq >= 0);
|
||||
|
||||
if (irq < 32)
|
||||
{
|
||||
rt_int32_t cpu_id = rt_hw_cpu_id();
|
||||
|
||||
GIC_RDISTSGI_ICENABLER0(_gic_table[index].redist_hw_base[cpu_id]) = mask;
|
||||
}
|
||||
else
|
||||
{
|
||||
GIC_DIST_ENABLE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
|
||||
}
|
||||
}
|
||||
|
||||
void arm_gic_umask(rt_uint64_t index, int irq)
|
||||
{
|
||||
rt_uint64_t mask = 1 << (irq % 32);
|
||||
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
irq = irq - _gic_table[index].offset;
|
||||
RT_ASSERT(irq >= 0);
|
||||
|
||||
if (irq < 32)
|
||||
{
|
||||
rt_int32_t cpu_id = rt_hw_cpu_id();
|
||||
|
||||
GIC_RDISTSGI_ISENABLER0(_gic_table[index].redist_hw_base[cpu_id]) = mask;
|
||||
}
|
||||
else
|
||||
{
|
||||
GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base, irq) = mask;
|
||||
}
|
||||
}
|
||||
|
||||
rt_uint64_t arm_gic_get_pending_irq(rt_uint64_t index, int irq)
|
||||
{
|
||||
rt_uint64_t pend;
|
||||
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
irq = irq - _gic_table[index].offset;
|
||||
RT_ASSERT(irq >= 0);
|
||||
|
||||
if (irq >= 16)
|
||||
{
|
||||
pend = (GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32)) & 0x1;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* INTID 0-15 Software Generated Interrupt */
|
||||
pend = (GIC_DIST_SPENDSGI(_gic_table[index].dist_hw_base, irq) >> ((irq % 4) * 8)) & 0xff;
|
||||
/* No CPU identification offered */
|
||||
if (pend != 0)
|
||||
{
|
||||
pend = 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
pend = 0;
|
||||
}
|
||||
}
|
||||
|
||||
return pend;
|
||||
}
|
||||
|
||||
/*
 * Mark an interrupt pending. SPIs/PPIs are set through GICD_ISPENDR;
 * SGIs (INTID 0-15) are generated through GICD_SGIR, forwarded to the
 * CPU interface that requested the interrupt.
 */
void arm_gic_set_pending_irq(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    if (irq >= 16)
    {
        /* Fix: unsigned literal — 1 << 31 on a signed int is UB. */
        GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) = 1U << (irq % 32);
    }
    else
    {
        /* INTID 0-15 Software Generated Interrupt.
         * TargetListFilter = 0b10: forward only to the requesting CPU. */
        GIC_DIST_SOFTINT(_gic_table[index].dist_hw_base) = (irq | 0x02000000);
    }
}
/*
 * Clear the pending state of an interrupt.
 * SPIs/PPIs use GICD_ICPENDR; SGIs use the per-source-CPU byte in
 * GICD_CPENDSGIR.
 */
void arm_gic_clear_pending_irq(rt_uint64_t index, int irq)
{
    rt_uint64_t mask;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    if (irq >= 16)
    {
        /* Fix: unsigned literal so bit 31 is not a signed-shift overflow. */
        mask = 1U << (irq % 32);
        GIC_DIST_PENDING_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
    }
    else
    {
        mask = 1U << ((irq % 4) * 8);
        GIC_DIST_CPENDSGI(_gic_table[index].dist_hw_base, irq) = mask;
    }
}
/*
 * Set the trigger configuration of an interrupt in GICD_ICFGR.
 * Each interrupt owns a 2-bit field; bit [2n+1] selects edge (1) or
 * level (0) triggering, hence the write at (shift + 1).
 */
void arm_gic_set_configuration(rt_uint64_t index, int irq, rt_uint32_t config)
{
    rt_uint64_t icfgr;
    rt_uint64_t shift;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    icfgr = GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq);
    shift = (irq % 16) << 1;

    /* Fix: unsigned-long literal — `3 << 30` overflows a signed int (UB). */
    icfgr &= ~(3UL << shift);
    icfgr |= ((rt_uint64_t)config << (shift + 1));

    GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq) = icfgr;
}
/*
 * Read back the 2-bit trigger configuration field of an interrupt
 * from GICD_ICFGR (field n occupies bits [2n+1:2n]).
 */
rt_uint64_t arm_gic_get_configuration(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    /* Fix: the field offset is (irq % 16) * 2, i.e. `<< 1`. The original
     * shifted by (irq % 16) >> 1, which disagrees with
     * arm_gic_set_configuration() and returns the wrong field. */
    return (GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq) >> ((irq % 16) << 1));
}
/* Clear the active state of an interrupt via GICD_ICACTIVER. */
void arm_gic_clear_active(rt_uint64_t index, int irq)
{
    rt_uint64_t mask;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    /* Fix: derive the mask from the offset-adjusted INTID (original used
     * the pre-offset value) and use an unsigned literal — 1 << 31 on a
     * signed int is undefined behavior. */
    mask = 1U << (irq % 32);

    GIC_DIST_ACTIVE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
}
/*
 * Route an SPI to a specific PE via GICD_IROUTER.
 * Only SPIs (INTID >= 32) have routing registers; the affinity value
 * is masked to the Aff3.Aff2.Aff1.Aff0 fields ([39:32],[23:0]).
 */
void arm_gic_set_router_cpu(rt_uint64_t index, int irq, rt_uint64_t aff)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 32);

    GIC_DIST_IROUTER(_gic_table[index].dist_hw_base, irq) = aff & 0xff00ffffffULL;
}
/* Read the GICD_IROUTER routing value of an SPI (INTID >= 32). */
rt_uint64_t arm_gic_get_router_cpu(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 32);

    return GIC_DIST_IROUTER(_gic_table[index].dist_hw_base, irq);
}
/* Set up the cpu mask for the specific interrupt */
|
||||
void arm_gic_set_cpu(rt_uint64_t index, int irq, unsigned int cpumask)
|
||||
{
|
||||
rt_uint64_t old_tgt;
|
||||
|
||||
RT_ASSERT(index < ARM_GIC_MAX_NR);
|
||||
|
||||
irq = irq - _gic_table[index].offset;
|
||||
RT_ASSERT(irq >= 0);
|
||||
|
||||
old_tgt = GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq);
|
||||
|
||||
old_tgt &= ~(0x0ff << ((irq % 4) * 8));
|
||||
old_tgt |= cpumask << ((irq % 4) * 8);
|
||||
|
||||
GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq) = old_tgt;
|
||||
}
|
||||
|
||||
/* Read an interrupt's one-byte CPU target field from GICD_ITARGETSR. */
rt_uint64_t arm_gic_get_target_cpu(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    return (GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq) >> ((irq % 4) * 8)) & 0xff;
}
/*
 * Set the priority byte of an interrupt. SGIs/PPIs (INTID < 32) live
 * in the per-CPU redistributor IPRIORITYR registers, SPIs in the
 * distributor. Both paths read-modify-write the owning 32-bit word.
 */
void arm_gic_set_priority(rt_uint64_t index, int irq, rt_uint64_t priority)
{
    rt_uint64_t mask;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    if (irq < 32)
    {
        rt_int32_t cpu_id = rt_hw_cpu_id();

        mask = GIC_RDISTSGI_IPRIORITYR(_gic_table[index].redist_hw_base[cpu_id], irq);
        mask &= ~(0xffUL << ((irq % 4) * 8));
        mask |= ((priority & 0xff) << ((irq % 4) * 8));
        GIC_RDISTSGI_IPRIORITYR(_gic_table[index].redist_hw_base[cpu_id], irq) = mask;
    }
    else
    {
        mask = GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq);
        /* Fix: use 0xffUL as in the branch above — `0xff << 24` on a
         * signed int is undefined behavior. */
        mask &= ~(0xffUL << ((irq % 4) * 8));
        mask |= ((priority & 0xff) << ((irq % 4) * 8));
        GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq) = mask;
    }
}
/*
 * Read the priority byte of an interrupt: redistributor bank for
 * SGIs/PPIs (INTID < 32), distributor for SPIs.
 */
rt_uint64_t arm_gic_get_priority(rt_uint64_t index, int irq)
{
    rt_uint64_t shift;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    shift = (irq % 4) * 8;

    if (irq < 32)
    {
        rt_int32_t cpu_id = rt_hw_cpu_id();

        return (GIC_RDISTSGI_IPRIORITYR(_gic_table[index].redist_hw_base[cpu_id], irq) >> shift) & 0xff;
    }

    return (GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq) >> shift) & 0xff;
}
/*
 * Write the low byte of ICC_SRE_EL1 (system register interface enable).
 * Note: the original comment said "set priority mask", which was a
 * copy-paste error — this register has nothing to do with PMR.
 */
void arm_gic_set_system_register_enable_mask(rt_uint64_t index, rt_uint64_t value)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    value &= 0xff;
    SET_GICV3_REG(ICC_SRE_EL1, value);
    /* Make the register change visible before any subsequent GIC access. */
    __ISB();
}
/* Read ICC_SRE_EL1 (system register interface enable bits). */
rt_uint64_t arm_gic_get_system_register_enable_mask(rt_uint64_t index)
{
    rt_uint64_t value;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    GET_GICV3_REG(ICC_SRE_EL1, value);

    return value;
}
/*
 * Program the CPU interface priority mask (ICC_PMR_EL1). Only
 * interrupts with higher priority (lower value) than this mask are
 * signalled to the PE.
 */
void arm_gic_set_interface_prior_mask(rt_uint64_t index, rt_uint64_t priority)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    priority &= 0xff;
    SET_GICV3_REG(ICC_PMR_EL1, priority);
}
/* Read the current CPU interface priority mask (ICC_PMR_EL1). */
rt_uint64_t arm_gic_get_interface_prior_mask(rt_uint64_t index)
{
    rt_uint64_t priority;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    GET_GICV3_REG(ICC_PMR_EL1, priority);

    return priority;
}
/* Set the Group 1 binary point (ICC_BPR1_EL1, 3 valid bits). */
void arm_gic_set_binary_point(rt_uint64_t index, rt_uint64_t binary_point)
{
    RT_UNUSED(index);

    SET_GICV3_REG(ICC_BPR1_EL1, binary_point & 0x7);
}
/* Read the Group 1 binary point (ICC_BPR1_EL1). */
rt_uint64_t arm_gic_get_binary_point(rt_uint64_t index)
{
    rt_uint64_t binary_point;

    RT_UNUSED(index);
    GET_GICV3_REG(ICC_BPR1_EL1, binary_point);

    return binary_point;
}
/*
 * Return the combined state of an interrupt:
 * bit 1 = active (GICD_ISACTIVER), bit 0 = pending (GICD_ISPENDR).
 */
rt_uint64_t arm_gic_get_irq_status(rt_uint64_t index, int irq)
{
    rt_uint64_t pending, active;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    active  = (GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32)) & 0x1;
    pending = (GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32)) & 0x1;

    return (active << 1) | pending;
}
#if defined(RT_USING_SMP) || defined(RT_USING_AMP)
|
||||
struct gicv3_sgi_aff
|
||||
{
|
||||
rt_uint64_t aff;
|
||||
rt_uint32_t cpu_mask[(RT_CPUS_NR + 31) >> 5];
|
||||
rt_uint16_t target_list;
|
||||
};
|
||||
|
||||
static struct gicv3_sgi_aff sgi_aff_table[RT_CPUS_NR];
|
||||
static rt_uint64_t sgi_aff_table_num;
|
||||
static void sgi_aff_add_table(rt_uint64_t aff, rt_uint64_t cpu_index)
|
||||
{
|
||||
rt_uint64_t i;
|
||||
|
||||
for (i = 0; i < sgi_aff_table_num; i++)
|
||||
{
|
||||
if (sgi_aff_table[i].aff == aff)
|
||||
{
|
||||
sgi_aff_table[i].cpu_mask[cpu_index >> 5] |= (1 << (cpu_index & 0x1F));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
sgi_aff_table[sgi_aff_table_num].aff = aff;
|
||||
sgi_aff_table[sgi_aff_table_num].cpu_mask[cpu_index >> 5] |= (1 << (cpu_index & 0x1F));
|
||||
sgi_aff_table_num++;
|
||||
}
|
||||
|
||||
static rt_uint64_t gicv3_sgi_init(void)
|
||||
{
|
||||
rt_uint64_t i, icc_sgi1r_value;
|
||||
|
||||
for (i = 0; i < RT_CPUS_NR; i++)
|
||||
{
|
||||
icc_sgi1r_value = (rt_uint64_t)((rt_cpu_mpidr_early[i] >> 8) & 0xFF) << 16;
|
||||
icc_sgi1r_value |= (rt_uint64_t)((rt_cpu_mpidr_early[i] >> 16) & 0xFF) << 32;
|
||||
icc_sgi1r_value |= (rt_uint64_t)((rt_cpu_mpidr_early[i] >> 32) & 0xFF) << 48;
|
||||
icc_sgi1r_value |= (rt_uint64_t)((rt_cpu_mpidr_early[i] >> 4) & 0xF) << 44;
|
||||
sgi_aff_add_table(icc_sgi1r_value, i);
|
||||
}
|
||||
|
||||
return (RT_CPUS_NR + 31) >> 5;
|
||||
}
|
||||
|
||||
/*
 * Fire the SGI identified by `int_id` to every affinity group with a
 * non-empty pending target list, then clear those lists.
 */
rt_inline void gicv3_sgi_send(rt_uint64_t int_id)
{
    rt_uint64_t i;

    for (i = 0; i < sgi_aff_table_num; i++)
    {
        if (!sgi_aff_table[i].target_list)
        {
            continue;
        }

        __DSB();
        /* Interrupts routed to the PEs specified by Aff3.Aff2.Aff1.<target list>. */
        SET_GICV3_REG(ICC_SGI1R_EL1, sgi_aff_table[i].aff | int_id | sgi_aff_table[i].target_list);
        __ISB();
        sgi_aff_table[i].target_list = 0;
    }
}
/*
 * Translate a 32-bit logical-CPU mask (word `array` of the caller's
 * mask) into per-affinity-group Aff0 target-list bits.
 */
rt_inline void gicv3_sgi_target_list_set(rt_uint64_t array, rt_uint32_t cpu_mask)
{
    rt_uint64_t i, bit;
    rt_uint32_t match;

    for (i = 0; i < sgi_aff_table_num; i++)
    {
        /* Fix: only consume the bits that actually belong to group i.
         * The original drained ALL remaining cpu_mask bits into the
         * first matching group, mis-targeting CPUs that live in a
         * different affinity cluster. Also use unsigned shifts (bit
         * can be 31; 1 << 31 on a signed int is UB). */
        match = sgi_aff_table[i].cpu_mask[array] & cpu_mask;
        while (match)
        {
            bit = __builtin_ctzl(match);
            match &= ~(1U << bit);
            cpu_mask &= ~(1U << bit);
            sgi_aff_table[i].target_list |= 1U << (rt_cpu_mpidr_early[(array << 5) | bit] & 0xF);
        }
    }
}
/*
 * Send an SGI either to the CPUs selected by `cpu_masks` (one 32-bit
 * word per 32 logical CPUs, GICV3_ROUTED_TO_SPEC) or, for any other
 * routing mode, to all PEs excluding the sender (IRM = 1).
 * The affinity table is built lazily on the first targeted send.
 */
void arm_gic_send_affinity_sgi(rt_uint64_t index, int irq, rt_uint32_t cpu_masks[], rt_uint64_t routing_mode)
{
    rt_uint64_t word;
    rt_uint64_t int_id = (irq & 0xf) << 24;
    static rt_uint64_t masks_nrs = 0;

    if (routing_mode != GICV3_ROUTED_TO_SPEC)
    {
        __DSB();
        /* IRM bit (40) set: routed to all PEs in the system, excluding "self". */
        SET_GICV3_REG(ICC_SGI1R_EL1, (0x10000000000ULL) | int_id);
        __ISB();
        return;
    }

    if (!masks_nrs)
    {
        masks_nrs = gicv3_sgi_init();
    }

    for (word = 0; word < masks_nrs; word++)
    {
        if (cpu_masks[word])
        {
            gicv3_sgi_target_list_set(word, cpu_masks[word]);
        }
    }

    gicv3_sgi_send(int_id);
}
#endif /* defined(RT_USING_SMP) || defined(RT_USING_AMP) */
|
||||
|
||||
/* Read the highest-priority pending Group 1 INTID (ICC_HPPIR1_EL1). */
rt_uint64_t arm_gic_get_high_pending_irq(rt_uint64_t index)
{
    rt_uint64_t irq;

    RT_ASSERT(index < ARM_GIC_MAX_NR);
    RT_UNUSED(index);

    GET_GICV3_REG(ICC_HPPIR1_EL1, irq);

    return irq;
}
/*
 * Read this CPU's GIC CPU interface identification register
 * (GICC_IIDR), or 0 when no per-CPU interface base is registered.
 * Interrupts are disabled around the access so the CPU id stays stable.
 */
rt_uint64_t arm_gic_get_interface_id(rt_uint64_t index)
{
    rt_uint64_t iidr = 0;
    rt_base_t level;
    int cpuid;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    level = rt_hw_local_irq_disable();
    cpuid = rt_hw_cpu_id();
    if (_gic_table[index].cpu_hw_base[cpuid] != RT_NULL)
    {
        iidr = GIC_CPU_IIDR(_gic_table[index].cpu_hw_base[cpuid]);
    }
    rt_hw_local_irq_enable(level);

    return iidr;
}
/*
 * Assign an interrupt to group 0 or group 1 by patching its bit in
 * GICD_IGROUPR (read-modify-write).
 */
void arm_gic_set_group(rt_uint64_t index, int irq, rt_uint64_t group)
{
    rt_uint64_t igroupr;
    rt_uint64_t shift;

    RT_ASSERT(index < ARM_GIC_MAX_NR);
    RT_ASSERT(group <= 1);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    shift = (irq % 32);
    igroupr = GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq);
    igroupr &= ~(1U << shift);
    igroupr |= (group & 0x1U) << shift;

    GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq) = igroupr;
}
/* Return the group bit (0 or 1) of an interrupt from GICD_IGROUPR. */
rt_uint64_t arm_gic_get_group(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    return (GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq) >> (irq % 32)) & 0x1UL;
}
/*
 * Busy-wait until the Register Write Pending (RWP) bit clears in the
 * relevant CTLR: this CPU's redistributor for irq < 32, otherwise the
 * distributor. Always returns 0.
 */
static int arm_gicv3_wait_rwp(rt_uint64_t index, rt_uint64_t irq)
{
    rt_uint64_t base;
    rt_uint64_t rwp_bit;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    if (irq < 32)
    {
        rt_int32_t cpu_id = rt_hw_cpu_id();

        base    = _gic_table[index].redist_hw_base[cpu_id];
        rwp_bit = GICR_CTLR_RWP;
    }
    else
    {
        base    = _gic_table[index].dist_hw_base;
        rwp_bit = GICD_CTLR_RWP;
    }

    while (HWREG32(base) & rwp_bit)
    {
        /* spin until the GIC has committed the previous write */
    }

    return 0;
}
/*
 * Initialize the GICv3 distributor: probe the supported IRQ count,
 * then (unless built as an AMP slave) configure every SPI to a sane
 * default — level-triggered, routed to the boot CPU, priority 0xa0,
 * disabled, not pending, Group 1 — and finally enable affinity
 * routing plus Non-secure Group 1 distribution.
 */
int arm_gic_dist_init(rt_uint64_t index, rt_uint64_t dist_base, int irq_start)
{
    int i;
    unsigned int gic_type;
    rt_uint64_t main_cpu_affinity_val;

    RT_UNUSED(i);
    RT_UNUSED(main_cpu_affinity_val);
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    _gic_table[index].dist_hw_base = dist_base;
    _gic_table[index].offset = irq_start;

    /* GICD_TYPER.ITLinesNumber encodes (lines / 32) - 1. */
    gic_type = GIC_DIST_TYPE(dist_base);
    _gic_max_irq = ((gic_type & 0x1f) + 1) * 32;

    /* Clamp to the architected maximum (1020 sources) ... */
    if (_gic_max_irq > 1020)
    {
        _gic_max_irq = 1020;
    }
    /* ... and to the platform maximum. */
    if (_gic_max_irq > ARM_GIC_NR_IRQS)
    {
        _gic_max_irq = ARM_GIC_NR_IRQS;
    }

#ifndef RT_AMP_SLAVE
    GIC_DIST_CTRL(dist_base) = 0;
    /* Wait for register write pending */
    arm_gicv3_wait_rwp(0, 32);

    /* Set all global interrupts to be level triggered, active low. */
    for (i = 32; i < _gic_max_irq; i += 16)
    {
        GIC_DIST_CONFIG(dist_base, i) = 0;
    }

    arm_gicv3_wait_rwp(0, 32);

#ifdef RT_USING_SMP
    main_cpu_affinity_val = rt_cpu_mpidr_early[ARM_SPI_BIND_CPU_ID];
#else
    __asm__ volatile ("mrs %0, mpidr_el1":"=r"(main_cpu_affinity_val));
#endif

    /* Keep only aff3[39:32], aff2[23:16], aff1[15:8], aff0[7:0]. */
    main_cpu_affinity_val &= 0xff00ffffffULL;

    /* Route every SPI to this CPU only. */
    for (i = 32; i < _gic_max_irq; i++)
    {
        GIC_DIST_IROUTER(dist_base, i) = main_cpu_affinity_val | (GICV3_ROUTED_TO_SPEC << 31);
    }

    arm_gicv3_wait_rwp(0, 32);

    /* Default priority for all SPI interrupts (one byte each). */
    for (i = 32; i < _gic_max_irq; i += 4)
    {
        GIC_DIST_PRI(dist_base, i) = 0xa0a0a0a0;
    }

    arm_gicv3_wait_rwp(0, 32);
    /* Disable and de-pend all interrupts. */
    for (i = 0; i < _gic_max_irq; i += 32)
    {
        GIC_DIST_PENDING_CLEAR(dist_base, i) = 0xffffffff;
        GIC_DIST_ENABLE_CLEAR(dist_base, i) = 0xffffffff;
    }

    arm_gicv3_wait_rwp(0, 32);
    /* All interrupts default to IGROUP1 (IRQ). */
    for (i = 0; i < _gic_max_irq; i += 32)
    {
        GIC_DIST_IGROUP(dist_base, i) = 0xffffffff;
    }

    arm_gicv3_wait_rwp(0, 32);

    /*
     * GICD_CTLR selects the routing mode and the enabled groups:
     *   ARE bits   — enable affinity routing (separately controllable
     *                for Secure and Non-secure state; without it the
     *                GICv3 runs in legacy mode);
     *   EnableGrp1S / EnableGrp1NS / EnableGrp0 — per-group
     *                distribution enables.
     * Here: affinity routing + Non-secure Group 1 distribution.
     */
    GIC_DIST_CTRL(dist_base) = GICD_CTLR_ARE_NS | GICD_CTLR_ENGRP1NS;

#endif /* RT_AMP_SLAVE */
    return 0;
}
/*
 * Initialize this CPU's redistributor frame: locate it relative to the
 * first registered base, wake it, then give SGIs/PPIs a clean default
 * state (disabled, not pending, Group 1, priority 0xa0, PPIs level).
 */
int arm_gic_redist_init(rt_uint64_t index, rt_uint64_t redist_base)
{
    int i;
    int cpu_id = rt_hw_cpu_id();
    static int master_cpu_id = -1;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    if (master_cpu_id < 0)
    {
        master_cpu_id = 0;
        /* Flush so secondaries that start with caches off see the value. */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, &master_cpu_id, sizeof(master_cpu_id));
    }

    if (!_gic_table[index].redist_hw_base[master_cpu_id])
    {
        _gic_table[index].redist_hw_base[master_cpu_id] = redist_base;
    }
    redist_base = _gic_table[index].redist_hw_base[master_cpu_id];

    /* Each redistributor occupies two 64 KiB frames (RD_base + SGI_base),
     * so CPU n sits at base + n * 128 KiB. */
    redist_base += cpu_id * (2 << 16);
    _gic_table[index].redist_hw_base[cpu_id] = redist_base;

    /* Wake the redistributor: clear ProcessorSleep (bit 1), then wait
     * until ChildrenAsleep (bit 2) reads as 0. */
    GIC_RDIST_WAKER(redist_base) &= ~(1 << 1);
    while (GIC_RDIST_WAKER(redist_base) & (1 << 2))
    {
    }

    /* Disable all SGI and PPI interrupts. */
    GIC_RDISTSGI_ICENABLER0(redist_base) = 0xffffffff;
    arm_gicv3_wait_rwp(0, 0);

    /* Clear all interrupt pending state. */
    GIC_RDISTSGI_ICPENDR0(redist_base) = 0xffffffff;

    /* Mark every SGI/PPI as (Non-secure) Group 1. */
    GIC_RDISTSGI_IGROUPR0(redist_base, 0) = 0xffffffff;
    GIC_RDISTSGI_IGRPMODR0(redist_base, 0) = 0xffffffff;

    /* Default priorities for SGI 0-15 and PPI 16-31 (one byte each). */
    for (i = 0; i < 32; i += 4)
    {
        GIC_RDISTSGI_IPRIORITYR(redist_base, i) = 0xa0a0a0a0U;
    }

    /* PPIs: level-triggered. */
    GIC_RDISTSGI_ICFGR1(redist_base) = 0;
    return 0;
}
/*
 * Bring up the CPU interface for the calling CPU: enable the system
 * register interface (ICC_SRE_EL1.SRE), open the priority mask,
 * enable Group 1 interrupts, and configure ICC_CTLR_EL1.
 */
int arm_gic_cpu_init(rt_uint64_t index, rt_uint64_t cpu_base)
{
    rt_uint64_t value;
    int cpu_id = rt_hw_cpu_id();

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    _gic_table[index].cpu_hw_base[cpu_id] = cpu_base;

    /* ICC_SRE_EL1.SRE = 1: use the system register interface. */
    value = arm_gic_get_system_register_enable_mask(index);
    value |= (1 << 0);
    arm_gic_set_system_register_enable_mask(index, value);
    SET_GICV3_REG(ICC_CTLR_EL1, 0l);

    /* Lowest possible mask: let every priority through. */
    arm_gic_set_interface_prior_mask(index, 0xff);

    /* Enable Group 1 interrupts. */
    value = 1;
    SET_GICV3_REG(ICC_IGRPEN1_EL1, value);

    arm_gic_set_binary_point(0, 0);

    /* CBPR = 1: ICC_BPR0_EL1 determines the preemption group for both
     * Group 0 and Group 1 interrupts. */
    value = 1;
    /* EOImode stays 0; bit 18 per the original: targeted SGIs with
     * affinity level 0 values of 0 - 255 are supported. */
    value |= 1 << 18;
    SET_GICV3_REG(ICC_CTLR_EL1, value);

    return 0;
}
/*
 * Print an identification line for the GIC: inferred product
 * (GIC-500/GIC-600 from GICD_IIDR.ProductID), revision, base address,
 * IRQ capacity, and whether the security extension is present.
 */
void arm_gic_dump_type(rt_uint64_t index)
{
    unsigned int gic_type;
    unsigned int gic_version;
    unsigned int gic_rp;

    gic_version = (GIC_DIST_IIDR(_gic_table[index].dist_hw_base) >> 24) & 0xfUL;
    gic_rp      = (GIC_DIST_IIDR(_gic_table[index].dist_hw_base) >> 12) & 0xfUL;
    gic_type    = GIC_DIST_TYPE(_gic_table[index].dist_hw_base);
    rt_kprintf("GICv3-%d r%dp%d on %p, max IRQs: %d, %s security extension(%08x)\n",
               (gic_version == 0) ? 500 : (gic_version == 2) ? 600 : 0,
               (gic_rp >> 4) & 0xF,
               gic_rp & 0xF,
               _gic_table[index].dist_hw_base,
               _gic_max_irq,
               gic_type & (1U << 10U) ? "has" : "no",
               gic_type);
}
/*
 * Dump the distributor's enable / pending / active bitmaps, one
 * 32-bit word per line group, plus the highest pending INTID.
 */
void arm_gic_dump(rt_uint64_t index)
{
    int word;
    unsigned int val;

    val = arm_gic_get_high_pending_irq(0);
    rt_kprintf("--- high pending priority: %d(%08x)\n", val, val);

    rt_kprintf("--- hw mask ---\n");
    for (word = 0; word < _gic_max_irq / 32; ++word)
    {
        rt_kprintf("0x%08x, ", GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base, word * 32));
    }

    rt_kprintf("\b\b\n--- hw pending ---\n");
    for (word = 0; word < _gic_max_irq / 32; ++word)
    {
        rt_kprintf("0x%08x, ", GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, word * 32));
    }

    rt_kprintf("\b\b\n--- hw active ---\n");
    for (word = 0; word < _gic_max_irq / 32; ++word)
    {
        rt_kprintf("0x%08x, ", GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base, word * 32));
    }

    rt_kprintf("\b\b\n");
}
/* Print the GICD_IROUTER affinity binding of every SPI (GICv3 only). */
static void arm_gic_bind_dump(void)
{
#ifdef BSP_USING_GICV3
    int spi;

    for (spi = 32; spi < _gic_max_irq; spi++)
    {
        rt_kprintf("irq(%d) -> 0x%X\n", spi, arm_gic_get_router_cpu(0, spi));
    }
#endif /* BSP_USING_GICV3 */
}
rt_uint64_t *arm_gic_get_gic_table_addr(void)
|
||||
{
|
||||
return (rt_uint64_t *)&_gic_table[0];
|
||||
}
|
||||
|
||||
/* Dump this CPU's redistributor SGI/PPI enable/pending/active words. */
static void arm_gic_sgi_dump(rt_uint64_t index)
{
    rt_int32_t cpu_id = rt_hw_cpu_id();
    rt_uint64_t rdist = _gic_table[index].redist_hw_base[cpu_id];

    rt_kprintf("redist_hw_base = 0x%X\n", rdist);
    rt_kprintf("--- sgi mask ---\n");
    rt_kprintf("0x%08x\n", GIC_RDISTSGI_ISENABLER0(rdist));
    rt_kprintf("--- sgi pending ---\n");
    rt_kprintf("0x%08x\n", GIC_RDISTSGI_ISPENDR0(rdist));
    rt_kprintf("--- sgi active ---\n");
    rt_kprintf("0x%08x\n", GIC_RDISTSGI_ISACTIVER0(rdist));
}
long gic_dump(void)
|
||||
{
|
||||
arm_gic_dump_type(0);
|
||||
arm_gic_dump(0);
|
||||
arm_gic_bind_dump();
|
||||
arm_gic_sgi_dump(0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
MSH_CMD_EXPORT(gic_dump, show gic status);
|
||||
|
||||
#endif /* defined(BSP_USING_GIC) && defined(BSP_USING_GICV3) */
|
||||
55
RT_Thread/libcpu/aarch64/common/gtimer.c
Normal file
55
RT_Thread/libcpu/aarch64/common/gtimer.c
Normal file
@ -0,0 +1,55 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2011-12-20 GuEe-GUI first version
|
||||
*/
|
||||
|
||||
#include <rtthread.h>
|
||||
#include <rthw.h>
|
||||
#include <gtimer.h>
|
||||
#include <cpuport.h>
|
||||
|
||||
#ifdef RT_USING_KTIME
|
||||
#include <ktime.h>
|
||||
#endif
|
||||
|
||||
#define EL1_PHY_TIMER_IRQ_NUM 30
|
||||
|
||||
static volatile rt_uint64_t timer_step;
|
||||
|
||||
static void rt_hw_timer_isr(int vector, void *parameter)
|
||||
{
|
||||
rt_hw_set_gtimer_val(timer_step);
|
||||
rt_tick_increase();
|
||||
}
|
||||
|
||||
void rt_hw_gtimer_init(void)
|
||||
{
|
||||
rt_hw_interrupt_install(EL1_PHY_TIMER_IRQ_NUM, rt_hw_timer_isr, RT_NULL, "tick");
|
||||
rt_hw_isb();
|
||||
timer_step = rt_hw_get_gtimer_frq();
|
||||
rt_hw_dsb();
|
||||
timer_step /= RT_TICK_PER_SECOND;
|
||||
rt_hw_gtimer_local_enable();
|
||||
}
|
||||
|
||||
void rt_hw_gtimer_local_enable(void)
|
||||
{
|
||||
rt_hw_gtimer_disable();
|
||||
rt_hw_set_gtimer_val(timer_step);
|
||||
rt_hw_interrupt_umask(EL1_PHY_TIMER_IRQ_NUM);
|
||||
#ifdef RT_USING_KTIME
|
||||
rt_ktime_cputimer_init();
|
||||
#endif
|
||||
rt_hw_gtimer_enable();
|
||||
}
|
||||
|
||||
void rt_hw_gtimer_local_disable(void)
|
||||
{
|
||||
rt_hw_gtimer_disable();
|
||||
rt_hw_interrupt_mask(EL1_PHY_TIMER_IRQ_NUM);
|
||||
}
|
||||
16
RT_Thread/libcpu/aarch64/common/hypercall.c
Normal file
16
RT_Thread/libcpu/aarch64/common/hypercall.c
Normal file
@ -0,0 +1,16 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2022-02-24 GuEe-GUI first version
|
||||
*/
|
||||
|
||||
#include <hypercall.h>
|
||||
|
||||
/*
 * Ask the hypervisor (hypercall 120) to map [paddr, paddr + size) at
 * stage 2 with flag bits 0, 1 and 4 set.
 * NOTE(review): the start is page-aligned down but the end is just
 * start + size — confirm callers pass page-multiple sizes.
 */
rt_err_t rt_hv_stage2_map(unsigned long paddr, unsigned long size)
{
    unsigned long start = paddr & (~4095);

    return rt_hw_hypercall(120, start, start + size,
                           (1 << 0) | (1 << 1) | (1 << 4), 0, 0, 0, 0);
}
187
RT_Thread/libcpu/aarch64/common/include/armv8.h
Normal file
187
RT_Thread/libcpu/aarch64/common/include/armv8.h
Normal file
@ -0,0 +1,187 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2011-09-15 Bernard first version
|
||||
*/
|
||||
|
||||
#ifndef __ARMV8_H__
|
||||
#define __ARMV8_H__
|
||||
|
||||
#include <rtconfig.h>
|
||||
|
||||
#ifdef ARCH_USING_HW_THREAD_SELF
|
||||
#define ARM64_THREAD_REG tpidr_el1
|
||||
#endif /* ARCH_USING_HW_THREAD_SELF */
|
||||
|
||||
#ifdef __ASSEMBLY__
|
||||
|
||||
/*********************
|
||||
* CONTEXT_OFFSET *
|
||||
*********************/
|
||||
|
||||
#define CONTEXT_OFFSET_ELR_EL1 0x0
|
||||
#define CONTEXT_OFFSET_SPSR_EL1 0x8
|
||||
#define CONTEXT_OFFSET_SP_EL0 0x10
|
||||
#define CONTEXT_OFFSET_X30 0x18
|
||||
#define CONTEXT_OFFSET_FPCR 0x20
|
||||
#define CONTEXT_OFFSET_FPSR 0x28
|
||||
#define CONTEXT_OFFSET_X28 0x30
|
||||
#define CONTEXT_OFFSET_X29 0x38
|
||||
#define CONTEXT_OFFSET_X26 0x40
|
||||
#define CONTEXT_OFFSET_X27 0x48
|
||||
#define CONTEXT_OFFSET_X24 0x50
|
||||
#define CONTEXT_OFFSET_X25 0x58
|
||||
#define CONTEXT_OFFSET_X22 0x60
|
||||
#define CONTEXT_OFFSET_X23 0x68
|
||||
#define CONTEXT_OFFSET_X20 0x70
|
||||
#define CONTEXT_OFFSET_X21 0x78
|
||||
#define CONTEXT_OFFSET_X18 0x80
|
||||
#define CONTEXT_OFFSET_X19 0x88
|
||||
#define CONTEXT_OFFSET_X16 0x90
|
||||
#define CONTEXT_OFFSET_X17 0x98
|
||||
#define CONTEXT_OFFSET_X14 0xa0
|
||||
#define CONTEXT_OFFSET_X15 0xa8
|
||||
#define CONTEXT_OFFSET_X12 0xb0
|
||||
#define CONTEXT_OFFSET_X13 0xb8
|
||||
#define CONTEXT_OFFSET_X10 0xc0
|
||||
#define CONTEXT_OFFSET_X11 0xc8
|
||||
#define CONTEXT_OFFSET_X8 0xd0
|
||||
#define CONTEXT_OFFSET_X9 0xd8
|
||||
#define CONTEXT_OFFSET_X6 0xe0
|
||||
#define CONTEXT_OFFSET_X7 0xe8
|
||||
#define CONTEXT_OFFSET_X4 0xf0
|
||||
#define CONTEXT_OFFSET_X5 0xf8
|
||||
#define CONTEXT_OFFSET_X2 0x100
|
||||
#define CONTEXT_OFFSET_X3 0x108
|
||||
#define CONTEXT_OFFSET_X0 0x110
|
||||
#define CONTEXT_OFFSET_X1 0x118
|
||||
|
||||
#define CONTEXT_OFFSET_Q31 0x120
|
||||
#define CONTEXT_OFFSET_Q30 0x130
|
||||
#define CONTEXT_OFFSET_Q29 0x140
|
||||
#define CONTEXT_OFFSET_Q28 0x150
|
||||
#define CONTEXT_OFFSET_Q27 0x160
|
||||
#define CONTEXT_OFFSET_Q26 0x170
|
||||
#define CONTEXT_OFFSET_Q25 0x180
|
||||
#define CONTEXT_OFFSET_Q24 0x190
|
||||
#define CONTEXT_OFFSET_Q23 0x1a0
|
||||
#define CONTEXT_OFFSET_Q22 0x1b0
|
||||
#define CONTEXT_OFFSET_Q21 0x1c0
|
||||
#define CONTEXT_OFFSET_Q20 0x1d0
|
||||
#define CONTEXT_OFFSET_Q19 0x1e0
|
||||
#define CONTEXT_OFFSET_Q18 0x1f0
|
||||
#define CONTEXT_OFFSET_Q17 0x200
|
||||
#define CONTEXT_OFFSET_Q16 0x210
|
||||
#define CONTEXT_OFFSET_Q15 0x220
|
||||
#define CONTEXT_OFFSET_Q14 0x230
|
||||
#define CONTEXT_OFFSET_Q13 0x240
|
||||
#define CONTEXT_OFFSET_Q12 0x250
|
||||
#define CONTEXT_OFFSET_Q11 0x260
|
||||
#define CONTEXT_OFFSET_Q10 0x270
|
||||
#define CONTEXT_OFFSET_Q9 0x280
|
||||
#define CONTEXT_OFFSET_Q8 0x290
|
||||
#define CONTEXT_OFFSET_Q7 0x2a0
|
||||
#define CONTEXT_OFFSET_Q6 0x2b0
|
||||
#define CONTEXT_OFFSET_Q5 0x2c0
|
||||
#define CONTEXT_OFFSET_Q4 0x2d0
|
||||
#define CONTEXT_OFFSET_Q3 0x2e0
|
||||
#define CONTEXT_OFFSET_Q2 0x2f0
|
||||
#define CONTEXT_OFFSET_Q1 0x300
|
||||
#define CONTEXT_OFFSET_Q0 0x310
|
||||
|
||||
#define CONTEXT_FPU_SIZE (32 * 16)
|
||||
#define CONTEXT_SIZE (0x120 + CONTEXT_FPU_SIZE)
|
||||
|
||||
#else /* !__ASSEMBLY__ */
|
||||
|
||||
#include <rttypes.h>
|
||||
|
||||
/* 128-bit container used for the Q (NEON/FP) registers. */
typedef struct { rt_uint64_t value[2]; } rt_uint128_t;

/*
 * Exception/context frame as laid down by the assembly entry code.
 * The member order mirrors the CONTEXT_OFFSET_* constants above, so
 * it is ABI and must not be reordered.
 */
struct rt_hw_exp_stack
{
    rt_uint64_t pc;      /* ELR_EL1 */
    rt_uint64_t cpsr;    /* SPSR_EL1 */
    rt_uint64_t sp_el0;
    rt_uint64_t x30;     /* link register */
    rt_uint64_t fpcr;
    rt_uint64_t fpsr;
    /* General-purpose registers, stored pairwise from x28 down to x1. */
    rt_uint64_t x28;
    rt_uint64_t x29;
    rt_uint64_t x26;
    rt_uint64_t x27;
    rt_uint64_t x24;
    rt_uint64_t x25;
    rt_uint64_t x22;
    rt_uint64_t x23;
    rt_uint64_t x20;
    rt_uint64_t x21;
    rt_uint64_t x18;
    rt_uint64_t x19;
    rt_uint64_t x16;
    rt_uint64_t x17;
    rt_uint64_t x14;
    rt_uint64_t x15;
    rt_uint64_t x12;
    rt_uint64_t x13;
    rt_uint64_t x10;
    rt_uint64_t x11;
    rt_uint64_t x8;
    rt_uint64_t x9;
    rt_uint64_t x6;
    rt_uint64_t x7;
    rt_uint64_t x4;
    rt_uint64_t x5;
    rt_uint64_t x2;
    rt_uint64_t x3;
    rt_uint64_t x0;
    rt_uint64_t x1;

    /* NEON/FP register file q0..q31. */
    rt_uint128_t fpu[32];
};
||||
void rt_hw_show_register(struct rt_hw_exp_stack *regs);
|
||||
|
||||
#define SP_ELx ((unsigned long)0x01)
|
||||
#define SP_EL0 ((unsigned long)0x00)
|
||||
#define PSTATE_EL1 ((unsigned long)0x04)
|
||||
#define PSTATE_EL2 ((unsigned long)0x08)
|
||||
#define PSTATE_EL3 ((unsigned long)0x0c)
|
||||
|
||||
rt_ubase_t rt_hw_get_current_el(void);
|
||||
void rt_hw_set_elx_env(void);
|
||||
void rt_hw_set_current_vbar(rt_ubase_t addr);
|
||||
|
||||
/* ESR:generic */
|
||||
#define ARM64_ABORT_WNR(esr) ((esr) & 0x40)
|
||||
#define ARM64_ESR_EXTRACT_EC(esr) ((((esr) >> 26) & 0x3fU))
|
||||
#define ARM64_ESR_EXTRACT_FSC(esr) ((esr) & 0x3f)
|
||||
|
||||
/* ESR:EC */
|
||||
#define ARM64_EC_INST_ABORT_FROM_LO_EXCEPTION (0b100000)
|
||||
#define ARM64_EC_INST_ABORT_WITHOUT_A_CHANGE (0b100001)
|
||||
#define ARM64_EC_DATA_ABORT_FROM_LO_EXCEPTION (0b100100)
|
||||
#define ARM64_EC_DATA_ABORT_WITHOUT_A_CHANGE (0b100101)
|
||||
|
||||
/* ESR:FSC */
|
||||
#define ARM64_FSC_TRANSLATION_FAULT_LEVEL_0 (0b000100)
|
||||
#define ARM64_FSC_TRANSLATION_FAULT_LEVEL_1 (0b000101)
|
||||
#define ARM64_FSC_TRANSLATION_FAULT_LEVEL_2 (0b000110)
|
||||
#define ARM64_FSC_TRANSLATION_FAULT_LEVEL_3 (0b000111)
|
||||
#define ARM64_FSC_PERMISSION_FAULT_LEVEL_0 (0b001100)
|
||||
#define ARM64_FSC_PERMISSION_FAULT_LEVEL_1 (0b001101)
|
||||
#define ARM64_FSC_PERMISSION_FAULT_LEVEL_2 (0b001110)
|
||||
#define ARM64_FSC_PERMISSION_FAULT_LEVEL_3 (0b001111)
|
||||
#define ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_0 (0b001000)
|
||||
#define ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_1 (0b001001)
|
||||
#define ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_2 (0b001010)
|
||||
#define ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_3 (0b001011)
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#endif
|
||||
83
RT_Thread/libcpu/aarch64/common/include/asm-fpu.h
Normal file
83
RT_Thread/libcpu/aarch64/common/include/asm-fpu.h
Normal file
@ -0,0 +1,83 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2023, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2021-05-18 Jesven the first version
|
||||
* 2023-07-13 GuEe-GUI append Q16 ~ Q31
|
||||
*/
|
||||
#ifndef __ARM64_ASM_FPU_H__
|
||||
#define __ARM64_ASM_FPU_H__
|
||||
|
||||
/*
 * Push all 32 Advanced-SIMD/FP registers (q0..q31) onto a descending
 * stack addressed by \reg: 16 bytes per register with pre-decrement,
 * 512 bytes total.  RESTORE_FPU pops them in the exact reverse order,
 * so the two macros must be kept symmetric.
 */
.macro SAVE_FPU, reg
    str q0, [\reg, #-0x10]!
    str q1, [\reg, #-0x10]!
    str q2, [\reg, #-0x10]!
    str q3, [\reg, #-0x10]!
    str q4, [\reg, #-0x10]!
    str q5, [\reg, #-0x10]!
    str q6, [\reg, #-0x10]!
    str q7, [\reg, #-0x10]!
    str q8, [\reg, #-0x10]!
    str q9, [\reg, #-0x10]!
    str q10, [\reg, #-0x10]!
    str q11, [\reg, #-0x10]!
    str q12, [\reg, #-0x10]!
    str q13, [\reg, #-0x10]!
    str q14, [\reg, #-0x10]!
    str q15, [\reg, #-0x10]!
    str q16, [\reg, #-0x10]!
    str q17, [\reg, #-0x10]!
    str q18, [\reg, #-0x10]!
    str q19, [\reg, #-0x10]!
    str q20, [\reg, #-0x10]!
    str q21, [\reg, #-0x10]!
    str q22, [\reg, #-0x10]!
    str q23, [\reg, #-0x10]!
    str q24, [\reg, #-0x10]!
    str q25, [\reg, #-0x10]!
    str q26, [\reg, #-0x10]!
    str q27, [\reg, #-0x10]!
    str q28, [\reg, #-0x10]!
    str q29, [\reg, #-0x10]!
    str q30, [\reg, #-0x10]!
    str q31, [\reg, #-0x10]!
.endm
|
||||
/*
 * Pop all 32 Advanced-SIMD/FP registers from an ascending stack
 * addressed by \reg (16 bytes per register, post-increment).  The pop
 * order q31..q0 mirrors SAVE_FPU's push order q0..q31; on return \reg
 * has advanced by 512 bytes past the saved area.
 */
.macro RESTORE_FPU, reg
    ldr q31, [\reg], #0x10
    ldr q30, [\reg], #0x10
    ldr q29, [\reg], #0x10
    ldr q28, [\reg], #0x10
    ldr q27, [\reg], #0x10
    ldr q26, [\reg], #0x10
    ldr q25, [\reg], #0x10
    ldr q24, [\reg], #0x10
    ldr q23, [\reg], #0x10
    ldr q22, [\reg], #0x10
    ldr q21, [\reg], #0x10
    ldr q20, [\reg], #0x10
    ldr q19, [\reg], #0x10
    ldr q18, [\reg], #0x10
    ldr q17, [\reg], #0x10
    ldr q16, [\reg], #0x10
    ldr q15, [\reg], #0x10
    ldr q14, [\reg], #0x10
    ldr q13, [\reg], #0x10
    ldr q12, [\reg], #0x10
    ldr q11, [\reg], #0x10
    ldr q10, [\reg], #0x10
    ldr q9, [\reg], #0x10
    ldr q8, [\reg], #0x10
    ldr q7, [\reg], #0x10
    ldr q6, [\reg], #0x10
    ldr q5, [\reg], #0x10
    ldr q4, [\reg], #0x10
    ldr q3, [\reg], #0x10
    ldr q2, [\reg], #0x10
    ldr q1, [\reg], #0x10
    ldr q0, [\reg], #0x10
.endm
|
||||
|
||||
#endif /* __ARM64_ASM_FPU_H__ */
|
||||
45
RT_Thread/libcpu/aarch64/common/include/asm-generic.h
Normal file
45
RT_Thread/libcpu/aarch64/common/include/asm-generic.h
Normal file
@ -0,0 +1,45 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2023 RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2023-03-12 WangXiaoyao the first version
|
||||
*/
|
||||
#ifndef __ASM_GENERIC_H__
|
||||
#define __ASM_GENERIC_H__
|
||||
|
||||
/* use to mark a start point where every task start from */
|
||||
#define START_POINT(funcname) \
|
||||
.global funcname; \
|
||||
.type funcname, %function; \
|
||||
funcname: \
|
||||
.cfi_sections .debug_frame, .eh_frame; \
|
||||
.cfi_startproc; \
|
||||
.cfi_undefined x30
|
||||
|
||||
#define START_POINT_END(name) \
|
||||
.cfi_endproc; \
|
||||
.size name, .-name;
|
||||
|
||||
#define TRACE_SYMBOL(name)
|
||||
|
||||
/*
 * Marker for code paths that must never fall through.  On debug builds
 * it parks the CPU in a tight branch-to-self so the broken path is
 * caught immediately; on release builds it expands to nothing.
 */
.macro NEVER_RETURN
#ifdef RT_USING_DEBUG
    b .
#endif /* RT_USING_DEBUG */
.endm
|
||||
|
||||
/*
 * Load the current thread pointer into \dst.
 *
 * With ARCH_USING_HW_THREAD_SELF the scheduler caches the thread
 * pointer in TPIDR_EL1, so a single mrs suffices; otherwise fall back
 * to calling rt_thread_self() (result in x0; note the bl clobbers x30
 * and the caller-saved registers per the AAPCS64).
 *
 * Fixes vs. previous revision:
 *  - "mov dst, x0" was missing the macro-argument backslash, so it
 *    referenced a literal symbol "dst" and failed to assemble; it is
 *    now "mov \dst, x0".
 *  - ".if \dst != x0" compares register names, which is not a valid
 *    GAS absolute expression; the string comparison ".ifnc" is the
 *    correct way to skip the redundant "mov x0, x0".
 */
.macro GET_THREAD_SELF, dst:req
#ifdef ARCH_USING_HW_THREAD_SELF
    mrs x0, tpidr_el1
#else /* !ARCH_USING_HW_THREAD_SELF */
    bl rt_thread_self
#endif /* ARCH_USING_HW_THREAD_SELF */
.ifnc "\dst", "x0"
    mov \dst, x0
.endif
.endm
|
||||
|
||||
#endif /* __ASM_GENERIC_H__ */
|
||||
39
RT_Thread/libcpu/aarch64/common/include/cache.h
Normal file
39
RT_Thread/libcpu/aarch64/common/include/cache.h
Normal file
@ -0,0 +1,39 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2022, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2022-12-18 RT-Thread the first version
|
||||
*/
|
||||
|
||||
#ifndef __CACHE_H__
|
||||
#define __CACHE_H__
|
||||
|
||||
#include <rtdef.h>
|
||||
|
||||
void __asm_invalidate_icache_all(void);
|
||||
|
||||
void rt_hw_dcache_flush_all(void);
|
||||
void rt_hw_dcache_invalidate_all(void);
|
||||
void rt_hw_dcache_flush_range(unsigned long start_addr, unsigned long size);
|
||||
void rt_hw_cpu_dcache_clean(void *addr, unsigned long size);
|
||||
void rt_hw_cpu_dcache_invalidate(void *start_addr, unsigned long size);
|
||||
|
||||
/**
 * Invalidate all instruction caches in the Inner Shareable domain
 * (ic ialluis) so that code written by this core becomes visible to
 * instruction fetch on every core.  The barrier sequence is ordered:
 * each asm statement is volatile, which keeps the compiler from
 * reordering them relative to one another.
 */
static inline void rt_hw_icache_invalidate_all(void)
{
    /* wait for previous modification to complete */
    __asm__ volatile ("dsb ishst");

    __asm__ volatile ("ic ialluis");
    /* wait for ic to retire */
    __asm__ volatile ("dsb nsh");
    /* flush instruction pipeline */
    __asm__ volatile ("isb");
}
|
||||
|
||||
void rt_hw_cpu_icache_invalidate(void *addr, rt_size_t size);
|
||||
void rt_hw_cpu_dcache_clean_and_invalidate(void *addr, rt_size_t size);
|
||||
|
||||
#endif /* __CACHE_H__ */
|
||||
71
RT_Thread/libcpu/aarch64/common/include/context_gcc.h
Normal file
71
RT_Thread/libcpu/aarch64/common/include/context_gcc.h
Normal file
@ -0,0 +1,71 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
|
||||
*/
|
||||
#ifndef __ARM64_INC_CONTEXT_H__
|
||||
#define __ARM64_INC_CONTEXT_H__
|
||||
|
||||
#include "armv8.h"
|
||||
|
||||
/*
 * Build a full thread-context frame on the current stack for a
 * voluntary context switch.  Frame layout, pushed in this order:
 * q0..q31 (via SAVE_FPU), x19..x28, {x29, SP_EL0}, {FPCR, FPSR},
 * then {x30, synthetic SPSR}.  The synthetic SPSR value
 * ((3 << 6) | 0x5) encodes EL1h with IRQ/FIQ masked, so the restore
 * path can "eret" straight back to the saved x30 with interrupts off.
 * \tmpx and \tmp2x are scratch registers and are clobbered.
 */
.macro SAVE_CONTEXT_SWITCH, tmpx, tmp2x
    /* Save the entire context. */
    SAVE_FPU sp

    /* callee-saved registers per AAPCS64 */
    stp x19, x20, [sp, #-0x10]!
    stp x21, x22, [sp, #-0x10]!
    stp x23, x24, [sp, #-0x10]!
    stp x25, x26, [sp, #-0x10]!
    stp x27, x28, [sp, #-0x10]!

    /* the thread/user stack pointer is banked in SP_EL0 */
    mrs \tmpx, sp_el0
    stp x29, \tmpx, [sp, #-0x10]!

    /* FP control/status so rounding mode and cumulative flags survive */
    mrs \tmpx, fpcr
    mrs \tmp2x, fpsr
    stp \tmpx, \tmp2x, [sp, #-0x10]!

    mov \tmpx, #((3 << 6) | 0x5)    /* el1h, disable interrupt */
    stp x30, \tmpx, [sp, #-0x10]!

.endm
|
||||
|
||||
/*
 * Lightweight variant of SAVE_CONTEXT_SWITCH: instead of storing the
 * FPU and callee-saved registers it only reserves their stack slots
 * (CONTEXT_FPU_SIZE bytes plus 7 register pairs of 16 bytes), keeping
 * the frame layout identical, then pushes just {lr, synthetic SPSR}.
 * The skipped slots contain garbage, so this is only valid on paths
 * where the restore side never consumes those values.
 * NOTE(review): x19 is clobbered here without being saved first --
 * presumably callers do not need it preserved; confirm at call sites.
 */
.macro SAVE_CONTEXT_SWITCH_FAST
    /* Save the entire context. */
    add sp, sp, #-1 * CONTEXT_FPU_SIZE

    /* skip the 7 pairs: x19..x28, {x29, sp_el0}, {fpcr, fpsr} */
    add sp, sp, #-7 * 16

    mov x19, #((3 << 6) | 0x4 | 0x1)    /* el1h, disable interrupt */
    stp lr, x19, [sp, #-0x10]!

.endm
|
||||
|
||||
/*
 * Unwind a frame built by SAVE_CONTEXT_SWITCH and resume the thread
 * with eret.  The first pair popped is the saved x30 and the synthetic
 * SPSR; the saved x30 becomes the exception return address (ELR_EL1),
 * so execution continues at the switched-out thread's return point.
 * x19/x20 are used as scratch until they are finally reloaded from
 * the frame.
 */
.macro _RESTORE_CONTEXT_SWITCH
    ldp x30, x19, [sp], #0x10 /* SPSR and ELR. */
    msr elr_el1, x30
    msr spsr_el1, x19


    /* restore NEON */
    ldp x19, x20, [sp], #0x10
    msr fpcr, x19
    msr fpsr, x20

    /* {x29, SP_EL0}, then the callee-saved pairs in reverse push order */
    ldp x29, x19, [sp], #0x10
    msr sp_el0, x19
    ldp x27, x28, [sp], #0x10
    ldp x25, x26, [sp], #0x10
    ldp x23, x24, [sp], #0x10
    ldp x21, x22, [sp], #0x10
    ldp x19, x20, [sp], #0x10

    RESTORE_FPU sp
    eret
.endm
|
||||
|
||||
#endif /* __ARM64_INC_CONTEXT_H__ */
|
||||
65
RT_Thread/libcpu/aarch64/common/include/cp15.h
Normal file
65
RT_Thread/libcpu/aarch64/common/include/cp15.h
Normal file
@ -0,0 +1,65 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2011-09-15 Bernard first version
|
||||
*/
|
||||
|
||||
#ifndef __CP15_H__
|
||||
#define __CP15_H__
|
||||
|
||||
#ifndef __STATIC_FORCEINLINE
|
||||
#define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline
|
||||
#endif
|
||||
|
||||
#define __WFI() __asm__ volatile ("wfi":::"memory")
|
||||
|
||||
#define __WFE() __asm__ volatile ("wfe":::"memory")
|
||||
|
||||
#define __SEV() __asm__ volatile ("sev")
|
||||
|
||||
/**
 * \brief Instruction Synchronization Barrier
 * \details Flushes the processor pipeline so that all instructions
 *          following the ISB are fetched after the barrier completes
 *          ("0xF" is the full-system SY option).
 */
__STATIC_FORCEINLINE void __ISB(void)
{
    __asm__ volatile ("isb 0xF":::"memory");
}
|
||||
|
||||
/**
|
||||
\brief Data Synchronization Barrier
|
||||
\details Acts as a special kind of Data Memory Barrier.
|
||||
It completes when all explicit memory accesses before this instruction complete.
|
||||
*/
|
||||
__STATIC_FORCEINLINE void __DSB(void)
|
||||
{
|
||||
__asm__ volatile ("dsb 0xF":::"memory");
|
||||
}
|
||||
|
||||
/**
|
||||
\brief Data Memory Barrier
|
||||
\details Ensures the apparent order of the explicit memory operations before
|
||||
and after the instruction, without ensuring their completion.
|
||||
*/
|
||||
|
||||
__STATIC_FORCEINLINE void __DMB(void)
|
||||
{
|
||||
__asm__ volatile ("dmb 0xF":::"memory");
|
||||
}
|
||||
|
||||
unsigned long rt_cpu_get_smp_id(void);
|
||||
|
||||
void rt_cpu_mmu_disable(void);
|
||||
void rt_cpu_mmu_enable(void);
|
||||
void rt_cpu_tlb_set(volatile unsigned long*);
|
||||
|
||||
void rt_cpu_dcache_clean_flush(void);
|
||||
void rt_cpu_icache_flush(void);
|
||||
|
||||
void rt_cpu_vector_set_base(rt_ubase_t addr);
|
||||
void rt_hw_mmu_init(void);
|
||||
void rt_hw_vector_init(void);
|
||||
|
||||
void set_timer_counter(unsigned int counter);
|
||||
void set_timer_control(unsigned int control);
|
||||
#endif
|
||||
92
RT_Thread/libcpu/aarch64/common/include/cpu.h
Normal file
92
RT_Thread/libcpu/aarch64/common/include/cpu.h
Normal file
@ -0,0 +1,92 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2023, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
*/
|
||||
#ifndef __RT_HW_CPU_H__
|
||||
#define __RT_HW_CPU_H__
|
||||
|
||||
#include <rtdef.h>
|
||||
#include <cpuport.h>
|
||||
#include <mm_aspace.h>
|
||||
|
||||
#ifdef RT_USING_OFW
|
||||
#include <drivers/ofw.h>
|
||||
#endif
|
||||
|
||||
#define ID_ERROR __INT64_MAX__
|
||||
#define MPIDR_AFFINITY_MASK 0x000000ff00ffffffUL
|
||||
|
||||
struct cpu_ops_t
|
||||
{
|
||||
const char *method;
|
||||
int (*cpu_init)(rt_uint32_t id, void *param);
|
||||
int (*cpu_boot)(rt_uint32_t id, rt_uint64_t entry);
|
||||
void (*cpu_shutdown)(void);
|
||||
};
|
||||
#define sysreg_32(op1, crn, crm, op2) s3_##op1 ##_##crn ##_##crm ##_##op2
|
||||
#define sysreg_64(op1, crn, crm, op2) sysreg_32(op1, crn, crm, op2)
|
||||
|
||||
#define MPIDR_AFFINITY_MASK 0x000000ff00ffffffUL
|
||||
|
||||
#define MPIDR_LEVEL_BITS_SHIFT 3
|
||||
#define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT)
|
||||
#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
|
||||
#define MPIDR_LEVEL_SHIFT(level) (((1 << (level)) >> 1) << MPIDR_LEVEL_BITS_SHIFT)
|
||||
|
||||
#define MPIDR_AFFINITY_LEVEL(mpidr, level) (((mpidr) >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)
|
||||
|
||||
/* GIC registers */
|
||||
#define ICC_IAR0_SYS sysreg_64(0, c12, c8, 0)
|
||||
#define ICC_IAR1_SYS sysreg_64(0, c12, c12, 0)
|
||||
#define ICC_EOIR0_SYS sysreg_64(0, c12, c8, 1)
|
||||
#define ICC_EOIR1_SYS sysreg_64(0, c12, c12, 1)
|
||||
#define ICC_HPPIR0_SYS sysreg_64(0, c12, c8, 2)
|
||||
#define ICC_HPPIR1_SYS sysreg_64(0, c12, c12, 2)
|
||||
#define ICC_BPR0_SYS sysreg_64(0, c12, c8, 3)
|
||||
#define ICC_BPR1_SYS sysreg_64(0, c12, c12, 3)
|
||||
#define ICC_DIR_SYS sysreg_64(0, c12, c11, 1)
|
||||
#define ICC_PMR_SYS sysreg_64(0, c4, c6, 0)
|
||||
#define ICC_RPR_SYS sysreg_64(0, c12, c11, 3)
|
||||
#define ICC_CTLR_SYS sysreg_64(0, c12, c12, 4)
|
||||
#define ICC_SRE_SYS sysreg_64(0, c12, c12, 5)
|
||||
#define ICC_IGRPEN0_SYS sysreg_64(0, c12, c12, 6)
|
||||
#define ICC_IGRPEN1_SYS sysreg_64(0, c12, c12, 7)
|
||||
#define ICC_SGI0R_SYS sysreg_64(0, c12, c11, 7)
|
||||
#define ICC_SGI1R_SYS sysreg_64(0, c12, c11, 5)
|
||||
#define ICC_ASGI1R_SYS sysreg_64(0, c12, c11, 6)
|
||||
|
||||
/* Arch timer registers */
|
||||
#define CNTP_CTL CNTP_CTL_EL0 /* EL1 Physical Timer */
|
||||
#define CNTHP_CTL CNTHP_CTL_EL2 /* EL2 Non-secure Physical Timer */
|
||||
#define CNTHPS_CTL CNTHPS_CTL_EL2 /* EL2 Secure Physical Timer */
|
||||
#define CNTPS_CTL CNTPS_CTL_EL1 /* EL3 Physical Timer */
|
||||
#define CNTV_CTL CNTV_CTL_EL0 /* EL1 Virtual Timer */
|
||||
#define CNTHV_CTL CNTHV_CTL_EL2 /* EL2 Non-secure Virtual Timer */
|
||||
#define CNTHVS_CTL CNTHVS_CTL_EL2 /* EL2 Secure Virtual Timer */
|
||||
|
||||
#define CNTP_CVAL CNTP_CVAL_EL0
|
||||
#define CNTHP_CVAL CNTHP_CVAL_EL2
|
||||
#define CNTHPS_CVAL CNTHPS_CVAL_EL2
|
||||
#define CNTPS_CVAL CNTPS_CVAL_EL1
|
||||
#define CNTV_CVAL CNTV_CVAL_EL0
|
||||
#define CNTHV_CVAL CNTHV_CVAL_EL2
|
||||
#define CNTHVS_CVAL CNTHVS_CVAL_EL2
|
||||
|
||||
#define CNTP_TVAL CNTP_TVAL_EL0
|
||||
#define CNTHP_TVAL CNTHP_TVAL_EL2
|
||||
#define CNTHPS_TVAL CNTHPS_TVAL_EL2
|
||||
#define CNTPS_TVAL CNTPS_TVAL_EL1
|
||||
#define CNTV_TVAL CNTV_TVAL_EL0
|
||||
#define CNTHV_TVAL CNTHV_TVAL_EL2
|
||||
#define CNTHVS_TVAL CNTHVS_TVAL_EL2
|
||||
|
||||
#define CNTPCT CNTPCT_EL0
|
||||
#define CNTVCT CNTVCT_EL0
|
||||
#define CNTFRQ CNTFRQ_EL0
|
||||
extern rt_uint64_t rt_cpu_mpidr_table[];
|
||||
|
||||
#endif /* __RT_HW_CPU_H__ */
|
||||
21
RT_Thread/libcpu/aarch64/common/include/cpu_ops_common.h
Normal file
21
RT_Thread/libcpu/aarch64/common/include/cpu_ops_common.h
Normal file
@ -0,0 +1,21 @@
|
||||
#ifndef __CPU_OPS_COMMON_H__
|
||||
#define __CPU_OPS_COMMON_H__
|
||||
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
#include <mmu.h>
|
||||
#include "entry_point.h"
|
||||
|
||||
static inline rt_uint64_t get_secondary_entry_pa(void)
|
||||
{
|
||||
rt_uint64_t secondary_entry_pa = (rt_uint64_t)rt_kmem_v2p(_secondary_cpu_entry);
|
||||
|
||||
if (!secondary_entry_pa)
|
||||
{
|
||||
LOG_E("Failed to translate 'secondary_entry_pa' to physical address");
|
||||
return 0;
|
||||
}
|
||||
return secondary_entry_pa;
|
||||
}
|
||||
|
||||
#endif /* __CPU_OPS_COMMON_H__ */
|
||||
72
RT_Thread/libcpu/aarch64/common/include/cpuport.h
Normal file
72
RT_Thread/libcpu/aarch64/common/include/cpuport.h
Normal file
@ -0,0 +1,72 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2023-10-25 Shell Move ffs to cpuport, add general implementation
|
||||
* by inline assembly
|
||||
* 2024-01-18 Shell support rt_hw_thread_self to improve overall performance
|
||||
*/
|
||||
|
||||
#ifndef CPUPORT_H__
|
||||
#define CPUPORT_H__
|
||||
|
||||
#include <armv8.h>
|
||||
#include <rtcompiler.h>
|
||||
#include <rttypes.h>
|
||||
|
||||
#ifdef RT_USING_SMP
|
||||
|
||||
/**
|
||||
* Spinlock
|
||||
*/
|
||||
|
||||
typedef struct
|
||||
{
|
||||
rt_uint32_t value;
|
||||
} rt_hw_spinlock_t;
|
||||
|
||||
#endif /* RT_USING_SMP */
|
||||
|
||||
#define rt_hw_barrier(cmd, ...) \
|
||||
__asm__ volatile (RT_STRINGIFY(cmd) " "RT_STRINGIFY(__VA_ARGS__):::"memory")
|
||||
|
||||
#define rt_hw_isb() rt_hw_barrier(isb)
|
||||
#define rt_hw_dmb() rt_hw_barrier(dmb, ish)
|
||||
#define rt_hw_wmb() rt_hw_barrier(dmb, ishst)
|
||||
#define rt_hw_rmb() rt_hw_barrier(dmb, ishld)
|
||||
#define rt_hw_dsb() rt_hw_barrier(dsb, ish)
|
||||
|
||||
#define rt_hw_wfi() rt_hw_barrier(wfi)
|
||||
#define rt_hw_wfe() rt_hw_barrier(wfe)
|
||||
#define rt_hw_sev() rt_hw_barrier(sev)
|
||||
|
||||
#define rt_hw_cpu_relax() rt_hw_barrier(yield)
|
||||
|
||||
#define rt_hw_sysreg_write(sysreg, val) \
|
||||
__asm__ volatile ("msr "RT_STRINGIFY(sysreg)", %0"::"r"((rt_uint64_t)(val)))
|
||||
|
||||
#define rt_hw_sysreg_read(sysreg, val) \
|
||||
__asm__ volatile ("mrs %0, "RT_STRINGIFY(sysreg)"":"=r"((val)))
|
||||
|
||||
void _thread_start(void);
|
||||
|
||||
#ifdef ARCH_USING_HW_THREAD_SELF
|
||||
/**
 * Fast path for fetching the current thread: read back the thread
 * pointer that the scheduler caches in the system register named by
 * ARM64_THREAD_REG (defined in armv8.h; presumably a TPIDR register --
 * confirm there) with a single mrs instead of a function call.
 *
 * @return the current thread control block pointer
 */
rt_inline struct rt_thread *rt_hw_thread_self(void)
{
    struct rt_thread *thread;
    __asm__ volatile ("mrs %0, " RT_STRINGIFY(ARM64_THREAD_REG) :"=r"(thread));

    return thread;
}
|
||||
|
||||
/**
 * Publish @thread as the current thread by writing it into the system
 * register named by ARM64_THREAD_REG, where rt_hw_thread_self() reads
 * it back.  Called by the scheduler on context switch.
 *
 * @param thread the thread control block to install
 */
rt_inline void rt_hw_thread_set_self(struct rt_thread *thread)
{
    __asm__ volatile ("msr " RT_STRINGIFY(ARM64_THREAD_REG) ", %0"::"r"(thread));
}
|
||||
|
||||
#endif /* ARCH_USING_HW_THREAD_SELF */
|
||||
|
||||
#endif /*CPUPORT_H__*/
|
||||
13
RT_Thread/libcpu/aarch64/common/include/entry_point.h
Normal file
13
RT_Thread/libcpu/aarch64/common/include/entry_point.h
Normal file
@ -0,0 +1,13 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2022, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
*/
|
||||
#ifndef __ENTRY_POINT_H__
|
||||
#define __ENTRY_POINT_H__
|
||||
|
||||
extern void _secondary_cpu_entry(void);
|
||||
#endif /* __ENTRY_POINT_H__ */
|
||||
62
RT_Thread/libcpu/aarch64/common/include/gic.h
Normal file
62
RT_Thread/libcpu/aarch64/common/include/gic.h
Normal file
@ -0,0 +1,62 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2013-07-20 Bernard first version
|
||||
*/
|
||||
|
||||
#ifndef __GIC_H__
|
||||
#define __GIC_H__
|
||||
|
||||
#include <rthw.h>
|
||||
#include <board.h>
|
||||
|
||||
int arm_gic_get_active_irq(rt_uint64_t index);
|
||||
void arm_gic_ack(rt_uint64_t index, int irq);
|
||||
|
||||
void arm_gic_mask(rt_uint64_t index, int irq);
|
||||
void arm_gic_umask(rt_uint64_t index, int irq);
|
||||
|
||||
rt_uint64_t arm_gic_get_pending_irq(rt_uint64_t index, int irq);
|
||||
void arm_gic_set_pending_irq(rt_uint64_t index, int irq);
|
||||
void arm_gic_clear_pending_irq(rt_uint64_t index, int irq);
|
||||
|
||||
void arm_gic_set_configuration(rt_uint64_t index, int irq, uint32_t config);
|
||||
rt_uint64_t arm_gic_get_configuration(rt_uint64_t index, int irq);
|
||||
|
||||
void arm_gic_clear_active(rt_uint64_t index, int irq);
|
||||
|
||||
void arm_gic_set_cpu(rt_uint64_t index, int irq, unsigned int cpumask);
|
||||
rt_uint64_t arm_gic_get_target_cpu(rt_uint64_t index, int irq);
|
||||
|
||||
void arm_gic_set_priority(rt_uint64_t index, int irq, rt_uint64_t priority);
|
||||
rt_uint64_t arm_gic_get_priority(rt_uint64_t index, int irq);
|
||||
|
||||
void arm_gic_set_interface_prior_mask(rt_uint64_t index, rt_uint64_t priority);
|
||||
rt_uint64_t arm_gic_get_interface_prior_mask(rt_uint64_t index);
|
||||
|
||||
void arm_gic_set_binary_point(rt_uint64_t index, rt_uint64_t binary_point);
|
||||
rt_uint64_t arm_gic_get_binary_point(rt_uint64_t index);
|
||||
|
||||
rt_uint64_t arm_gic_get_irq_status(rt_uint64_t index, int irq);
|
||||
|
||||
void arm_gic_send_sgi(rt_uint64_t index, int irq, rt_uint64_t target_list, rt_uint64_t filter_list);
|
||||
|
||||
rt_uint64_t arm_gic_get_high_pending_irq(rt_uint64_t index);
|
||||
|
||||
rt_uint64_t arm_gic_get_interface_id(rt_uint64_t index);
|
||||
|
||||
void arm_gic_set_group(rt_uint64_t index, int irq, rt_uint64_t group);
|
||||
rt_uint64_t arm_gic_get_group(rt_uint64_t index, int irq);
|
||||
|
||||
int arm_gic_dist_init(rt_uint64_t index, rt_uint64_t dist_base, int irq_start);
|
||||
int arm_gic_cpu_init(rt_uint64_t index, rt_uint64_t cpu_base);
|
||||
|
||||
void arm_gic_dump_type(rt_uint64_t index);
|
||||
void arm_gic_dump(rt_uint64_t index);
|
||||
|
||||
#endif
|
||||
|
||||
198
RT_Thread/libcpu/aarch64/common/include/gicv3.h
Normal file
198
RT_Thread/libcpu/aarch64/common/include/gicv3.h
Normal file
@ -0,0 +1,198 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2023, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2013-07-20 Bernard first version
|
||||
* 2014-04-03 Grissiom many enhancements
|
||||
* 2018-11-22 Jesven add rt_hw_ipi_send()
|
||||
* add rt_hw_ipi_handler_install()
|
||||
*/
|
||||
|
||||
#ifndef __GICV3_H__
|
||||
#define __GICV3_H__
|
||||
|
||||
#include <rtdef.h>
|
||||
|
||||
#if defined(BSP_USING_GIC) && defined(BSP_USING_GICV3)
|
||||
|
||||
|
||||
#ifndef ARM_GIC_CPU_NUM
|
||||
#define ARM_GIC_CPU_NUM RT_CPUS_NR
|
||||
#endif
|
||||
|
||||
#define GICV3_ROUTED_TO_ALL 1UL
|
||||
#define GICV3_ROUTED_TO_SPEC 0UL
|
||||
#define GET_GICV3_REG(reg, out) __asm__ volatile ("mrs %0, " reg:"=r"(out)::"memory");
|
||||
#define SET_GICV3_REG(reg, in) __asm__ volatile ("msr " reg ", %0"::"r"(in):"memory");
|
||||
|
||||
/* AArch64 System register interface to GICv3 */
|
||||
#define ICC_IAR0_EL1 "S3_0_C12_C8_0"
|
||||
#define ICC_IAR1_EL1 "S3_0_C12_C12_0"
|
||||
#define ICC_EOIR0_EL1 "S3_0_C12_C8_1"
|
||||
#define ICC_EOIR1_EL1 "S3_0_C12_C12_1"
|
||||
#define ICC_HPPIR0_EL1 "S3_0_C12_C8_2"
|
||||
#define ICC_HPPIR1_EL1 "S3_0_C12_C12_2"
|
||||
#define ICC_BPR0_EL1 "S3_0_C12_C8_3"
|
||||
#define ICC_BPR1_EL1 "S3_0_C12_C12_3"
|
||||
#define ICC_DIR_EL1 "S3_0_C12_C11_1"
|
||||
#define ICC_PMR_EL1 "S3_0_C4_C6_0"
|
||||
#define ICC_RPR_EL1 "S3_0_C12_C11_3"
|
||||
#define ICC_CTLR_EL1 "S3_0_C12_C12_4"
|
||||
#define ICC_CTLR_EL3 "S3_6_C12_C12_4"
|
||||
#define ICC_SRE_EL1 "S3_0_C12_C12_5"
|
||||
#define ICC_SRE_EL2 "S3_4_C12_C9_5"
|
||||
#define ICC_SRE_EL3 "S3_6_C12_C12_5"
|
||||
#define ICC_IGRPEN0_EL1 "S3_0_C12_C12_6"
|
||||
#define ICC_IGRPEN1_EL1 "S3_0_C12_C12_7"
|
||||
#define ICC_IGRPEN1_EL3 "S3_6_C12_C12_7"
|
||||
#define ICC_SGI0R_EL1 "S3_0_C12_C11_7"
|
||||
#define ICC_SGI1R_EL1 "S3_0_C12_C11_5"
|
||||
#define ICC_ASGI1R_EL1 "S3_0_C12_C11_6"
|
||||
|
||||
/* Macro to access the Distributor Control Register (GICD_CTLR) */
|
||||
#define GICD_CTLR_RWP (1U << 31)
|
||||
#define GICD_CTLR_E1NWF (1U << 7)
|
||||
#define GICD_CTLR_DS (1U << 6)
|
||||
#define GICD_CTLR_ARE_NS (1U << 5)
|
||||
#define GICD_CTLR_ARE_S (1U << 4)
|
||||
#define GICD_CTLR_ENGRP1S (1U << 2)
|
||||
#define GICD_CTLR_ENGRP1NS (1U << 1)
|
||||
#define GICD_CTLR_ENGRP0 (1U << 0)
|
||||
|
||||
/* Macro to access the Redistributor Control Register (GICR_CTLR) */
|
||||
#define GICR_CTLR_UWP (1U << 31)
|
||||
#define GICR_CTLR_DPG1S (1U << 26)
|
||||
#define GICR_CTLR_DPG1NS (1U << 25)
|
||||
#define GICR_CTLR_DPG0 (1U << 24)
|
||||
#define GICR_CTLR_RWP (1U << 3)
|
||||
#define GICR_CTLR_IR (1U << 2)
|
||||
#define GICR_CTLR_CES (1U << 1)
|
||||
#define GICR_CTLR_EnableLPI (1U << 0)
|
||||
|
||||
/* Macro to access the Generic Interrupt Controller Interface (GICC) */
|
||||
#define GIC_CPU_CTRL(hw_base) HWREG32((hw_base) + 0x00U)
|
||||
#define GIC_CPU_PRIMASK(hw_base) HWREG32((hw_base) + 0x04U)
|
||||
#define GIC_CPU_BINPOINT(hw_base) HWREG32((hw_base) + 0x08U)
|
||||
#define GIC_CPU_INTACK(hw_base) HWREG32((hw_base) + 0x0cU)
|
||||
#define GIC_CPU_EOI(hw_base) HWREG32((hw_base) + 0x10U)
|
||||
#define GIC_CPU_RUNNINGPRI(hw_base) HWREG32((hw_base) + 0x14U)
|
||||
#define GIC_CPU_HIGHPRI(hw_base) HWREG32((hw_base) + 0x18U)
|
||||
#define GIC_CPU_IIDR(hw_base) HWREG32((hw_base) + 0xFCU)
|
||||
|
||||
/* Macro to access the Generic Interrupt Controller Distributor (GICD) */
|
||||
#define GIC_DIST_CTRL(hw_base) HWREG32((hw_base) + 0x000U)
|
||||
#define GIC_DIST_TYPE(hw_base) HWREG32((hw_base) + 0x004U)
|
||||
#define GIC_DIST_IIDR(hw_base) HWREG32((hw_base) + 0x008U)
|
||||
#define GIC_DIST_IGROUP(hw_base, n) HWREG32((hw_base) + 0x080U + ((n) / 32U) * 4U)
|
||||
#define GIC_DIST_ENABLE_SET(hw_base, n) HWREG32((hw_base) + 0x100U + ((n) / 32U) * 4U)
|
||||
#define GIC_DIST_ENABLE_CLEAR(hw_base, n) HWREG32((hw_base) + 0x180U + ((n) / 32U) * 4U)
|
||||
#define GIC_DIST_PENDING_SET(hw_base, n) HWREG32((hw_base) + 0x200U + ((n) / 32U) * 4U)
|
||||
#define GIC_DIST_PENDING_CLEAR(hw_base, n) HWREG32((hw_base) + 0x280U + ((n) / 32U) * 4U)
|
||||
#define GIC_DIST_ACTIVE_SET(hw_base, n) HWREG32((hw_base) + 0x300U + ((n) / 32U) * 4U)
|
||||
#define GIC_DIST_ACTIVE_CLEAR(hw_base, n) HWREG32((hw_base) + 0x380U + ((n) / 32U) * 4U)
|
||||
#define GIC_DIST_PRI(hw_base, n) HWREG32((hw_base) + 0x400U + ((n) / 4U) * 4U)
|
||||
#define GIC_DIST_TARGET(hw_base, n) HWREG32((hw_base) + 0x800U + ((n) / 4U) * 4U)
|
||||
#define GIC_DIST_CONFIG(hw_base, n) HWREG32((hw_base) + 0xc00U + ((n) / 16U) * 4U)
|
||||
#define GIC_DIST_SOFTINT(hw_base) HWREG32((hw_base) + 0xf00U)
|
||||
#define GIC_DIST_CPENDSGI(hw_base, n) HWREG32((hw_base) + 0xf10U + ((n) / 4U) * 4U)
|
||||
#define GIC_DIST_SPENDSGI(hw_base, n) HWREG32((hw_base) + 0xf20U + ((n) / 4U) * 4U)
|
||||
#define GIC_DIST_ICPIDR2(hw_base) HWREG32((hw_base) + 0xfe8U)
|
||||
#define GIC_DIST_IROUTER(hw_base, n) HWREG64((hw_base) + 0x6000U + (n) * 8U)
|
||||
|
||||
/* SGI base address is at 64K offset from Redistributor base address */
|
||||
#define GIC_RSGI_OFFSET 0x10000
|
||||
|
||||
/* Macro to access the Generic Interrupt Controller Redistributor (GICR) */
|
||||
#define GIC_RDIST_CTRL(hw_base) HWREG32((hw_base) + 0x000U)
|
||||
#define GIC_RDIST_IIDR(hw_base) HWREG32((hw_base) + 0x004U)
|
||||
#define GIC_RDIST_TYPER(hw_base) HWREG64((hw_base) + 0x008U)
|
||||
#define GIC_RDIST_TSTATUSR(hw_base) HWREG32((hw_base) + 0x010U)
|
||||
#define GIC_RDIST_WAKER(hw_base) HWREG32((hw_base) + 0x014U)
|
||||
#define GIC_RDIST_SETLPIR(hw_base) HWREG32((hw_base) + 0x040U)
|
||||
#define GIC_RDIST_CLRLPIR(hw_base) HWREG32((hw_base) + 0x048U)
|
||||
#define GIC_RDIST_PROPBASER(hw_base) HWREG32((hw_base) + 0x070U)
|
||||
#define GIC_RDIST_PENDBASER(hw_base) HWREG32((hw_base) + 0x078U)
|
||||
#define GIC_RDIST_INVLPIR(hw_base) HWREG32((hw_base) + 0x0A0U)
|
||||
#define GIC_RDIST_INVALLR(hw_base) HWREG32((hw_base) + 0x0B0U)
|
||||
#define GIC_RDIST_SYNCR(hw_base) HWREG32((hw_base) + 0x0C0U)
|
||||
|
||||
#define GIC_RDISTSGI_IGROUPR0(hw_base, n) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x080U + (n) * 4U)
|
||||
#define GIC_RDISTSGI_ISENABLER0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x100U)
|
||||
#define GIC_RDISTSGI_ICENABLER0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x180U)
|
||||
#define GIC_RDISTSGI_ISPENDR0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x200U)
|
||||
#define GIC_RDISTSGI_ICPENDR0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x280U)
|
||||
#define GIC_RDISTSGI_ISACTIVER0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x300U)
|
||||
#define GIC_RDISTSGI_ICACTIVER0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x380U)
|
||||
#define GIC_RDISTSGI_IPRIORITYR(hw_base, n) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x400U + ((n) / 4U) * 4U)
|
||||
#define GIC_RDISTSGI_ICFGR0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0xC00U)
|
||||
#define GIC_RDISTSGI_ICFGR1(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0xC04U)
|
||||
#define GIC_RDISTSGI_IGRPMODR0(hw_base, n) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0xD00U + (n) * 4)
|
||||
#define GIC_RDISTSGI_NSACR(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0xE00U)
|
||||
|
||||
struct arm_gic
|
||||
{
|
||||
rt_uint64_t offset; /* the first interrupt index in the vector table */
|
||||
rt_uint64_t redist_hw_base[ARM_GIC_CPU_NUM]; /* the pointer of the gic redistributor */
|
||||
rt_uint64_t dist_hw_base; /* the base address of the gic distributor */
|
||||
rt_uint64_t cpu_hw_base[ARM_GIC_CPU_NUM]; /* the base address of the gic cpu interface */
|
||||
};
|
||||
|
||||
int arm_gic_get_active_irq(rt_uint64_t index);
|
||||
void arm_gic_ack(rt_uint64_t index, int irq);
|
||||
|
||||
void arm_gic_mask(rt_uint64_t index, int irq);
|
||||
void arm_gic_umask(rt_uint64_t index, int irq);
|
||||
|
||||
rt_uint64_t arm_gic_get_pending_irq(rt_uint64_t index, int irq);
|
||||
void arm_gic_set_pending_irq(rt_uint64_t index, int irq);
|
||||
void arm_gic_clear_pending_irq(rt_uint64_t index, int irq);
|
||||
|
||||
void arm_gic_set_configuration(rt_uint64_t index, int irq, rt_uint32_t config);
|
||||
rt_uint64_t arm_gic_get_configuration(rt_uint64_t index, int irq);
|
||||
|
||||
void arm_gic_clear_active(rt_uint64_t index, int irq);
|
||||
|
||||
void arm_gic_set_router_cpu(rt_uint64_t index, int irq, rt_uint64_t aff);
|
||||
void arm_gic_set_cpu(rt_uint64_t index, int irq, unsigned int cpumask);
|
||||
rt_uint64_t arm_gic_get_target_cpu(rt_uint64_t index, int irq);
|
||||
|
||||
void arm_gic_set_priority(rt_uint64_t index, int irq, rt_uint64_t priority);
|
||||
rt_uint64_t arm_gic_get_priority(rt_uint64_t index, int irq);
|
||||
|
||||
void arm_gic_set_interface_prior_mask(rt_uint64_t index, rt_uint64_t priority);
|
||||
rt_uint64_t arm_gic_get_interface_prior_mask(rt_uint64_t index);
|
||||
|
||||
void arm_gic_set_binary_point(rt_uint64_t index, rt_uint64_t binary_point);
|
||||
rt_uint64_t arm_gic_get_binary_point(rt_uint64_t index);
|
||||
|
||||
rt_uint64_t arm_gic_get_irq_status(rt_uint64_t index, int irq);
|
||||
|
||||
#if defined(RT_USING_SMP) || defined(RT_USING_AMP)
|
||||
void arm_gic_send_affinity_sgi(rt_uint64_t index, int irq, rt_uint32_t cpu_masks[], rt_uint64_t routing_mode);
|
||||
#endif
|
||||
|
||||
rt_uint64_t arm_gic_get_high_pending_irq(rt_uint64_t index);
|
||||
|
||||
rt_uint64_t arm_gic_get_interface_id(rt_uint64_t index);
|
||||
|
||||
void arm_gic_set_group(rt_uint64_t index, int irq, rt_uint64_t group);
|
||||
rt_uint64_t arm_gic_get_group(rt_uint64_t index, int irq);
|
||||
|
||||
int arm_gic_redist_address_set(rt_uint64_t index, rt_uint64_t redist_addr, int cpu_id);
|
||||
int arm_gic_cpu_interface_address_set(rt_uint64_t index, rt_uint64_t interface_addr, int cpu_id);
|
||||
|
||||
int arm_gic_dist_init(rt_uint64_t index, rt_uint64_t dist_base, int irq_start);
|
||||
int arm_gic_redist_init(rt_uint64_t index, rt_uint64_t redist_base);
|
||||
int arm_gic_cpu_init(rt_uint64_t index, rt_uint64_t cpu_base);
|
||||
|
||||
rt_uint64_t *arm_gic_get_gic_table_addr(void);
|
||||
void arm_gic_dump_type(rt_uint64_t index);
|
||||
void arm_gic_dump(rt_uint64_t index);
|
||||
|
||||
#endif /* defined(BSP_USING_GIC) && defined(BSP_USING_GICV3) */
|
||||
|
||||
#endif
|
||||
|
||||
33
RT_Thread/libcpu/aarch64/common/include/gtimer.h
Normal file
33
RT_Thread/libcpu/aarch64/common/include/gtimer.h
Normal file
@ -0,0 +1,33 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2011-12-20 GuEe-GUI first version
|
||||
*/
|
||||
|
||||
#ifndef __GTIMER_H__
|
||||
#define __GTIMER_H__
|
||||
|
||||
#include <rtdef.h>
|
||||
|
||||
void rt_hw_gtimer_init(void);
|
||||
void rt_hw_gtimer_local_enable(void);
|
||||
void rt_hw_gtimer_local_disable(void);
|
||||
|
||||
void rt_hw_gtimer_enable();
|
||||
|
||||
/**
 * Disable the EL1 physical timer by clearing CNTP_CTL_EL0: writing
 * xzr zeroes the ENABLE bit (and IMASK), stopping timer interrupt
 * generation on the calling core.
 */
rt_inline void rt_hw_gtimer_disable(void)
{
    __asm__ volatile ("msr CNTP_CTL_EL0, xzr":::"memory");
}
|
||||
|
||||
void rt_hw_set_gtimer_val(rt_uint64_t value);
|
||||
rt_uint64_t rt_hw_get_gtimer_val();
|
||||
rt_uint64_t rt_hw_get_cntpct_val();
|
||||
rt_uint64_t rt_hw_get_gtimer_frq();
|
||||
rt_uint64_t rt_hw_set_gtimer_frq(rt_uint64_t value);
|
||||
|
||||
#endif /* __GTIMER_H__ */
|
||||
27
RT_Thread/libcpu/aarch64/common/include/hypercall.h
Normal file
27
RT_Thread/libcpu/aarch64/common/include/hypercall.h
Normal file
@ -0,0 +1,27 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2022-02-24 GuEe-GUI first version
|
||||
*/
|
||||
|
||||
#ifndef __HYPERCALL_H__
|
||||
#define __HYPERCALL_H__
|
||||
|
||||
#include <rtdef.h>
|
||||
|
||||
/**
 * Issue an HVC #0 hypercall using the SMCCC-style register
 * convention: function identifier in w0, arguments in x1..x6, client
 * id in w7, result returned in w0.
 *
 * Fix vs. previous revision: the inline asm had no input operands, so
 * none of the eight parameters was ever placed in the registers the
 * hypervisor reads -- only the return value was tied to x0, and every
 * call passed garbage.  Each parameter is now pinned to its register
 * via a local register variable and listed as an asm operand;
 * "memory" is clobbered because the hypervisor may read or write
 * guest memory as a side effect of the call.
 *
 * @return the hypervisor's result (low 32 bits of x0)
 */
rt_inline rt_uint32_t rt_hw_hypercall(rt_uint32_t w0, rt_uint64_t x1, rt_uint64_t x2,
        rt_uint64_t x3, rt_uint64_t x4, rt_uint64_t x5, rt_uint64_t x6, rt_uint32_t w7)
{
    register rt_uint64_t r0 __asm__ ("x0") = w0;
    register rt_uint64_t r1 __asm__ ("x1") = x1;
    register rt_uint64_t r2 __asm__ ("x2") = x2;
    register rt_uint64_t r3 __asm__ ("x3") = x3;
    register rt_uint64_t r4 __asm__ ("x4") = x4;
    register rt_uint64_t r5 __asm__ ("x5") = x5;
    register rt_uint64_t r6 __asm__ ("x6") = x6;
    register rt_uint64_t r7 __asm__ ("x7") = w7;

    __asm__ volatile ("hvc #0"
                      : "+r" (r0)
                      : "r" (r1), "r" (r2), "r" (r3), "r" (r4),
                        "r" (r5), "r" (r6), "r" (r7)
                      : "memory");

    return (rt_uint32_t)r0;
}
|
||||
|
||||
rt_err_t rt_hv_stage2_map(unsigned long paddr, unsigned long size);
|
||||
|
||||
#endif
|
||||
60
RT_Thread/libcpu/aarch64/common/include/interrupt.h
Normal file
60
RT_Thread/libcpu/aarch64/common/include/interrupt.h
Normal file
@ -0,0 +1,60 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2018, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2013-07-06 Bernard first version
|
||||
*/
|
||||
|
||||
#ifndef __INTERRUPT_H__
|
||||
#define __INTERRUPT_H__
|
||||
|
||||
#include <rthw.h>
|
||||
#include <board.h>
|
||||
|
||||
#define INT_IRQ 0x00
|
||||
#define INT_FIQ 0x01
|
||||
|
||||
#define IRQ_MODE_TRIG_LEVEL (0x00) /* Trigger: level triggered interrupt */
|
||||
#define IRQ_MODE_TRIG_EDGE (0x01) /* Trigger: edge triggered interrupt */
|
||||
#define IRQ_MODE_MASK (0x01)
|
||||
|
||||
void rt_hw_vector_init(void);
|
||||
|
||||
void rt_hw_interrupt_init(void);
|
||||
void rt_hw_interrupt_mask(int vector);
|
||||
void rt_hw_interrupt_umask(int vector);
|
||||
|
||||
int rt_hw_interrupt_get_irq(void);
|
||||
void rt_hw_interrupt_ack(int vector);
|
||||
|
||||
void rt_hw_interrupt_set_target_cpus(int vector, unsigned long cpu_mask);
|
||||
unsigned int rt_hw_interrupt_get_target_cpus(int vector);
|
||||
|
||||
void rt_hw_interrupt_set_triger_mode(int vector, unsigned int mode);
|
||||
unsigned int rt_hw_interrupt_get_triger_mode(int vector);
|
||||
|
||||
void rt_hw_interrupt_set_pending(int vector);
|
||||
unsigned int rt_hw_interrupt_get_pending(int vector);
|
||||
void rt_hw_interrupt_clear_pending(int vector);
|
||||
|
||||
void rt_hw_interrupt_set_priority(int vector, unsigned int priority);
|
||||
unsigned int rt_hw_interrupt_get_priority(int vector);
|
||||
|
||||
void rt_hw_interrupt_set_priority_mask(unsigned int priority);
|
||||
unsigned int rt_hw_interrupt_get_priority_mask(void);
|
||||
|
||||
int rt_hw_interrupt_set_prior_group_bits(unsigned int bits);
|
||||
unsigned int rt_hw_interrupt_get_prior_group_bits(void);
|
||||
|
||||
rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
|
||||
void *param, const char *name);
|
||||
|
||||
#if defined(RT_USING_SMP) || defined(RT_USING_AMP)
|
||||
void rt_hw_ipi_handler_install(int ipi_vector, rt_isr_handler_t ipi_isr_handler);
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
218
RT_Thread/libcpu/aarch64/common/include/mmu.h
Normal file
218
RT_Thread/libcpu/aarch64/common/include/mmu.h
Normal file
@ -0,0 +1,218 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2023, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2021-05-12 RT-Thread the first version
|
||||
* 2023-08-15 Shell Support more mapping attribution
|
||||
*/
|
||||
#ifndef __MMU_H_
|
||||
#define __MMU_H_
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <rtthread.h>
|
||||
#include <mm_aspace.h>
|
||||
|
||||
/* normal memory wra mapping type */
|
||||
#define NORMAL_MEM 0
|
||||
/* normal nocache memory mapping type */
|
||||
#define NORMAL_NOCACHE_MEM 1
|
||||
/* device mapping type */
|
||||
#define DEVICE_MEM 2
|
||||
|
||||
struct mem_desc
|
||||
{
|
||||
unsigned long vaddr_start;
|
||||
unsigned long vaddr_end;
|
||||
unsigned long paddr_start;
|
||||
unsigned long attr;
|
||||
struct rt_varea varea;
|
||||
};
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#define RT_HW_MMU_PROT_READ 1
|
||||
#define RT_HW_MMU_PROT_WRITE 2
|
||||
#define RT_HW_MMU_PROT_EXECUTE 4
|
||||
#define RT_HW_MMU_PROT_KERNEL 8
|
||||
#define RT_HW_MMU_PROT_USER 16
|
||||
#define RT_HW_MMU_PROT_CACHE 32
|
||||
|
||||
#define MMU_ASID_SHIFT 48
|
||||
#define MMU_NG_SHIFT 11 /* not global bit */
|
||||
#define MMU_AF_SHIFT 10
|
||||
#define MMU_SHARED_SHIFT 8
|
||||
#define MMU_AP_SHIFT 6
|
||||
#define MMU_MA_SHIFT 2
|
||||
#define MMU_AP_MASK (0x3 << MMU_AP_SHIFT)
|
||||
|
||||
/* we dont support feat detecting for now, so 8-bit is used to fallback */
|
||||
#define MMU_SUPPORTED_ASID_BITS 8
|
||||
|
||||
#define MMU_AP_KAUN 0UL /* kernel r/w, user none */
|
||||
#define MMU_AP_KAUA 1UL /* kernel r/w, user r/w */
|
||||
#define MMU_AP_KRUN 2UL /* kernel r, user none */
|
||||
#define MMU_AP_KRUR 3UL /* kernel r, user r */
|
||||
#define MMU_ATTR_AF (1ul << MMU_AF_SHIFT) /* the access flag */
|
||||
#define MMU_ATTR_DBM (1ul << 51) /* the dirty bit modifier */
|
||||
|
||||
#define MMU_MAP_CUSTOM(ap, mtype, nglobal) \
|
||||
((0x1UL << MMU_AF_SHIFT) | (0x2UL << MMU_SHARED_SHIFT) | \
|
||||
((ap) << MMU_AP_SHIFT) | ((mtype) << MMU_MA_SHIFT)) | \
|
||||
((rt_ubase_t)(nglobal) << MMU_NG_SHIFT)
|
||||
#define MMU_MAP_K_ROCB MMU_MAP_CUSTOM(MMU_AP_KRUN, NORMAL_MEM, 0)
|
||||
#define MMU_MAP_K_RO MMU_MAP_CUSTOM(MMU_AP_KRUN, NORMAL_NOCACHE_MEM, 0)
|
||||
#define MMU_MAP_K_RWCB MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM, 0)
|
||||
#define MMU_MAP_K_RW MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_NOCACHE_MEM, 0)
|
||||
#define MMU_MAP_K_DEVICE MMU_MAP_CUSTOM(MMU_AP_KAUN, DEVICE_MEM, 0)
|
||||
#define MMU_MAP_U_ROCB MMU_MAP_CUSTOM(MMU_AP_KRUR, NORMAL_MEM, 1)
|
||||
#define MMU_MAP_U_RO MMU_MAP_CUSTOM(MMU_AP_KRUR, NORMAL_NOCACHE_MEM, 1)
|
||||
#define MMU_MAP_U_RWCB MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_MEM, 1)
|
||||
#define MMU_MAP_U_RW MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_NOCACHE_MEM, 1)
|
||||
#define MMU_MAP_U_DEVICE MMU_MAP_CUSTOM(MMU_AP_KAUA, DEVICE_MEM, 1)
|
||||
#define MMU_MAP_TRACE(attr) ((attr) & ~(MMU_ATTR_AF | MMU_ATTR_DBM))
|
||||
|
||||
#define ARCH_SECTION_SHIFT 21
|
||||
#define ARCH_SECTION_SIZE (1 << ARCH_SECTION_SHIFT)
|
||||
#define ARCH_SECTION_MASK (ARCH_SECTION_SIZE - 1)
|
||||
#define ARCH_PAGE_SHIFT 12
|
||||
#define ARCH_PAGE_SIZE (1 << ARCH_PAGE_SHIFT)
|
||||
#define ARCH_PAGE_MASK (ARCH_PAGE_SIZE - 1)
|
||||
#define ARCH_PAGE_TBL_SHIFT 12
|
||||
#define ARCH_PAGE_TBL_SIZE (1 << ARCH_PAGE_TBL_SHIFT)
|
||||
#define ARCH_PAGE_TBL_MASK (ARCH_PAGE_TBL_SIZE - 1)
|
||||
|
||||
#define ARCH_VADDR_WIDTH 48
|
||||
#define ARCH_ADDRESS_WIDTH_BITS 64
|
||||
|
||||
#define MMU_MAP_ERROR_VANOTALIGN -1
|
||||
#define MMU_MAP_ERROR_PANOTALIGN -2
|
||||
#define MMU_MAP_ERROR_NOPAGE -3
|
||||
#define MMU_MAP_ERROR_CONFLICT -4
|
||||
|
||||
#define ARCH_MAP_FAILED ((void *)0x1ffffffffffff)
|
||||
|
||||
#define ARCH_EARLY_MAP_SIZE (0x40000000)
|
||||
/* this is big enough for even 16TB first-time mapping */
|
||||
#define ARCH_PAGE_INIT_THRESHOLD (0x10000000)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
struct rt_aspace;
|
||||
|
||||
void rt_hw_mmu_ktbl_set(unsigned long tbl);
|
||||
void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1,
|
||||
unsigned long size, unsigned long pv_off);
|
||||
void rt_hw_mmu_setup(struct rt_aspace *aspace, struct mem_desc *mdesc,
|
||||
int desc_nr);
|
||||
|
||||
int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, size_t size, size_t *vtable, size_t pv_off);
|
||||
void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr,
|
||||
size_t size, size_t attr);
|
||||
void rt_hw_mmu_unmap(struct rt_aspace *aspace, void *v_addr, size_t size);
|
||||
void rt_hw_aspace_switch(struct rt_aspace *aspace);
|
||||
void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr);
|
||||
void rt_hw_mmu_kernel_map_init(struct rt_aspace *aspace, rt_size_t vaddr_start,
|
||||
rt_size_t size);
|
||||
void *rt_hw_mmu_pgtbl_create(void);
|
||||
void rt_hw_mmu_pgtbl_delete(void *pgtbl);
|
||||
void *rt_hw_mmu_tbl_get(void);
|
||||
|
||||
/* Translate a kernel virtual address to physical using the hardware
 * address-translation instruction, avoiding a software page-table walk.
 * Returns ARCH_MAP_FAILED when the translation faults. */
static inline void *rt_hw_mmu_kernel_v2p(void *v_addr)
{
    rt_ubase_t par;
    void *paddr;
    /* AT S1E1W: stage-1, EL1, write-access translation of v_addr;
     * the result (or fault info) lands in PAR_EL1 */
    __asm__ volatile("at s1e1w, %0"::"r"(v_addr):"memory");
    __asm__ volatile("mrs %0, par_el1":"=r"(par)::"memory");

    if (par & 0x1)          /* PAR_EL1.F: translation aborted */
    {
        paddr = ARCH_MAP_FAILED;
    }
    else
    {
/* PA field of PAR_EL1: bits [47:12] */
#define MMU_ADDRESS_MASK 0x0000fffffffff000UL
        par &= MMU_ADDRESS_MASK;
        /* re-attach the page offset from the virtual address */
        par |= (rt_ubase_t)v_addr & ARCH_PAGE_MASK;
        paddr = (void *)par;
    }

    return paddr;
}
|
||||
/**
|
||||
* @brief Add permission from attribution
|
||||
*
|
||||
* @param attr architecture specified mmu attribution
|
||||
* @param prot protect that will be added
|
||||
* @return size_t returned attribution
|
||||
*/
|
||||
/**
 * @brief Add permission from attribution
 *
 * @param attr architecture specified mmu attribution
 * @param prot protect that will be added
 * @return size_t returned attribution
 */
rt_inline size_t rt_hw_mmu_attr_add_perm(size_t attr, rt_base_t prot)
{
    switch (prot)
    {
    /* add write permission for user: replace the AP field with
     * MMU_AP_KAUA (kernel r/w, user r/w)
     * (original comment said "remove" — it was copy-pasted from
     * rt_hw_mmu_attr_rm_perm; this case grants user write) */
    case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
        attr = (attr & ~MMU_AP_MASK) | (MMU_AP_KAUA << MMU_AP_SHIFT);
        break;
    default:
        /* only the user-write combination is supported so far */
        RT_ASSERT(0);
    }
    return attr;
}
|
||||
|
||||
/**
|
||||
* @brief Remove permission from attribution
|
||||
*
|
||||
* @param attr architecture specified mmu attribution
|
||||
* @param prot protect that will be removed
|
||||
* @return size_t returned attribution
|
||||
*/
|
||||
/**
 * @brief Remove permission from attribution
 *
 * @param attr architecture specified mmu attribution
 * @param prot protect that will be removed
 * @return size_t returned attribution
 */
rt_inline size_t rt_hw_mmu_attr_rm_perm(size_t attr, rt_base_t prot)
{
    switch (prot)
    {
    /* remove write permission for user: the AP field sits at bits [7:6]
     * (MMU_AP_SHIFT == 6). If bit 6 is set (EL0 has access), also set
     * bit 7 (read-only), which turns MMU_AP_KAUA (1) into MMU_AP_KRUR (3). */
    case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
        if (attr & 0x40)
            attr |= 0x80;
        break;
    default:
        /* only the user-write combination is supported so far */
        RT_ASSERT(0);
    }
    return attr;
}
|
||||
|
||||
/**
|
||||
* @brief Test permission from attribution
|
||||
*
|
||||
* @param attr architecture specified mmu attribution
|
||||
* @param prot protect that will be test
|
||||
* @return rt_bool_t RT_TRUE if the prot is allowed, otherwise RT_FALSE
|
||||
*/
|
||||
/**
 * @brief Test permission from attribution
 *
 * @param attr architecture specified mmu attribution
 * @param prot protect that will be test
 * @return rt_bool_t RT_TRUE if the prot is allowed, otherwise RT_FALSE
 */
rt_inline rt_bool_t rt_hw_mmu_attr_test_perm(size_t attr, rt_base_t prot)
{
    rt_bool_t allowed = RT_FALSE;

    switch (prot)
    {
    /* test write permission for user: allowed only when the AP field
     * equals MMU_AP_KAUA (kernel r/w, user r/w) */
    case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
        allowed = ((attr & MMU_AP_MASK) == (MMU_AP_KAUA << MMU_AP_SHIFT))
                      ? RT_TRUE : RT_FALSE;
        break;
    default:
        /* only the user-write combination is supported so far */
        RT_ASSERT(0);
    }
    return allowed;
}
|
||||
|
||||
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
|
||||
enum rt_mmu_cntl cmd);
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#endif
|
||||
151
RT_Thread/libcpu/aarch64/common/include/psci.h
Normal file
151
RT_Thread/libcpu/aarch64/common/include/psci.h
Normal file
@ -0,0 +1,151 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2022, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2021-09-09 GuEe-GUI The first version
|
||||
*/
|
||||
|
||||
#ifndef __PSCI_H__
|
||||
#define __PSCI_H__
|
||||
|
||||
#include <rtdef.h>
|
||||
|
||||
/*
|
||||
* Non-Confidential PSCI 1.0 release (30 January 2015), and errata fix for PSCI 0.2, unsupport PSCI 0.1
|
||||
*/
|
||||
|
||||
/* PSCI 0.2 interface */
|
||||
#define PSCI_0_2_FN_BASE 0x84000000
|
||||
#define PSCI_0_2_FN(n) (PSCI_0_2_FN_BASE + (n))
|
||||
#define PSCI_0_2_FN_END 0x8400001F
|
||||
|
||||
#define PSCI_0_2_FN64_BASE 0xC4000000
|
||||
#define PSCI_0_2_FN64(n) (PSCI_0_2_FN64_BASE + (n))
|
||||
#define PSCI_0_2_FN64_END 0xC400001F
|
||||
|
||||
#define PSCI_0_2_FN_PSCI_VERSION PSCI_0_2_FN(0)
|
||||
#define PSCI_0_2_FN_CPU_SUSPEND PSCI_0_2_FN(1)
|
||||
#define PSCI_0_2_FN_CPU_OFF PSCI_0_2_FN(2)
|
||||
#define PSCI_0_2_FN_CPU_ON PSCI_0_2_FN(3)
|
||||
#define PSCI_0_2_FN_AFFINITY_INFO PSCI_0_2_FN(4)
|
||||
#define PSCI_0_2_FN_MIGRATE PSCI_0_2_FN(5)
|
||||
#define PSCI_0_2_FN_MIGRATE_INFO_TYPE PSCI_0_2_FN(6)
|
||||
#define PSCI_0_2_FN_MIGRATE_INFO_UP_CPU PSCI_0_2_FN(7)
|
||||
#define PSCI_0_2_FN_SYSTEM_OFF PSCI_0_2_FN(8)
|
||||
#define PSCI_0_2_FN_SYSTEM_RESET PSCI_0_2_FN(9)
|
||||
|
||||
#define PSCI_0_2_FN64_CPU_SUSPEND PSCI_0_2_FN64(1)
|
||||
#define PSCI_0_2_FN64_CPU_ON PSCI_0_2_FN64(3)
|
||||
#define PSCI_0_2_FN64_AFFINITY_INFO PSCI_0_2_FN64(4)
|
||||
#define PSCI_0_2_FN64_MIGRATE PSCI_0_2_FN64(5)
|
||||
#define PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU PSCI_0_2_FN64(7)
|
||||
|
||||
/* PSCI 1.0 interface */
|
||||
#define PSCI_1_0_FN_PSCI_FEATURES PSCI_0_2_FN(10)
|
||||
#define PSCI_1_0_FN_CPU_FREEZE PSCI_0_2_FN(11)
|
||||
#define PSCI_1_0_FN_CPU_DEFAULT_SUSPEND PSCI_0_2_FN(12)
|
||||
#define PSCI_1_0_FN_NODE_HW_STATE PSCI_0_2_FN(13)
|
||||
#define PSCI_1_0_FN_SYSTEM_SUSPEND PSCI_0_2_FN(14)
|
||||
#define PSCI_1_0_FN_SET_SUSPEND_MODE PSCI_0_2_FN(15)
|
||||
#define PSCI_1_0_FN_STAT_RESIDENCY PSCI_0_2_FN(16)
|
||||
#define PSCI_1_0_FN_STAT_COUNT PSCI_0_2_FN(17)
|
||||
#define PSCI_1_1_FN_SYSTEM_RESET2 PSCI_0_2_FN(18)
|
||||
|
||||
#define PSCI_1_0_FN64_CPU_DEFAULT_SUSPEND PSCI_0_2_FN64(12)
|
||||
#define PSCI_1_0_FN64_NODE_HW_STATE PSCI_0_2_FN64(13)
|
||||
#define PSCI_1_0_FN64_SYSTEM_SUSPEND PSCI_0_2_FN64(14)
|
||||
#define PSCI_1_0_FN64_STAT_RESIDENCY PSCI_0_2_FN64(16)
|
||||
#define PSCI_1_0_FN64_STAT_COUNT PSCI_0_2_FN64(17)
|
||||
#define PSCI_1_1_FN64_SYSTEM_RESET2 PSCI_0_2_FN64(18)
|
||||
|
||||
/* PSCI version decoding (independent of PSCI version) */
|
||||
#define PSCI_VERSION_MAJOR_SHIFT 16
|
||||
#define PSCI_VERSION_MINOR_MASK ((1U << PSCI_VERSION_MAJOR_SHIFT) - 1)
|
||||
#define PSCI_VERSION_MAJOR_MASK ~PSCI_VERSION_MINOR_MASK
|
||||
#define PSCI_VERSION_MAJOR(version) (((version) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
|
||||
#define PSCI_VERSION_MINOR(version) ((version) & PSCI_VERSION_MINOR_MASK)
|
||||
#define PSCI_VERSION(major, min) ((((major) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \
|
||||
((min) & PSCI_VERSION_MINOR_MASK))
|
||||
|
||||
/* PSCI affinity level state returned by AFFINITY_INFO */
|
||||
#define PSCI_AFFINITY_LEVEL_ON 0
|
||||
#define PSCI_AFFINITY_LEVEL_OFF 1
|
||||
#define PSCI_AFFINITY_LEVEL_ON_PENDING 2
|
||||
|
||||
/*
|
||||
* PSCI power state
|
||||
* power_level:
|
||||
* Level 0: cores
|
||||
* Level 1: clusters
|
||||
* Level 2: system
|
||||
* state_type:
|
||||
* value 0: standby or retention state
|
||||
* value 1: powerdown state(entry and context_id is valid)
|
||||
* state_id:
|
||||
* StateID
|
||||
*/
|
||||
#define PSCI_POWER_STATE_LEVEL_CORES 0
|
||||
#define PSCI_POWER_STATE_LEVEL_CLUSTERS 1
|
||||
#define PSCI_POWER_STATE_LEVEL_SYSTEM 2
|
||||
|
||||
#define PSCI_POWER_STATE_TYPE_STANDBY 0
|
||||
#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
|
||||
|
||||
#define PSCI_POWER_LEVEL_SHIFT 24
|
||||
#define PSCI_POWER_STATE_TYPE_SHIFT 16
|
||||
#define PSCI_POWER_STATE_ID_SHIFT 0
|
||||
#define PSCI_POWER_STATE(power_level, state_type, state_id) \
|
||||
( \
|
||||
((power_level) << PSCI_POWER_LEVEL_SHIFT) | \
|
||||
((state_type) << PSCI_POWER_STATE_TYPE_SHIFT) | \
|
||||
((state_id) << PSCI_POWER_STATE_ID_SHIFT) \
|
||||
)
|
||||
#define PSCI_POWER_LEVEL_VAL(state) (((state) >> PSCI_POWER_LEVEL_SHIFT) & 0x3)
|
||||
#define PSCI_POWER_STATE_TYPE_VAL(state) (((state) >> PSCI_POWER_STATE_TYPE_SHIFT) & 0x1)
|
||||
#define PSCI_POWER_STATE_ID_VAL(state) (((state) >> PSCI_POWER_STATE_ID_SHIFT) & 0xffff)
|
||||
|
||||
/*
|
||||
* For system, cluster, core
|
||||
* 0: run
|
||||
* 1: standby(only core)
|
||||
* 2: retention
|
||||
* 3: powerdown
|
||||
*/
|
||||
#define PSCI_POWER_STATE_ID_RUN 0
|
||||
#define PSCI_POWER_STATE_ID_STANDBY 1
|
||||
#define PSCI_POWER_STATE_ID_RETENTION 2
|
||||
#define PSCI_POWER_STATE_ID_POWERDOWN 3
|
||||
|
||||
#define PSCI_POWER_STATE_ID(state_id_power_level, system, cluster, core) \
|
||||
( \
|
||||
((state_id_power_level) << 12) | \
|
||||
((system) << 8) | \
|
||||
((cluster) << 4) | \
|
||||
(core) \
|
||||
)
|
||||
|
||||
#define PSCI_RET_SUCCESS 0
|
||||
#define PSCI_RET_NOT_SUPPORTED (-1)
|
||||
#define PSCI_RET_INVALID_PARAMETERS (-2)
|
||||
#define PSCI_RET_DENIED (-3)
|
||||
#define PSCI_RET_ALREADY_ON (-4)
|
||||
#define PSCI_RET_ON_PENDING (-5)
|
||||
#define PSCI_RET_INTERNAL_FAILURE (-6)
|
||||
#define PSCI_RET_NOT_PRESENT (-7)
|
||||
#define PSCI_RET_DISABLED (-8)
|
||||
#define PSCI_RET_INVALID_ADDRESS (-9)
|
||||
|
||||
void psci_system_off(void);
|
||||
void psci_system_reboot(void);
|
||||
rt_uint32_t rt_psci_get_version(void);
|
||||
rt_uint32_t rt_psci_cpu_on(int cpuid, rt_ubase_t entry_point);
|
||||
rt_uint32_t rt_psci_cpu_off(rt_uint32_t state);
|
||||
rt_uint32_t rt_psci_cpu_suspend(rt_uint32_t power_state, rt_ubase_t entry_point);
|
||||
rt_uint32_t rt_psci_migrate(int cpuid);
|
||||
rt_uint32_t rt_psci_get_affinity_info(rt_ubase_t target_affinity, rt_ubase_t lowest_affinity_level);
|
||||
rt_uint32_t rt_psci_migrate_info_type(void);
|
||||
|
||||
#endif /* __PSCI_H__ */
|
||||
22
RT_Thread/libcpu/aarch64/common/include/setup.h
Normal file
22
RT_Thread/libcpu/aarch64/common/include/setup.h
Normal file
@ -0,0 +1,22 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2023, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2023-02-21 GuEe-GUI first version
|
||||
*/
|
||||
|
||||
#ifndef __SETUP_H__
|
||||
#define __SETUP_H__
|
||||
|
||||
#include <rtdef.h>
|
||||
#include <mm_aspace.h>
|
||||
#ifdef RT_USING_OFW
|
||||
#include <drivers/ofw_fdt.h>
|
||||
#endif
|
||||
|
||||
void rt_hw_common_setup(void);
|
||||
|
||||
#endif /* __SETUP_H__ */
|
||||
45
RT_Thread/libcpu/aarch64/common/include/smccc.h
Normal file
45
RT_Thread/libcpu/aarch64/common/include/smccc.h
Normal file
@ -0,0 +1,45 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2019, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
*/
|
||||
#ifndef __SMCCC_H__
|
||||
#define __SMCCC_H__
|
||||
|
||||
/**
|
||||
* result from SMC/HVC call
|
||||
* ARM DEN0028E chapter 5,
|
||||
*/
|
||||
typedef struct arm_smccc_res_t
|
||||
{
|
||||
unsigned long a0;
|
||||
// reserved for ARM SMC and HVC Fast Call services
|
||||
unsigned long a1;
|
||||
unsigned long a2;
|
||||
unsigned long a3;
|
||||
} arm_smccc_res_t;
|
||||
|
||||
/**
|
||||
* quirk is a structure contains vendor specified information,
|
||||
* it just a placeholder currently
|
||||
*/
|
||||
struct arm_smccc_quirk_t
|
||||
{
|
||||
};
|
||||
|
||||
/* smccc version 0.2 */
|
||||
|
||||
void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
|
||||
unsigned long a3, unsigned long a4, unsigned long a5,
|
||||
unsigned long a6, unsigned long a7, struct arm_smccc_res_t *res,
|
||||
struct arm_smccc_quirk_t *quirk);
|
||||
|
||||
void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
|
||||
unsigned long a3, unsigned long a4, unsigned long a5,
|
||||
unsigned long a6, unsigned long a7, struct arm_smccc_res_t *res,
|
||||
struct arm_smccc_quirk_t *quirk);
|
||||
|
||||
#endif /* __SMCCC_H__ */
|
||||
91
RT_Thread/libcpu/aarch64/common/include/tlb.h
Normal file
91
RT_Thread/libcpu/aarch64/common/include/tlb.h
Normal file
@ -0,0 +1,91 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2022, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2022-11-28 WangXiaoyao the first version
|
||||
*/
|
||||
#ifndef __TLB_H__
|
||||
#define __TLB_H__
|
||||
|
||||
#include <rtthread.h>
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
#include "mm_aspace.h"
|
||||
#include "mmu.h"
|
||||
|
||||
#define TLBI_ARG(addr, asid) \
|
||||
({ \
|
||||
rt_ubase_t arg = (rt_ubase_t)(addr) >> ARCH_PAGE_SHIFT; \
|
||||
arg &= (1ull << 44) - 1; \
|
||||
arg |= (rt_ubase_t)(asid) << MMU_ASID_SHIFT; \
|
||||
(void *)arg; \
|
||||
})
|
||||
|
||||
/* Invalidate all stage-1 EL1 TLB entries on every core in the
 * inner-shareable domain (broadcast flush). */
static inline void rt_hw_tlb_invalidate_all(void)
{
    __asm__ volatile(
        // ensure updates to pte completed
        "dsb ishst\n"
        "tlbi vmalle1is\n"
        "dsb ish\n"
        // after tlb in new context, refresh inst
        "isb\n" ::
        : "memory");
}
|
||||
|
||||
/* Invalidate all stage-1 EL1 TLB entries on the LOCAL core only.
 * Fix: use "tlbi vmalle1" (core-local) rather than "tlbi vmalle1is",
 * which broadcasts the invalidation to the whole inner-shareable domain
 * and defeats the purpose of a local-only flush — consistent with the
 * non-shareable barriers (dsb nshst / dsb nsh) already used here. */
static inline void rt_hw_tlb_invalidate_all_local(void)
{
    __asm__ volatile(
        // ensure updates to pte completed
        "dsb nshst\n"
        "tlbi vmalle1\n"
        "dsb nsh\n"
        // after tlb in new context, refresh inst
        "isb\n" ::
        : "memory");
}
|
||||
|
||||
static inline void rt_hw_tlb_invalidate_aspace(rt_aspace_t aspace)
|
||||
{
|
||||
#ifdef ARCH_USING_ASID
|
||||
__asm__ volatile(
|
||||
// ensure updates to pte completed
|
||||
"dsb nshst\n"
|
||||
"tlbi aside1is, %0\n"
|
||||
"dsb nsh\n"
|
||||
// after tlb in new context, refresh inst
|
||||
"isb\n" ::"r"(TLBI_ARG(0ul, aspace->asid))
|
||||
: "memory");
|
||||
#else
|
||||
rt_hw_tlb_invalidate_all();
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void rt_hw_tlb_invalidate_page(rt_aspace_t aspace, void *start)
|
||||
{
|
||||
start = TLBI_ARG(start, 0);
|
||||
__asm__ volatile(
|
||||
"dsb ishst\n"
|
||||
"tlbi vaae1is, %0\n"
|
||||
"dsb ish\n"
|
||||
"isb\n" ::"r"(start)
|
||||
: "memory");
|
||||
}
|
||||
|
||||
/* Invalidate TLB entries covering [start, start + size).
 * A single page is cheaper to shoot down individually; anything larger
 * falls back to flushing the whole address space. */
static inline void rt_hw_tlb_invalidate_range(rt_aspace_t aspace, void *start,
                                              size_t size, size_t stride)
{
    if (size > ARCH_PAGE_SIZE)
    {
        rt_hw_tlb_invalidate_aspace(aspace);
        return;
    }

    rt_hw_tlb_invalidate_page(aspace, start);
}
|
||||
|
||||
#endif /* __TLB_H__ */
|
||||
128
RT_Thread/libcpu/aarch64/common/include/vector_gcc.h
Normal file
128
RT_Thread/libcpu/aarch64/common/include/vector_gcc.h
Normal file
@ -0,0 +1,128 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
|
||||
* 2024-04-08 Shell Optimizing exception switch between u-space/kernel,
|
||||
*/
|
||||
|
||||
#ifndef __ARM64_INC_VECTOR_H__
|
||||
#define __ARM64_INC_VECTOR_H__
|
||||
|
||||
#include "asm-generic.h"
|
||||
|
||||
#include <rtconfig.h>
|
||||
#include <asm-fpu.h>
|
||||
#include <armv8.h>
|
||||
|
||||
.macro SAVE_IRQ_CONTEXT
|
||||
/* Save the entire context. */
|
||||
SAVE_FPU sp
|
||||
stp x0, x1, [sp, #-0x10]!
|
||||
stp x2, x3, [sp, #-0x10]!
|
||||
stp x4, x5, [sp, #-0x10]!
|
||||
stp x6, x7, [sp, #-0x10]!
|
||||
stp x8, x9, [sp, #-0x10]!
|
||||
stp x10, x11, [sp, #-0x10]!
|
||||
stp x12, x13, [sp, #-0x10]!
|
||||
stp x14, x15, [sp, #-0x10]!
|
||||
stp x16, x17, [sp, #-0x10]!
|
||||
stp x18, x19, [sp, #-0x10]!
|
||||
stp x20, x21, [sp, #-0x10]!
|
||||
stp x22, x23, [sp, #-0x10]!
|
||||
stp x24, x25, [sp, #-0x10]!
|
||||
stp x26, x27, [sp, #-0x10]!
|
||||
stp x28, x29, [sp, #-0x10]!
|
||||
mrs x28, fpcr
|
||||
mrs x29, fpsr
|
||||
stp x28, x29, [sp, #-0x10]!
|
||||
mrs x29, sp_el0
|
||||
stp x29, x30, [sp, #-0x10]!
|
||||
|
||||
mrs x3, spsr_el1
|
||||
mrs x2, elr_el1
|
||||
|
||||
stp x2, x3, [sp, #-0x10]!
|
||||
.endm
|
||||
|
||||
#ifdef RT_USING_SMP
|
||||
#include "../mp/context_gcc.h"
|
||||
#else
|
||||
#include "../up/context_gcc.h"
|
||||
#endif
|
||||
|
||||
.macro RESTORE_IRQ_CONTEXT_NO_SPEL0
|
||||
ldp x2, x3, [sp], #0x10
|
||||
msr elr_el1, x2
|
||||
msr spsr_el1, x3
|
||||
|
||||
ldp x29, x30, [sp], #0x10
|
||||
|
||||
ldp x28, x29, [sp], #0x10
|
||||
msr fpcr, x28
|
||||
msr fpsr, x29
|
||||
|
||||
ldp x28, x29, [sp], #0x10
|
||||
ldp x26, x27, [sp], #0x10
|
||||
ldp x24, x25, [sp], #0x10
|
||||
ldp x22, x23, [sp], #0x10
|
||||
ldp x20, x21, [sp], #0x10
|
||||
ldp x18, x19, [sp], #0x10
|
||||
ldp x16, x17, [sp], #0x10
|
||||
ldp x14, x15, [sp], #0x10
|
||||
ldp x12, x13, [sp], #0x10
|
||||
ldp x10, x11, [sp], #0x10
|
||||
ldp x8, x9, [sp], #0x10
|
||||
ldp x6, x7, [sp], #0x10
|
||||
ldp x4, x5, [sp], #0x10
|
||||
ldp x2, x3, [sp], #0x10
|
||||
ldp x0, x1, [sp], #0x10
|
||||
|
||||
RESTORE_FPU sp
|
||||
.endm
|
||||
|
||||
.macro EXCEPTION_SWITCH, eframex, tmpx
|
||||
#ifdef RT_USING_SMART
|
||||
/**
|
||||
* test the spsr for execution level 0
|
||||
* That is { PSTATE.[NZCV] := SPSR_EL1 & M.EL0t }
|
||||
*/
|
||||
ldr \tmpx, [\eframex, #CONTEXT_OFFSET_SPSR_EL1]
|
||||
and \tmpx, \tmpx, 0x1f
|
||||
cbz \tmpx, 1f
|
||||
b 2f
|
||||
1:
|
||||
b arch_ret_to_user
|
||||
2:
|
||||
#endif /* RT_USING_SMART */
|
||||
.endm
|
||||
|
||||
.macro SAVE_USER_CTX, eframex, tmpx
|
||||
#ifdef RT_USING_SMART
|
||||
mrs \tmpx, spsr_el1
|
||||
and \tmpx, \tmpx, 0xf
|
||||
cbz \tmpx, 1f
|
||||
b 2f
|
||||
1:
|
||||
mov x0, \eframex
|
||||
bl lwp_uthread_ctx_save
|
||||
2:
|
||||
#endif /* RT_USING_SMART */
|
||||
.endm
|
||||
|
||||
.macro RESTORE_USER_CTX, eframex, tmpx
|
||||
#ifdef RT_USING_SMART
|
||||
ldr \tmpx, [\eframex, #CONTEXT_OFFSET_SPSR_EL1]
|
||||
and \tmpx, \tmpx, 0x1f
|
||||
cbz \tmpx, 1f
|
||||
b 2f
|
||||
1:
|
||||
bl lwp_uthread_ctx_restore
|
||||
2:
|
||||
#endif /* RT_USING_SMART */
|
||||
.endm
|
||||
|
||||
#endif /* __ARM64_INC_VECTOR_H__ */
|
||||
451
RT_Thread/libcpu/aarch64/common/interrupt.c
Normal file
451
RT_Thread/libcpu/aarch64/common/interrupt.c
Normal file
@ -0,0 +1,451 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2022, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2013-07-06 Bernard first version
|
||||
* 2018-11-22 Jesven add smp support
|
||||
*/
|
||||
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
#include "interrupt.h"
|
||||
#include "gic.h"
|
||||
#include "gicv3.h"
|
||||
#include "ioremap.h"
|
||||
|
||||
/* exception and interrupt handler table */
|
||||
struct rt_irq_desc isr_table[MAX_HANDLERS];
|
||||
|
||||
#ifndef RT_CPUS_NR
|
||||
#define RT_CPUS_NR 1
|
||||
#endif
|
||||
|
||||
const unsigned int VECTOR_BASE = 0x00;
|
||||
extern void rt_cpu_vector_set_base(void *addr);
|
||||
extern void *system_vectors;
|
||||
|
||||
#ifdef RT_USING_SMP
|
||||
#define rt_interrupt_nest rt_cpu_self()->irq_nest
|
||||
#else
|
||||
extern volatile rt_atomic_t rt_interrupt_nest;
|
||||
#endif
|
||||
|
||||
#ifdef SOC_BCM283x
|
||||
static void default_isr_handler(int vector, void *param)
|
||||
{
|
||||
#ifdef RT_USING_SMP
|
||||
rt_kprintf("cpu %d unhandled irq: %d\n", rt_hw_cpu_id(),vector);
|
||||
#else
|
||||
rt_kprintf("unhandled irq: %d\n",vector);
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
void rt_hw_vector_init(void)
|
||||
{
|
||||
rt_cpu_vector_set_base(&system_vectors);
|
||||
}
|
||||
|
||||
/**
|
||||
* This function will initialize hardware interrupt
|
||||
*/
|
||||
void rt_hw_interrupt_init(void)
|
||||
{
|
||||
#ifdef SOC_BCM283x
|
||||
rt_uint32_t index;
|
||||
/* initialize vector table */
|
||||
rt_hw_vector_init();
|
||||
|
||||
/* initialize exceptions table */
|
||||
rt_memset(isr_table, 0x00, sizeof(isr_table));
|
||||
|
||||
/* mask all of interrupts */
|
||||
IRQ_DISABLE_BASIC = 0x000000ff;
|
||||
IRQ_DISABLE1 = 0xffffffff;
|
||||
IRQ_DISABLE2 = 0xffffffff;
|
||||
for (index = 0; index < MAX_HANDLERS; index ++)
|
||||
{
|
||||
isr_table[index].handler = default_isr_handler;
|
||||
isr_table[index].param = RT_NULL;
|
||||
#ifdef RT_USING_INTERRUPT_INFO
|
||||
rt_strncpy(isr_table[index].name, "unknown", RT_NAME_MAX);
|
||||
isr_table[index].counter = 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
/* init interrupt nest, and context in thread sp */
|
||||
rt_atomic_store(&rt_interrupt_nest, 0);
|
||||
#else
|
||||
rt_uint64_t gic_cpu_base;
|
||||
rt_uint64_t gic_dist_base;
|
||||
#ifdef BSP_USING_GICV3
|
||||
rt_uint64_t gic_rdist_base;
|
||||
#endif
|
||||
rt_uint64_t gic_irq_start;
|
||||
|
||||
/* initialize vector table */
|
||||
rt_hw_vector_init();
|
||||
|
||||
/* initialize exceptions table */
|
||||
rt_memset(isr_table, 0x00, sizeof(isr_table));
|
||||
|
||||
/* initialize ARM GIC */
|
||||
#if defined(RT_USING_SMART) || defined(RT_USING_OFW)
|
||||
gic_dist_base = (rt_uint64_t)rt_ioremap((void*)platform_get_gic_dist_base(), 0x40000);
|
||||
gic_cpu_base = (rt_uint64_t)rt_ioremap((void*)platform_get_gic_cpu_base(), 0x1000);
|
||||
#ifdef BSP_USING_GICV3
|
||||
gic_rdist_base = (rt_uint64_t)rt_ioremap((void*)platform_get_gic_redist_base(),
|
||||
ARM_GIC_CPU_NUM * (2 << 16));
|
||||
#endif
|
||||
#else
|
||||
gic_dist_base = platform_get_gic_dist_base();
|
||||
gic_cpu_base = platform_get_gic_cpu_base();
|
||||
#ifdef BSP_USING_GICV3
|
||||
gic_rdist_base = platform_get_gic_redist_base();
|
||||
#endif
|
||||
#endif
|
||||
|
||||
gic_irq_start = GIC_IRQ_START;
|
||||
|
||||
arm_gic_dist_init(0, gic_dist_base, gic_irq_start);
|
||||
arm_gic_cpu_init(0, gic_cpu_base);
|
||||
#ifdef BSP_USING_GICV3
|
||||
arm_gic_redist_init(0, gic_rdist_base);
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* This function will mask a interrupt.
|
||||
* @param vector the interrupt number
|
||||
*/
|
||||
void rt_hw_interrupt_mask(int vector)
|
||||
{
|
||||
#ifdef SOC_BCM283x
|
||||
if (vector < 32)
|
||||
{
|
||||
IRQ_DISABLE1 = (1UL << vector);
|
||||
}
|
||||
else if (vector < 64)
|
||||
{
|
||||
vector = vector % 32;
|
||||
IRQ_DISABLE2 = (1UL << vector);
|
||||
}
|
||||
else
|
||||
{
|
||||
vector = vector - 64;
|
||||
IRQ_DISABLE_BASIC = (1UL << vector);
|
||||
}
|
||||
#else
|
||||
arm_gic_mask(0, vector);
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* This function will un-mask a interrupt.
|
||||
* @param vector the interrupt number
|
||||
*/
|
||||
void rt_hw_interrupt_umask(int vector)
|
||||
{
|
||||
#ifdef SOC_BCM283x
|
||||
if (vector < 32)
|
||||
{
|
||||
IRQ_ENABLE1 = (1UL << vector);
|
||||
}
|
||||
else if (vector < 64)
|
||||
{
|
||||
vector = vector % 32;
|
||||
IRQ_ENABLE2 = (1UL << vector);
|
||||
}
|
||||
else
|
||||
{
|
||||
vector = vector - 64;
|
||||
IRQ_ENABLE_BASIC = (1UL << vector);
|
||||
}
|
||||
#else
|
||||
arm_gic_umask(0, vector);
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* This function returns the active interrupt number.
|
||||
* @param none
|
||||
*/
|
||||
int rt_hw_interrupt_get_irq(void)
|
||||
{
|
||||
#ifndef SOC_BCM283x
|
||||
return arm_gic_get_active_irq(0);
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* This function acknowledges the interrupt.
|
||||
* @param vector the interrupt number
|
||||
*/
|
||||
void rt_hw_interrupt_ack(int vector)
|
||||
{
|
||||
#ifndef SOC_BCM283x
|
||||
arm_gic_ack(0, vector);
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifndef SOC_BCM283x
|
||||
/**
|
||||
* This function set interrupt CPU targets.
|
||||
* @param vector: the interrupt number
|
||||
* cpu_mask: target cpus mask, one bit for one core
|
||||
*/
|
||||
void rt_hw_interrupt_set_target_cpus(int vector, unsigned long cpu_mask)
|
||||
{
|
||||
#ifdef BSP_USING_GIC
|
||||
#ifdef BSP_USING_GICV3
|
||||
arm_gic_set_router_cpu(0, vector, cpu_mask);
|
||||
#else
|
||||
arm_gic_set_cpu(0, vector, (unsigned int) cpu_mask);
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* This function get interrupt CPU targets.
|
||||
* @param vector: the interrupt number
|
||||
* @return target cpus mask, one bit for one core
|
||||
*/
|
||||
unsigned int rt_hw_interrupt_get_target_cpus(int vector)
|
||||
{
|
||||
return arm_gic_get_target_cpu(0, vector);
|
||||
}
|
||||
|
||||
/**
|
||||
* This function set interrupt triger mode.
|
||||
* @param vector: the interrupt number
|
||||
* mode: interrupt triger mode; 0: level triger, 1: edge triger
|
||||
*/
|
||||
void rt_hw_interrupt_set_triger_mode(int vector, unsigned int mode)
|
||||
{
|
||||
arm_gic_set_configuration(0, vector, mode & IRQ_MODE_MASK);
|
||||
}
|
||||
|
||||
/**
|
||||
* This function get interrupt triger mode.
|
||||
* @param vector: the interrupt number
|
||||
* @return interrupt triger mode; 0: level triger, 1: edge triger
|
||||
*/
|
||||
unsigned int rt_hw_interrupt_get_triger_mode(int vector)
|
||||
{
|
||||
return arm_gic_get_configuration(0, vector);
|
||||
}
|
||||
|
||||
/**
|
||||
* This function set interrupt pending flag.
|
||||
* @param vector: the interrupt number
|
||||
*/
|
||||
void rt_hw_interrupt_set_pending(int vector)
|
||||
{
|
||||
arm_gic_set_pending_irq(0, vector);
|
||||
}
|
||||
|
||||
/**
|
||||
* This function get interrupt pending flag.
|
||||
* @param vector: the interrupt number
|
||||
* @return interrupt pending flag, 0: not pending; 1: pending
|
||||
*/
|
||||
unsigned int rt_hw_interrupt_get_pending(int vector)
|
||||
{
|
||||
return arm_gic_get_pending_irq(0, vector);
|
||||
}
|
||||
|
||||
/**
|
||||
* This function clear interrupt pending flag.
|
||||
* @param vector: the interrupt number
|
||||
*/
|
||||
void rt_hw_interrupt_clear_pending(int vector)
|
||||
{
|
||||
arm_gic_clear_pending_irq(0, vector);
|
||||
}
|
||||
|
||||
/**
 * Set the priority of an interrupt.
 * @param vector: the interrupt number
 * @param priority: the priority to set (lower value = higher priority on GIC)
 */
void rt_hw_interrupt_set_priority(int vector, unsigned int priority)
{
    arm_gic_set_priority(0, vector, priority);
}
|
||||
|
||||
/**
 * Get the priority of an interrupt.
 * @param vector: the interrupt number
 * @return interrupt priority value
 */
unsigned int rt_hw_interrupt_get_priority(int vector)
{
    return arm_gic_get_priority(0, vector);
}
|
||||
|
||||
/**
 * Set the CPU interface priority masking threshold; interrupts with a
 * priority value not lower than the mask are not signalled to the core.
 * @param priority: priority masking threshold
 */
void rt_hw_interrupt_set_priority_mask(unsigned int priority)
{
    arm_gic_set_interface_prior_mask(0, priority);
}
|
||||
|
||||
/**
 * Get the CPU interface priority masking threshold.
 * @return priority masking threshold
 */
unsigned int rt_hw_interrupt_get_priority_mask(void)
{
    return arm_gic_get_interface_prior_mask(0);
}
|
||||
|
||||
/**
 * Set the priority grouping field split point.
 * @param bits: priority grouping field split point (0..7)
 * @return 0: success; -1: failed (bits out of range)
 */
int rt_hw_interrupt_set_prior_group_bits(unsigned int bits)
{
    if (bits >= 8)
    {
        return -1;
    }

    /* the GIC binary point register counts from the opposite end: 7 - bits */
    arm_gic_set_binary_point(0, (7 - bits));

    return 0;
}
|
||||
|
||||
/**
 * Get the priority grouping field split point.
 * @return priority grouping field split point (0..7)
 */
unsigned int rt_hw_interrupt_get_prior_group_bits(void)
{
    unsigned int binary_point = arm_gic_get_binary_point(0) & 0x07;

    /* convert the hardware binary point back to the split-point count */
    return 7 - binary_point;
}
|
||||
#endif /* SOC_BCM283x */
|
||||
|
||||
/**
 * This function will install an interrupt service routine to an interrupt.
 * @param vector the interrupt number
 * @param handler the interrupt service routine to be installed
 * @param param user argument passed to the handler when the interrupt fires
 * @param name readable name, recorded only when RT_USING_INTERRUPT_INFO is set
 * @return the previously installed handler, or RT_NULL
 */
rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
                                         void *param, const char *name)
{
    rt_isr_handler_t old_handler = RT_NULL;

    if (vector < MAX_HANDLERS)
    {
        old_handler = isr_table[vector].handler;

        if (handler != RT_NULL)
        {
#ifdef RT_USING_INTERRUPT_INFO
            rt_strncpy(isr_table[vector].name, name, RT_NAME_MAX);
#endif /* RT_USING_INTERRUPT_INFO */
            isr_table[vector].handler = handler;
            isr_table[vector].param = param;
        }
    }

#ifdef BSP_USING_GIC
    /* route shared peripheral interrupts to the installing core;
     * IDs below are SGIs/PPIs which are banked per-core and need no routing.
     * NOTE(review): SPIs start at ID 32, so `> 32` skips vector 32 itself —
     * confirm whether `>= 32` was intended. */
    if (vector > 32)
    {
#ifdef BSP_USING_GICV3
        /* GICv3 routes by affinity value, taken from this core's MPIDR_EL1 */
        rt_uint64_t cpu_affinity_val;
        __asm__ volatile ("mrs %0, mpidr_el1":"=r"(cpu_affinity_val));
        rt_hw_interrupt_set_target_cpus(vector, cpu_affinity_val);
#else
        /* GICv2 routes by an 8-bit CPU mask, one bit per core */
        rt_hw_interrupt_set_target_cpus(vector, 1 << rt_hw_cpu_id());
#endif /* BSP_USING_GICV3 */
    }
#endif

    return old_handler;
}
|
||||
|
||||
#if defined(RT_USING_SMP) || defined(RT_USING_AMP)
|
||||
/**
 * Send a software generated interrupt (IPI) to a set of cores.
 * @param ipi_vector the SGI number to raise
 * @param cpu_mask target cores, one bit per core
 */
void rt_hw_ipi_send(int ipi_vector, unsigned int cpu_mask)
{
#ifdef BSP_USING_GICV2
    arm_gic_send_sgi(0, ipi_vector, cpu_mask, 0);
#elif defined(BSP_USING_GICV3)
    /* GICv3 takes a multi-word cpu mask; only word 0 is filled from the
     * 32-bit argument.
     * NOTE(review): when RT_CPUS_NR > 32 the remaining words are left
     * uninitialized — confirm against arm_gic_send_affinity_sgi(). */
    rt_uint32_t gicv3_cpu_mask[(RT_CPUS_NR + 31) >> 5];
    gicv3_cpu_mask[0] = cpu_mask;
    arm_gic_send_affinity_sgi(0, ipi_vector, gicv3_cpu_mask, GICV3_ROUTED_TO_SPEC);
#endif
}
|
||||
|
||||
/**
 * Install a handler for an inter-processor interrupt.
 * @param ipi_vector the IPI number (installed into the common ISR table)
 * @param ipi_isr_handler handler to call when the IPI arrives
 */
void rt_hw_ipi_handler_install(int ipi_vector, rt_isr_handler_t ipi_isr_handler)
{
    /* note: ipi_vector maybe different with irq_vector */
    rt_hw_interrupt_install(ipi_vector, ipi_isr_handler, 0, "IPI_HANDLER");
}
|
||||
#endif
|
||||
|
||||
#if defined(FINSH_USING_MSH) && defined(RT_USING_INTERRUPT_INFO)
|
||||
int list_isr()
|
||||
{
|
||||
int idx;
|
||||
|
||||
rt_kprintf("%-*.*s nr handler param counter ", RT_NAME_MAX, RT_NAME_MAX, "irq");
|
||||
#ifdef RT_USING_SMP
|
||||
for (int i = 0; i < RT_CPUS_NR; i++)
|
||||
{
|
||||
rt_kprintf(" cpu%2d ", i);
|
||||
}
|
||||
#endif
|
||||
rt_kprintf("\n");
|
||||
for (int i = 0; i < RT_NAME_MAX; i++)
|
||||
{
|
||||
rt_kprintf("-");
|
||||
}
|
||||
rt_kprintf(" ---- ------------------ ------------------ ----------------");
|
||||
#ifdef RT_USING_SMP
|
||||
for (int i = 0; i < RT_CPUS_NR; i++)
|
||||
{
|
||||
rt_kprintf(" -------");
|
||||
}
|
||||
#endif
|
||||
rt_kprintf("\n");
|
||||
for (idx = 0; idx < MAX_HANDLERS; idx++)
|
||||
{
|
||||
if (isr_table[idx].handler != RT_NULL)
|
||||
{
|
||||
rt_kprintf("%*.s %4d %p %p %16d", RT_NAME_MAX, isr_table[idx].name, idx, isr_table[idx].handler,
|
||||
isr_table[idx].param, isr_table[idx].counter);
|
||||
#ifdef RT_USING_SMP
|
||||
for (int i = 0; i < RT_CPUS_NR; i++)
|
||||
rt_kprintf(" %7d", isr_table[idx].cpu_counter[i]);
|
||||
#endif
|
||||
rt_kprintf("\n");
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
#include "finsh.h"
|
||||
MSH_CMD_EXPORT(list_isr, list isr)
|
||||
#endif
|
||||
953
RT_Thread/libcpu/aarch64/common/mmu.c
Normal file
953
RT_Thread/libcpu/aarch64/common/mmu.c
Normal file
@ -0,0 +1,953 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2023, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2012-01-10 bernard porting to AM1808
|
||||
* 2021-11-28 GuEe-GUI first version
|
||||
* 2022-12-10 WangXiaoyao porting to MM
|
||||
* 2024-07-08 Shell added support for ASID
|
||||
*/
|
||||
|
||||
#define DBG_TAG "hw.mmu"
|
||||
#define DBG_LVL DBG_INFO
|
||||
#include <rtdbg.h>
|
||||
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
|
||||
#define __MMU_INTERNAL
|
||||
|
||||
#include "mm_aspace.h"
|
||||
#include "mm_page.h"
|
||||
#include "mmu.h"
|
||||
#include "tlb.h"
|
||||
|
||||
#include "ioremap.h"
|
||||
#ifdef RT_USING_SMART
|
||||
#include <lwp_mm.h>
|
||||
#endif
|
||||
|
||||
#define TCR_CONFIG_TBI0 rt_hw_mmu_config_tbi(0)
|
||||
#define TCR_CONFIG_TBI1 rt_hw_mmu_config_tbi(1)
|
||||
|
||||
#define MMU_LEVEL_MASK 0x1ffUL
|
||||
#define MMU_LEVEL_SHIFT 9
|
||||
#define MMU_ADDRESS_BITS 39
|
||||
#define MMU_ADDRESS_MASK 0x0000fffffffff000UL
|
||||
#define MMU_ATTRIB_MASK 0xfff0000000000ffcUL
|
||||
|
||||
#define MMU_TYPE_MASK 3UL
|
||||
#define MMU_TYPE_USED 1UL
|
||||
#define MMU_TYPE_BLOCK 1UL
|
||||
#define MMU_TYPE_TABLE 3UL
|
||||
#define MMU_TYPE_PAGE 3UL
|
||||
|
||||
#define MMU_TBL_BLOCK_2M_LEVEL 2
|
||||
#define MMU_TBL_PAGE_4k_LEVEL 3
|
||||
#define MMU_TBL_LEVEL_NR 4
|
||||
|
||||
/* restrict virtual address on usage of RT_NULL */
|
||||
#ifndef KERNEL_VADDR_START
|
||||
#define KERNEL_VADDR_START 0x1000
|
||||
#endif
|
||||
|
||||
/* Kernel level-0 translation table; must be 4K aligned so it can be loaded
 * into TTBR directly. 512 entries x 8 bytes = one 4K page. */
volatile unsigned long MMUTable[512] __attribute__((aligned(4 * 1024)));

/* One step of a table walk: the entry position in the parent table and the
 * (virtual) address of the child table page it points to. */
struct mmu_level_info
{
    unsigned long *pos;   /* entry inside the parent-level table */
    void *page;           /* child table page (virtual address) */
};
|
||||
|
||||
/* Remove the 4K mapping of v_addr from the 4-level table, releasing
 * intermediate table pages whose reference count drops to zero.
 * ("kenrel" is a historical typo for "kernel"; the name is kept because it
 * is referenced throughout this file.) */
static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
{
    int level;
    unsigned long va = (unsigned long)v_addr;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    struct mmu_level_info level_info[4];
    int ref;
    int level_shift = MMU_ADDRESS_BITS;
    unsigned long *pos;

    rt_memset(level_info, 0, sizeof level_info);
    /* walk down, recording the entry position and table page at each level;
     * stop early on an unused entry or a 2M block */
    for (level = 0; level < MMU_TBL_LEVEL_NR; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        page = cur_lv_tbl[off];
        if (!(page & MMU_TYPE_USED))
        {
            break;
        }
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            break;
        }
        /* next table entry in current level */
        level_info[level].pos = cur_lv_tbl + off;
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        /* table entries hold physical addresses; convert to virtual */
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_info[level].page = cur_lv_tbl;
        level_shift -= MMU_LEVEL_SHIFT;
    }

    /* clear the leaf (4K page) entry, if the walk reached it */
    level = MMU_TBL_PAGE_4k_LEVEL;
    pos = level_info[level].pos;
    if (pos)
    {
        *pos = (unsigned long)RT_NULL;
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, pos, sizeof(void *));
    }
    level--;

    /* walk back up: drop one reference per table page; when this unmap holds
     * the last reference (ref == 1) also clear the parent entry so the page
     * can actually be released by rt_pages_free() */
    while (level >= 0)
    {
        pos = level_info[level].pos;
        if (pos)
        {
            void *cur_page = level_info[level].page;
            ref = rt_page_ref_get(cur_page, 0);
            if (ref == 1)
            {
                *pos = (unsigned long)RT_NULL;
                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, pos, sizeof(void *));
            }
            rt_pages_free(cur_page, 0);
        }
        else
        {
            break;
        }
        level--;
    }

    return;
}
|
||||
|
||||
/* Map one 4K page vaddr->paddr into the 4-level table, allocating
 * intermediate table pages on demand. Each table page's reference count
 * tracks how many mappings pass through it. Returns 0 or an
 * MMU_MAP_ERROR_* code; on failure the partially built path is torn down. */
static int _kernel_map_4K(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr)
{
    int ret = 0;
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    rt_ubase_t va = (rt_ubase_t)vaddr;
    rt_ubase_t pa = (rt_ubase_t)paddr;
    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    /* walk levels 0..2, creating missing intermediate tables */
    for (level = 0; level < MMU_TBL_PAGE_4k_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = (unsigned long)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
            if (!page)
            {
                ret = MMU_MAP_ERROR_NOPAGE;
                goto err;
            }
            rt_memset((void *)page, 0, ARCH_PAGE_SIZE);
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
            /* table entries store physical addresses */
            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
        }
        else
        {
            page = cur_lv_tbl[off];
            page &= MMU_ADDRESS_MASK;
            /* page to va */
            page -= PV_OFFSET;
            /* one more mapping now passes through this table page */
            rt_page_ref_inc((void *)page, 0);
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* is block! error! a 2M block already covers this range */
            ret = MMU_MAP_ERROR_CONFLICT;
            goto err;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now at the leaf level: write the 4K page descriptor */
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_PAGE); /* page */
    off = (va >> ARCH_PAGE_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa; /* page */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    return ret;
err:
    /* undo reference counts / tables acquired on the way down */
    _kenrel_unmap_4K(lv0_tbl, (void *)va);
    return ret;
}
|
||||
|
||||
/* Map one 2M block vaddr->paddr (levels 0..1 walked, level-2 entry written
 * as a block descriptor). Same allocation/refcount scheme as
 * _kernel_map_4K. Returns 0 or an MMU_MAP_ERROR_* code. */
static int _kernel_map_2M(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr)
{
    int ret = 0;
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    unsigned long va = (unsigned long)vaddr;
    unsigned long pa = (unsigned long)paddr;

    int level_shift = MMU_ADDRESS_BITS;

    /* virtual address must be 2M aligned */
    if (va & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    /* NOTE(review): pa is only checked against ARCH_PAGE_MASK (4K), not
     * ARCH_SECTION_MASK — confirm whether 2M alignment of pa is required */
    if (pa & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = (unsigned long)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
            if (!page)
            {
                ret = MMU_MAP_ERROR_NOPAGE;
                goto err;
            }
            rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
            /* table entries store physical addresses */
            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
        }
        else
        {
            page = cur_lv_tbl[off];
            page &= MMU_ADDRESS_MASK;
            /* page to va */
            page -= PV_OFFSET;
            rt_page_ref_inc((void *)page, 0);
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* is block! error! */
            ret = MMU_MAP_ERROR_CONFLICT;
            goto err;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now at the block level: write the 2M block descriptor */
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_BLOCK); /* block */
    off = (va >> ARCH_SECTION_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa;
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    return ret;
err:
    _kenrel_unmap_4K(lv0_tbl, (void *)va);
    return ret;
}
|
||||
|
||||
/**
 * Map [v_addr, v_addr+size) to [p_addr, p_addr+size) in the given address
 * space, choosing 2M block mappings where both alignment and remaining size
 * allow, and 4K pages otherwise.
 *
 * @param aspace target address space
 * @param v_addr page-aligned virtual start (size must be page aligned too)
 * @param p_addr physical start
 * @param size   bytes to map
 * @param attr   MMU attribute bits for every entry
 * @return the original v_addr on success; NULL on failure (all partial
 *         mappings made by this call are undone first)
 */
void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
                    size_t attr)
{
    int ret = -1;

    void *unmap_va = v_addr;   /* remembers the start for the undo loop */
    size_t remaining_sz = size;
    size_t stride;
    int (*mapper)(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr);

    RT_ASSERT(!(size & ARCH_PAGE_MASK));

    while (remaining_sz)
    {
        if (((rt_ubase_t)v_addr & ARCH_SECTION_MASK) || (remaining_sz < ARCH_SECTION_SIZE))
        {
            /* legacy 4k mapping */
            stride = ARCH_PAGE_SIZE;
            mapper = _kernel_map_4K;
        }
        else
        {
            /* 2m huge page */
            stride = ARCH_SECTION_SIZE;
            mapper = _kernel_map_2M;
        }

        /* check aliasing */
#ifdef RT_DEBUGGING_ALIASING
#define _ALIAS_OFFSET(addr) ((long)(addr) & (RT_PAGE_AFFINITY_BLOCK_SIZE - 1))
        if (rt_page_is_member((rt_base_t)p_addr) && _ALIAS_OFFSET(v_addr) != _ALIAS_OFFSET(p_addr))
        {
            LOG_W("Possibly aliasing on va(0x%lx) to pa(0x%lx)", v_addr, p_addr);
            rt_backtrace();
            RT_ASSERT(0);
        }
#endif /* RT_DEBUGGING_ALIASING */

        MM_PGTBL_LOCK(aspace);
        ret = mapper(aspace->page_table, v_addr, p_addr, attr);
        MM_PGTBL_UNLOCK(aspace);

        if (ret != 0)
        {
            /* other types of return value are taken as programming error */
            RT_ASSERT(ret == MMU_MAP_ERROR_NOPAGE);
            /* error, undo map: unmap everything mapped so far, 4K at a time.
             * NOTE(review): the undo loop advances by the *current* stride —
             * confirm this steps correctly when earlier iterations used a
             * different stride. */
            while (unmap_va != v_addr)
            {
                MM_PGTBL_LOCK(aspace);
                _kenrel_unmap_4K(aspace->page_table, (void *)unmap_va);
                MM_PGTBL_UNLOCK(aspace);
                unmap_va = (char *)unmap_va + stride;
            }
            break;
        }

        remaining_sz -= stride;
        v_addr = (char *)v_addr + stride;
        p_addr = (char *)p_addr + stride;
    }

    if (ret == 0)
    {
        /* unmap_va still holds the original v_addr when nothing failed */
        return unmap_va;
    }

    return NULL;
}
|
||||
|
||||
/**
 * Unmap [v_addr, v_addr+size) from the address space, page by page.
 * Caller guarantees that v_addr and size are page aligned. Pages that are
 * not currently mapped are skipped.
 */
void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size)
{
    char *va = v_addr;
    size_t npages = size >> ARCH_PAGE_SHIFT;

    if (!aspace->page_table)
    {
        return;
    }

    for (; npages > 0; npages--, va += ARCH_PAGE_SIZE)
    {
        MM_PGTBL_LOCK(aspace);
        /* only touch entries that actually resolve to a physical page */
        if (rt_hw_mmu_v2p(aspace, va) != ARCH_MAP_FAILED)
        {
            _kenrel_unmap_4K(aspace->page_table, va);
        }
        MM_PGTBL_UNLOCK(aspace);
    }
}
|
||||
|
||||
#ifdef ARCH_USING_ASID
|
||||
/**
 * the asid is to identified specialized address space on TLB.
 * In the best case, each address space has its own exclusive asid. However,
 * ARM only guarantee with 8 bits of ID space, which give us only 254(except
 * the reserved 1 ASID for kernel).
 */

static rt_spinlock_t _asid_lock = RT_SPINLOCK_INIT;

/* Return the ASID for this aspace, allocating one from a simple bump
 * counter on first use. When the counter overflows it restarts at 1, so
 * stale TLB entries are cleared by invalidating the aspace. */
rt_uint16_t _aspace_get_asid(rt_aspace_t aspace)
{
    static rt_uint16_t _asid_pool = 0;
    rt_uint16_t asid_to, asid_from;
    rt_ubase_t ttbr0_from;

    /* asid 0 means "not allocated yet" */
    asid_to = aspace->asid;
    if (asid_to == 0)
    {
        rt_spin_lock(&_asid_lock);
#define MAX_ASID (1ul << MMU_SUPPORTED_ASID_BITS)
        if (_asid_pool && _asid_pool < MAX_ASID)
        {
            asid_to = ++_asid_pool;
            LOG_D("Allocated ASID %d to PID %d(aspace %p)", asid_to, lwp_self()->pid, aspace);
        }
        else
        {
            /* pool exhausted (or first allocation): wrap around to 1 */
            asid_to = _asid_pool = 1;
            LOG_D("Overflowed ASID %d to PID %d(aspace %p)", asid_to, lwp_self()->pid, aspace);
        }

        rt_spin_unlock(&_asid_lock);

        aspace->asid = asid_to;
        rt_hw_tlb_invalidate_aspace(aspace);
    }

    /* compare with the ASID currently loaded in TTBR0_EL1 */
    __asm__ volatile("mrs %0, ttbr0_el1" :"=r"(ttbr0_from));
    asid_from = ttbr0_from >> MMU_ASID_SHIFT;
    /* NOTE(review): equal ASIDs are treated as a conflict and force an
     * invalidate — presumably because a recycled ASID may still tag another
     * aspace's TLB entries; confirm the intended condition. */
    if (asid_from == asid_to)
    {
        LOG_D("Conflict ASID. from %d, to %d", asid_from, asid_to);
        rt_hw_tlb_invalidate_aspace(aspace);
    }
    else
    {
        LOG_D("ASID switched. from %d, to %d", asid_from, asid_to);
    }

    return asid_to;
}
|
||||
|
||||
#else
|
||||
|
||||
|
||||
/* Without ASID support every address-space switch must flush the whole TLB;
 * always use ASID 0. */
rt_uint16_t _aspace_get_asid(rt_aspace_t aspace)
{
    rt_hw_tlb_invalidate_all();
    return 0;
}
|
||||
#endif /* ARCH_USING_ASID */
|
||||
|
||||
/* Compose a TTBR0 value: table physical base in the low bits, ASID in the
 * high bits (at MMU_ASID_SHIFT). */
#define CREATE_TTBR0(pgtbl, asid) ((rt_ubase_t)(pgtbl) | (rt_ubase_t)(asid) << MMU_ASID_SHIFT)

/**
 * Switch the user translation table (TTBR0_EL1) to the given address space.
 * The kernel space itself is never installed into TTBR0 here.
 */
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
    if (aspace != &rt_kernel_space)
    {
        rt_ubase_t ttbr0;
        void *pgtbl = aspace->page_table;
        /* hardware needs the physical address of the table */
        pgtbl = rt_kmem_v2p(pgtbl);

        ttbr0 = CREATE_TTBR0(pgtbl, _aspace_get_asid(aspace));

        __asm__ volatile("msr ttbr0_el1, %0" ::"r"(ttbr0));
        /* synchronize the register write before continuing */
        __asm__ volatile("isb" ::: "memory");
    }
}
|
||||
|
||||
/**
 * Install the kernel translation table and flush TLB and instruction caches.
 * With RT_USING_SMART the kernel lives in the high half (TTBR1); otherwise
 * everything goes through TTBR0. `tbl` is a virtual address; PV_OFFSET
 * converts it to physical for the register.
 */
void rt_hw_mmu_ktbl_set(unsigned long tbl)
{
#ifdef RT_USING_SMART
    tbl += PV_OFFSET;
    __asm__ volatile("msr TTBR1_EL1, %0\n dsb sy\nisb" ::"r"(tbl) : "memory");
#else
    /* NOTE(review): the non-SMART path does not apply PV_OFFSET — presumably
     * identity-mapped here; confirm. */
    __asm__ volatile("msr TTBR0_EL1, %0\n dsb sy\nisb" ::"r"(tbl) : "memory");
#endif
    /* drop all stale translations and prefetched instructions */
    __asm__ volatile("tlbi vmalle1\n dsb sy\nisb" ::: "memory");
    __asm__ volatile("ic ialluis\n dsb sy\nisb" ::: "memory");
}
|
||||
|
||||
/**
 * @brief setup Page Table for kernel space. It's a fixed map
 * and all mappings cannot be changed after initialization.
 *
 * Memory region in struct mem_desc must be page aligned,
 * otherwise is a failure and no report will be
 * returned.
 *
 * @param aspace  target (kernel) address space
 * @param mdesc   array of memory region descriptors to map
 * @param desc_nr number of entries in mdesc
 */
void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
{
    void *err;
    for (size_t i = 0; i < desc_nr; i++)
    {
        size_t attr;
        /* translate the descriptor's memory type to MMU attribute bits */
        switch (mdesc->attr)
        {
        case NORMAL_MEM:
            attr = MMU_MAP_K_RWCB;
            break;
        case NORMAL_NOCACHE_MEM:
            /* NOTE(review): same cached attribute as NORMAL_MEM — confirm
             * this is intentional and not meant to be a non-cacheable map */
            attr = MMU_MAP_K_RWCB;
            break;
        case DEVICE_MEM:
            attr = MMU_MAP_K_DEVICE;
            break;
        default:
            attr = MMU_MAP_K_DEVICE;
        }

        struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                     .limit_start = aspace->start,
                                     .limit_range_size = aspace->size,
                                     .map_size = mdesc->vaddr_end -
                                                 mdesc->vaddr_start + 1,
                                     .prefer = (void *)mdesc->vaddr_start};

        /* no physical address given: assume linear (va + PV_OFFSET) */
        if (mdesc->paddr_start == (rt_size_t)ARCH_MAP_FAILED)
            mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;
        int retval;
        retval = rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
                                          mdesc->paddr_start >> MM_PAGE_SHIFT, &err);
        if (retval)
        {
            LOG_E("%s: map failed with code %d", __FUNCTION__, retval);
            RT_ASSERT(0);
        }
        mdesc++;
    }

    /* activate the freshly built kernel table and release init-only pages */
    rt_hw_mmu_ktbl_set((unsigned long)rt_kernel_space.page_table);
    rt_page_cleanup();
}
|
||||
|
||||
/* Record the ioremap window [vaddr, vaddr+size) and place the memory-page
 * region (MPR) directly below it. */
static void _init_region(void *vaddr, size_t size)
{
    rt_ioremap_start = vaddr;
    rt_ioremap_size = size;
    rt_mpr_start = (char *)rt_ioremap_start - rt_mpr_size;
}
|
||||
|
||||
|
||||
/**
 * This function will initialize rt_mmu_info structure.
 *
 * @param aspace    address space to initialize
 * @param v_address virtual address of the ioremap region
 * @param size      size of the ioremap region
 * @param vtable    mmu table
 * @param pv_off    pv offset in kernel space (unused here; the offset is
 *                  configured elsewhere)
 *
 * @return 0 on successful and -1 for fail
 */
int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, size_t size,
                       size_t *vtable, size_t pv_off)
{
    size_t va_s, va_e;

    if (!aspace || !vtable)
    {
        return -1;
    }

    va_s = (size_t)v_address;
    va_e = (size_t)v_address + size - 1;

    /* reject a region that wraps around the address space */
    if (va_e < va_s)
    {
        return -1;
    }

    /* sanity-check in 2M-section units; section 0 is reserved so that
     * RT_NULL never becomes a valid mapped address */
    va_s >>= ARCH_SECTION_SHIFT;
    va_e >>= ARCH_SECTION_SHIFT;

    if (va_s == 0)
    {
        return -1;
    }

    rt_aspace_init(aspace, (void *)KERNEL_VADDR_START, 0 - KERNEL_VADDR_START,
                   vtable);

    _init_region(v_address, size);

    return 0;
}
|
||||
|
||||
/* Weak hook returning the TCR_EL1 TBIn (top-byte-ignore) bit for the given
 * index (0 for TTBR0, 1 for TTBR1). Boards override this to enable pointer
 * tagging; the default leaves it disabled. */
rt_weak long rt_hw_mmu_config_tbi(int tbi_index)
{
    return 0;
}
|
||||
|
||||
/************ setting el1 mmu register**************
  MAIR_EL1
  index 0 : memory outer writeback, write/read alloc
  index 1 : memory nocache
  index 2 : device nGnRnE
*****************************************************/
void mmu_tcr_init(void)
{
    unsigned long val64;
    unsigned long pa_range;

    /* MAIR_EL1 = 0x00447f encodes the three attribute indexes above */
    val64 = 0x00447fUL;
    __asm__ volatile("msr MAIR_EL1, %0\n dsb sy\n" ::"r"(val64));

    /* read the supported physical address range from the CPU */
    __asm__ volatile ("mrs %0, ID_AA64MMFR0_EL1":"=r"(val64));
    pa_range = val64 & 0xf; /* PARange */

    /* TCR_EL1: 48-bit VA on both halves, 4K granule, write-back cacheable
     * walks, shareability as annotated per field below */
    val64 = (16UL << 0)                /* t0sz 48bit */
            | (0x0UL << 6)             /* reserved */
            | (0x0UL << 7)             /* epd0 */
            | (0x3UL << 8)             /* t0 wb cacheable */
            | (0x3UL << 10)            /* inner shareable */
            | (0x2UL << 12)            /* t0 outer shareable */
            | (0x0UL << 14)            /* t0 4K */
            | (16UL << 16)             /* t1sz 48bit */
            | (0x0UL << 22)            /* define asid use ttbr0.asid */
            | (0x0UL << 23)            /* epd1 */
            | (0x3UL << 24)            /* t1 inner wb cacheable */
            | (0x3UL << 26)            /* t1 outer wb cacheable */
            | (0x2UL << 28)            /* t1 outer shareable */
            | (0x2UL << 30)            /* t1 4k */
            | (pa_range << 32)         /* PA range */
            | (0x0UL << 35)            /* reserved */
            | (0x1UL << 36)            /* as: 0:8bit 1:16bit */
            | (TCR_CONFIG_TBI0 << 37)  /* tbi0 */
            | (TCR_CONFIG_TBI1 << 38); /* tbi1 */
    __asm__ volatile("msr TCR_EL1, %0\n" ::"r"(val64));
}
|
||||
|
||||
/* One 4K translation-table page (512 x 8-byte entries). */
struct page_table
{
    unsigned long page[512];
};

/* Early boot page pool, installed by set_free_page() before the real page
 * allocator exists. */
static struct page_table *__init_page_array;
static unsigned long __page_off = 0UL;

/* Return the base address of the early page pool. */
unsigned long get_ttbrn_base(void)
{
    return (unsigned long)__init_page_array;
}

/* Install the backing array for the early page pool. */
void set_free_page(void *page_array)
{
    __init_page_array = page_array;
}

/* Hand out the next unused 4K page from the early pool. */
unsigned long get_free_page(void)
{
    struct page_table *next = &__init_page_array[__page_off];

    __page_off += 1;
    return (unsigned long)next->page;
}
|
||||
|
||||
/* Early-boot variant of the 2M mapper: intermediate tables come from the
 * early page pool (get_free_page) instead of the page allocator, there is
 * no reference counting, and cache maintenance is optional via `flush`.
 * Returns 0 or an MMU_MAP_ERROR_* code. */
static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va,
                               unsigned long pa, unsigned long attr,
                               rt_bool_t flush)
{
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = get_free_page();
            if (!page)
            {
                return MMU_MAP_ERROR_NOPAGE;
            }
            rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
            /* no PV_OFFSET here: the early pool is addressed physically */
            cur_lv_tbl[off] = page | MMU_TYPE_TABLE;
            if (flush)
            {
                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
            }
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* is block! error! */
            return MMU_MAP_ERROR_CONFLICT;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* write the 2M block descriptor */
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_BLOCK); /* block */
    off = (va >> ARCH_SECTION_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa;
    if (flush)
    {
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    }
    return 0;
}
|
||||
|
||||
/* Return the (virtual) address of the translation table currently installed
 * in TTBR0_EL1. The mask keeps bits 1..47 (table base), dropping bit 0 and
 * the ASID field above bit 47. */
void *rt_hw_mmu_tbl_get(void)
{
    uintptr_t tbl;
    __asm__ volatile("MRS %0, TTBR0_EL1" : "=r"(tbl));
    return rt_kmem_p2v((void *)(tbl & ((1ul << 48) - 2)));
}
|
||||
|
||||
/**
 * Identity-map a device region early in boot, before the real ioremap
 * machinery is available. Covers [paddr, paddr+size) with 2M device-memory
 * sections in the table currently loaded in TTBR0.
 *
 * @param paddr physical start (need not be 2M aligned; coverage is widened)
 * @param size  bytes to map; 0 returns RT_NULL
 * @return paddr (identity mapping) on success, RT_NULL on failure
 */
void *rt_ioremap_early(void *paddr, size_t size)
{
    volatile size_t count;
    rt_ubase_t base;
    /* cache the active table across calls */
    static void *tbl = RT_NULL;

    if (!size)
    {
        return RT_NULL;
    }

    if (!tbl)
    {
        tbl = rt_hw_mmu_tbl_get();
    }

    /* get the total size required including overhead for alignment */
    count = (size + ((rt_ubase_t)paddr & ARCH_SECTION_MASK)
            + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT;
    base = (rt_ubase_t)paddr & (~ARCH_SECTION_MASK);

    while (count --> 0)
    {
        if (_map_single_page_2M(tbl, base, base, MMU_MAP_K_DEVICE, RT_TRUE))
        {
            return RT_NULL;
        }

        base += ARCH_SECTION_SIZE;
    }

    return paddr;
}
|
||||
|
||||
static int _init_map_2M(unsigned long *lv0_tbl, unsigned long va,
|
||||
unsigned long pa, unsigned long count,
|
||||
unsigned long attr)
|
||||
{
|
||||
unsigned long i;
|
||||
int ret;
|
||||
|
||||
if (va & ARCH_SECTION_MASK)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
if (pa & ARCH_SECTION_MASK)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
for (i = 0; i < count; i++)
|
||||
{
|
||||
ret = _map_single_page_2M(lv0_tbl, va, pa, attr, RT_FALSE);
|
||||
va += ARCH_SECTION_SIZE;
|
||||
pa += ARCH_SECTION_SIZE;
|
||||
if (ret != 0)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Walk the table for vaddr and return a pointer to the descriptor that maps
 * it (a 2M block entry or a 4K page entry), or NULL if unmapped. On return
 * *plvl_shf holds the shift of the level where the walk ended, i.e. the
 * mapping covers (1 << *plvl_shf) bytes. */
static unsigned long *_query(rt_aspace_t aspace, void *vaddr, int *plvl_shf)
{
    int level;
    unsigned long va = (unsigned long)vaddr;
    unsigned long *cur_lv_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = MMU_ADDRESS_BITS;

    cur_lv_tbl = aspace->page_table;
    RT_ASSERT(cur_lv_tbl);

    for (level = 0; level < MMU_TBL_PAGE_4k_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;

        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            /* hole in the table: not mapped */
            *plvl_shf = level_shift;
            return (void *)0;
        }

        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* found a block descriptor; it terminates the walk */
            *plvl_shf = level_shift;
            return &cur_lv_tbl[off];
        }

        /* descend: table entries store physical addresses */
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now is level MMU_TBL_PAGE_4k_LEVEL */
    off = (va >> ARCH_PAGE_SHIFT);
    off &= MMU_LEVEL_MASK;
    page = cur_lv_tbl[off];

    *plvl_shf = level_shift;
    if (!(page & MMU_TYPE_USED))
    {
        return (void *)0;
    }
    return &cur_lv_tbl[off];
}
|
||||
|
||||
/**
 * Translate a virtual address to a physical address in the given address
 * space. Kernel-space translation is delegated to rt_hw_mmu_kernel_v2p();
 * user spaces are resolved by a software table walk.
 *
 * @return the physical address, or ARCH_MAP_FAILED when unmapped
 */
void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *v_addr)
{
    int level_shift;
    unsigned long paddr;

    if (aspace == &rt_kernel_space)
    {
        paddr = (unsigned long)rt_hw_mmu_kernel_v2p(v_addr);
    }
    else
    {
        unsigned long *pte = _query(aspace, v_addr, &level_shift);

        if (pte)
        {
            /* descriptor base plus the offset inside the mapped granule */
            paddr = *pte & MMU_ADDRESS_MASK;
            paddr |= (rt_ubase_t)v_addr & ((1ul << level_shift) - 1);
        }
        else
        {
            paddr = (unsigned long)ARCH_MAP_FAILED;
        }
    }

    return (void *)paddr;
}
|
||||
|
||||
/* Rewrite a pte's MAIR attribute index from NORMAL_MEM to
 * NORMAL_NOCACHE_MEM. Only cached normal memory may be converted; any other
 * type yields -RT_ENOSYS. */
static int _noncache(rt_ubase_t *pte)
{
    const rt_ubase_t idx_shift = 2;
    const rt_ubase_t idx_mask = 0x7 << idx_shift;
    rt_ubase_t entry = *pte;

    if ((entry & idx_mask) != (NORMAL_MEM << idx_shift))
    {
        /* do not support other type to be noncache */
        return -RT_ENOSYS;
    }

    *pte = (entry & ~idx_mask) | (NORMAL_NOCACHE_MEM << idx_shift);
    return 0;
}
|
||||
|
||||
/* Rewrite a pte's MAIR attribute index from NORMAL_NOCACHE_MEM back to
 * NORMAL_MEM. Only non-cached normal memory may be converted; any other
 * type yields -RT_ENOSYS. */
static int _cache(rt_ubase_t *pte)
{
    const rt_ubase_t idx_shift = 2;
    const rt_ubase_t idx_mask = 0x7 << idx_shift;
    rt_ubase_t entry = *pte;

    if ((entry & idx_mask) != (NORMAL_NOCACHE_MEM << idx_shift))
    {
        /* do not support other type to be cache */
        return -RT_ENOSYS;
    }

    *pte = (entry & ~idx_mask) | (NORMAL_MEM << idx_shift);
    return 0;
}
|
||||
|
||||
/* Dispatch table for rt_hw_mmu_control(): one pte-rewriting handler per
 * supported rt_mmu_cntl command. Unlisted commands stay NULL. */
static int (*control_handler[MMU_CNTL_DUMMY_END])(rt_ubase_t *pte) = {
    [MMU_CNTL_CACHE] = _cache,
    [MMU_CNTL_NONCACHE] = _noncache,
};
|
||||
|
||||
/**
 * Apply an MMU control command (cache/non-cache attribute change) to every
 * mapping in [vaddr, vaddr+size).
 *
 * @return RT_EOK-style 0 semantics via `err`: last handler result, or
 *         -RT_EINVAL when no mapping was touched, or -RT_ENOSYS for an
 *         unknown command
 */
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                      enum rt_mmu_cntl cmd)
{
    int level_shift;
    int err = -RT_EINVAL;
    rt_ubase_t vstart = (rt_ubase_t)vaddr;
    rt_ubase_t vend = vstart + size;

    int (*handler)(rt_ubase_t * pte);
    if (cmd >= 0 && cmd < MMU_CNTL_DUMMY_END)
    {
        /* NOTE(review): control_handler[] may be NULL for commands inside
         * the range but absent from the table — confirm all enum values up
         * to MMU_CNTL_DUMMY_END are populated */
        handler = control_handler[cmd];

        while (vstart < vend)
        {
            rt_ubase_t *pte = _query(aspace, (void *)vstart, &level_shift);
            /* step by the size of the granule the walk ended on */
            rt_ubase_t range_end = vstart + (1ul << level_shift);
            /* NOTE(review): asserts the region never ends inside a mapped
             * granule; also asserts each handler succeeds — a pte of an
             * unsupported type would trip the second assert */
            RT_ASSERT(range_end <= vend);

            if (pte)
            {
                err = handler(pte);
                RT_ASSERT(err == RT_EOK);
            }
            vstart = range_end;
        }
    }
    else
    {
        err = -RT_ENOSYS;
    }

    return err;
}
|
||||
|
||||
/**
 * Build the two early boot translation tables:
 *  - tbl1 maps the kernel image at its virtual (link) address,
 *  - tbl0 identity-maps the same range at its physical address,
 * both with 2M cached sections, so the MMU can be switched on while
 * executing from physical addresses.
 *
 * @param tbl0  table for the identity mapping
 * @param tbl1  table for the kernel (virtual) mapping
 * @param size  bytes to cover, rounded up to 2M sections
 * @param pv_off physical-minus-virtual offset of the kernel image
 */
void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1,
                           unsigned long size, unsigned long pv_off)
{
    int ret;
    unsigned long count = (size + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT;
    unsigned long normal_attr = MMU_MAP_K_RWCB;
    extern unsigned char _start;
    /* kernel virtual base, rounded down to a 2M boundary */
    unsigned long va = (unsigned long) &_start - pv_off;
    va = RT_ALIGN_DOWN(va, 0x200000);

    /* setup pv off */
    rt_kmem_pvoff_set(pv_off);

    /* clean the first two pages */
    rt_memset((char *)tbl0, 0, ARCH_PAGE_SIZE);
    rt_memset((char *)tbl1, 0, ARCH_PAGE_SIZE);

    /* high table: va -> pa (= va + pv_off) */
    ret = _init_map_2M(tbl1, va, va + pv_off, count, normal_attr);
    if (ret != 0)
    {
        /* no console yet: hang so the failure is observable */
        while (1);
    }
    /* low table: identity map pa -> pa */
    ret = _init_map_2M(tbl0, va + pv_off, va + pv_off, count, normal_attr);
    if (ret != 0)
    {
        while (1);
    }
}
|
||||
|
||||
void *rt_hw_mmu_pgtbl_create(void)
|
||||
{
|
||||
size_t *mmu_table;
|
||||
mmu_table = (size_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
|
||||
if (!mmu_table)
|
||||
{
|
||||
return RT_NULL;
|
||||
}
|
||||
|
||||
memset(mmu_table, 0, ARCH_PAGE_SIZE);
|
||||
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
|
||||
return mmu_table;
|
||||
}
|
||||
|
||||
/**
 * @brief Release a page table previously created by rt_hw_mmu_pgtbl_create().
 *
 * @param pgtbl the single page (order 0) holding the table; ownership is
 *              transferred back to the page allocator.
 */
void rt_hw_mmu_pgtbl_delete(void *pgtbl)
{
    rt_pages_free(pgtbl, 0);
}
|
||||
144
RT_Thread/libcpu/aarch64/common/mp/context_gcc.S
Normal file
144
RT_Thread/libcpu/aarch64/common/mp/context_gcc.S
Normal file
@ -0,0 +1,144 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2024, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2021-05-18 Jesven the first version
|
||||
* 2023-06-24 Shell Support backtrace for user thread
|
||||
* 2024-01-06 Shell Fix barrier on irq_disable/enable
|
||||
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
|
||||
*/
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#define __ASSEMBLY__
|
||||
#endif
|
||||
|
||||
#include "context_gcc.h"
|
||||
#include "../include/vector_gcc.h"
|
||||
|
||||
#include <rtconfig.h>
|
||||
#include <asm-generic.h>
|
||||
#include <asm-fpu.h>
|
||||
#include <armv8.h>
|
||||
|
||||
.section .text
|
||||
|
||||
.globl rt_hw_context_switch_to
|
||||
|
||||
/* Store the new "current thread" pointer in the per-CPU thread register
 * when hardware thread-self is enabled; otherwise a no-op. */
.macro update_tidr, srcx
#ifdef ARCH_USING_HW_THREAD_SELF
    msr     ARM64_THREAD_REG, \srcx
#endif /* ARCH_USING_HW_THREAD_SELF */
.endm

/*
 * void rt_hw_context_switch_to(rt_ubase_t to, struct rt_thread *to_thread);
 * X0 --> to (address of the to-thread's saved stack pointer)
 * X1 --> to_thread
 *
 * First switch into a thread: no "from" context is saved.
 */
rt_hw_context_switch_to:
    /* load the thread's saved SP and adopt its stack */
    ldr     x0, [x0]
    mov     sp, x0
    update_tidr x1

    /* reserved to_thread (x19 is callee-saved across the calls below) */
    mov     x19, x1

    mov     x0, x19
    bl      rt_cpus_lock_status_restore
#ifdef RT_USING_SMART
    mov     x0, x19
    bl      lwp_user_setting_restore
#endif
    /* pop the thread's initial context and return into it */
    b       _context_switch_exit
|
||||
|
||||
.globl rt_hw_context_switch

/*
 * void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to,
 *                           struct rt_thread *to_thread);
 * X0 --> from (address of the from-thread's saved stack pointer)
 * X1 --> to (address of the to-thread's saved stack pointer)
 * X2 --> to_thread
 *
 * Thread-to-thread switch outside interrupt context.
 */
rt_hw_context_switch:
    /* push the callee-saved context of the current thread */
    SAVE_CONTEXT_SWITCH x19, x20
    mov     x3, sp
    str     x3, [x0]            // store sp in preempted tasks TCB
    ldr     x0, [x1]            // get new task stack pointer
    mov     sp, x0
    update_tidr x2

    /* backup thread self */
    mov     x19, x2

    mov     x0, x19
    bl      rt_cpus_lock_status_restore
#ifdef RT_USING_SMART
    mov     x0, x19
    bl      lwp_user_setting_restore
#endif
    b       _context_switch_exit
|
||||
|
||||
.globl rt_hw_irq_exit
.globl rt_hw_context_switch_interrupt

/* register roles for the interrupt-driven switch below */
#define EXP_FRAME   x19     /* saved exception frame pointer */
#define FROM_SPP    x20     /* &from_thread->sp */
#define TO_SPP      x21     /* &to_thread->sp */
#define TO_TCB      x22     /* to_thread TCB */
/*
 * void rt_hw_context_switch_interrupt(context, from sp, to sp, to tcb)
 * X0 :interrupt context (exception frame on the from-thread's stack)
 * X1 :addr of from_thread's sp
 * X2 :addr of to_thread's sp
 * X3 :to_thread's tcb
 *
 * Switch threads from within an interrupt: the from-thread resumes later
 * at rt_hw_irq_exit, which unwinds the saved exception frame.
 */
rt_hw_context_switch_interrupt:
#ifdef RT_USING_DEBUG
    /* debug frame for backtrace */
    stp     x29, x30, [sp, #-0x10]!
#endif /* RT_USING_DEBUG */

    /* we can discard all the previous ABI here */
    mov     EXP_FRAME, x0
    mov     FROM_SPP, x1
    mov     TO_SPP, x2
    mov     TO_TCB, x3

#ifdef RT_USING_SMART
    GET_THREAD_SELF x0
    bl      lwp_user_setting_save
#endif /* RT_USING_SMART */

    /* reset SP of from-thread to the exception frame */
    mov     sp, EXP_FRAME

    /* push context for switch; resume point is rt_hw_irq_exit */
    adr     lr, rt_hw_irq_exit
    SAVE_CONTEXT_SWITCH_FAST

    /* save SP of from-thread */
    mov     x0, sp
    str     x0, [FROM_SPP]

    /* setup SP to to-thread's */
    ldr     x0, [TO_SPP]
    mov     sp, x0
    update_tidr TO_TCB

    mov     x0, TO_TCB
    bl      rt_cpus_lock_status_restore
#ifdef RT_USING_SMART
    mov     x0, TO_TCB
    bl      lwp_user_setting_restore
#endif /* RT_USING_SMART */
    b       _context_switch_exit
|
||||
|
||||
/* Common tail of every switch path: drop any exclusive monitor state and
 * restore the destination thread's saved context from its stack. */
_context_switch_exit:
    .local _context_switch_exit

    clrex                       /* clear local exclusive monitor (LDXR/STXR) */
    RESTORE_CONTEXT_SWITCH
|
||||
60
RT_Thread/libcpu/aarch64/common/mp/context_gcc.h
Normal file
60
RT_Thread/libcpu/aarch64/common/mp/context_gcc.h
Normal file
@ -0,0 +1,60 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2024, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2024-03-25 Shell Trimming unecessary ops and
|
||||
* improve the performance of ctx switch
|
||||
*/
|
||||
|
||||
#ifndef __ARM64_CONTEXT_H__
|
||||
#define __ARM64_CONTEXT_H__
|
||||
|
||||
#include "../include/context_gcc.h"
|
||||
|
||||
#include <rtconfig.h>
|
||||
#include <asm-generic.h>
|
||||
#include <asm-fpu.h>
|
||||
#include <armv8.h>
|
||||
|
||||
/* Restore a context stored by the context-switch save path. */
.macro RESTORE_CONTEXT_SWITCH
    _RESTORE_CONTEXT_SWITCH
.endm

/* Restore a full exception frame pushed on IRQ entry, then eret.
 * Pops SPSR/ELR, the user SP, FP state, all GP registers and FPU regs
 * in the reverse order of the save path. */
.macro RESTORE_IRQ_CONTEXT
    ldp     x2, x3, [sp], #0x10  /* SPSR and ELR. */

    /* Z flag set below iff SPSR.M[4:0] == 0 (came from EL0) */
    tst     x3, #0x1f
    msr     spsr_el1, x3
    msr     elr_el1, x2

    ldp     x29, x30, [sp], #0x10
    msr     sp_el0, x29
    ldp     x28, x29, [sp], #0x10
    msr     fpcr, x28
    msr     fpsr, x29
    ldp     x28, x29, [sp], #0x10
    ldp     x26, x27, [sp], #0x10
    ldp     x24, x25, [sp], #0x10
    ldp     x22, x23, [sp], #0x10
    ldp     x20, x21, [sp], #0x10
    ldp     x18, x19, [sp], #0x10
    ldp     x16, x17, [sp], #0x10
    ldp     x14, x15, [sp], #0x10
    ldp     x12, x13, [sp], #0x10
    ldp     x10, x11, [sp], #0x10
    ldp     x8, x9, [sp], #0x10
    ldp     x6, x7, [sp], #0x10
    ldp     x4, x5, [sp], #0x10
    ldp     x2, x3, [sp], #0x10
    ldp     x0, x1, [sp], #0x10
    RESTORE_FPU sp
#ifdef RT_USING_SMART
    /* returning to EL0 (Z set by the tst above): go via the user path */
    beq     arch_ret_to_user
#endif
    eret
.endm
|
||||
|
||||
#endif /* __ARM64_CONTEXT_H__ */
|
||||
34
RT_Thread/libcpu/aarch64/common/mp/vector_gcc.S
Normal file
34
RT_Thread/libcpu/aarch64/common/mp/vector_gcc.S
Normal file
@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
|
||||
*/
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#define __ASSEMBLY__
|
||||
#endif
|
||||
|
||||
#include "vector_gcc.h"
|
||||
#include "context_gcc.h"
|
||||
|
||||
.section .text
|
||||
|
||||
/* FIQ is unused on this port: park the CPU if one ever fires. */
vector_fiq:
    .globl vector_fiq
    b       .

.globl rt_hw_irq_exit

/**
 * void rt_hw_vector_irq_sched(void *eframe)
 * @brief do IRQ scheduling
 *
 * Lets the scheduler perform a possible thread switch from IRQ context,
 * then falls through to the common IRQ exit path.
 */
rt_hw_vector_irq_sched:
    .globl rt_hw_vector_irq_sched

    bl      rt_scheduler_do_irq_switch
    b       rt_hw_irq_exit
|
||||
418
RT_Thread/libcpu/aarch64/common/psci.c
Normal file
418
RT_Thread/libcpu/aarch64/common/psci.c
Normal file
@ -0,0 +1,418 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2022, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2021-09-09 GuEe-GUI The first version
|
||||
* 2022-09-24 GuEe-GUI Add operations and fdt init support
|
||||
*/
|
||||
|
||||
#include <rtthread.h>
|
||||
|
||||
#define DBG_TAG "osi.psci"
|
||||
#define DBG_LVL DBG_INFO
|
||||
#include <rtdbg.h>
|
||||
|
||||
/* support cpu mpidr and smccc from libcpu */
|
||||
#include <cpu.h>
|
||||
#include <smccc.h>
|
||||
#include <psci.h>
|
||||
#include <drivers/ofw.h>
|
||||
#include <drivers/platform.h>
|
||||
#include <drivers/core/dm.h>
|
||||
|
||||
/* Operation table filled in per detected PSCI revision (0.1 or >= 0.2);
 * NULL entries mean the operation is unsupported by the firmware. */
struct psci_ops
{
    rt_uint32_t (*get_version)(void);
    rt_uint32_t (*cpu_on)(int cpuid, rt_ubase_t entry_point);
    rt_uint32_t (*cpu_off)(rt_uint32_t state);
    rt_uint32_t (*cpu_suspend)(rt_uint32_t power_state, rt_ubase_t entry_point);
    rt_uint32_t (*migrate)(int cpuid);
    rt_uint32_t (*get_affinity_info)(rt_ubase_t target_affinity, rt_ubase_t lowest_affinity_level);
    rt_uint32_t (*migrate_info_type)(void);
};
|
||||
|
||||
/* PSCI 0.1 has no fixed function IDs: they are read from the device tree
 * ("cpu_on", "cpu_off", ... properties) and cached here. */
struct psci_0_1_func_ids
{
    rt_uint32_t cpu_on;
    rt_uint32_t cpu_off;
    rt_uint32_t cpu_suspend;
    rt_uint32_t migrate;
};

/* per-revision init hook, selected by OFW compatible match */
typedef rt_err_t (*psci_init_ofw_handle)(struct rt_ofw_node *np);
/* conduit used to enter firmware: SMC or HVC */
typedef rt_ubase_t (*psci_call_handle)(rt_uint32_t fn, rt_ubase_t arg0, rt_ubase_t arg1, rt_ubase_t arg2);
|
||||
|
||||
/* [40:63] and [24:31] must be zero, other is aff3 (64bit), aff2, aff1, aff0 */
|
||||
#ifdef ARCH_CPU_64BIT
|
||||
#define PSCI_FNC_ID(version_major, version_min, name) PSCI_##version_major##_##version_min##_FN64_##name
|
||||
#define MPIDR_MASK 0xff00ffffff
|
||||
#else
|
||||
#define PSCI_FNC_ID(version_major, version_min, name) PSCI_##version_major##_##version_min##_FN_##name
|
||||
#define MPIDR_MASK 0x00ffffff
|
||||
#endif
|
||||
|
||||
static struct psci_ops _psci_ops = {};
|
||||
|
||||
static struct psci_0_1_func_ids psci_0_1_func_ids = {};
|
||||
static psci_call_handle psci_call;
|
||||
|
||||
/* PSCI SMCCC */
|
||||
/* Enter PSCI firmware via the SMC conduit; returns the firmware's a0. */
static rt_ubase_t psci_smc_call(rt_uint32_t fn, rt_ubase_t arg0, rt_ubase_t arg1, rt_ubase_t arg2)
{
    struct arm_smccc_res_t res;

    arm_smccc_smc(fn, arg0, arg1, arg2, 0, 0, 0, 0, &res, RT_NULL);

    return res.a0;
}

/* Enter PSCI firmware via the HVC conduit (hypervisor present); returns a0. */
static rt_ubase_t psci_hvc_call(rt_uint32_t fn, rt_ubase_t arg0, rt_ubase_t arg1, rt_ubase_t arg2)
{
    struct arm_smccc_res_t res;

    arm_smccc_hvc(fn, arg0, arg1, arg2, 0, 0, 0, 0, &res, RT_NULL);

    return res.a0;
}
|
||||
|
||||
/* PSCI VERSION */
|
||||
/* PSCI 0.1 firmware cannot report a version: answer the fixed value 0.1. */
static rt_uint32_t psci_0_1_get_version(void)
{
    return PSCI_VERSION(0, 1);
}

/* PSCI >= 0.2: query the firmware for its actual version. */
static rt_uint32_t psci_0_2_get_version(void)
{
    return (rt_uint32_t)psci_call(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
}

/* PSCI FEATURES (1.0+): ask whether a given function ID is implemented. */
static rt_uint32_t psci_get_features(rt_uint32_t psci_func_id)
{
    return (rt_uint32_t)psci_call(PSCI_1_0_FN_PSCI_FEATURES, psci_func_id, 0, 0);
}
|
||||
|
||||
/* PSCI CPU_ON */
|
||||
/**
 * @brief Power on a secondary CPU via PSCI CPU_ON.
 *
 * @param func_id     conduit function ID (fixed for 0.2+, DT-provided for 0.1)
 * @param cpuid       logical CPU index, must be in [0, RT_CPUS_NR)
 * @param entry_point physical address the CPU starts executing at
 *
 * @return firmware return code, or -PSCI_RET_INVALID_PARAMETERS for a
 *         logical CPU index out of range.
 */
static rt_uint32_t psci_cpu_on(rt_uint32_t func_id, int cpuid, rt_ubase_t entry_point)
{
    rt_uint32_t ret = -PSCI_RET_INVALID_PARAMETERS;

    /* Bound both ends: cpuid is signed, and the original `cpuid < RT_CPUS_NR`
     * check let a negative index read rt_cpu_mpidr_table out of bounds. */
    if (cpuid >= 0 && cpuid < RT_CPUS_NR)
    {
        rt_ubase_t mpid = rt_cpu_mpidr_table[cpuid] & MPIDR_MASK;

        ret = (rt_uint32_t)psci_call(func_id, mpid, entry_point, 0);
    }

    return ret;
}
|
||||
|
||||
/* CPU_ON with the function ID read from the 0.1 device tree. */
static rt_uint32_t psci_0_1_cpu_on(int cpuid, rt_ubase_t entry_point)
{
    return psci_cpu_on(psci_0_1_func_ids.cpu_on, cpuid, entry_point);
}

/* CPU_ON with the architected 0.2 function ID. */
static rt_uint32_t psci_0_2_cpu_on(int cpuid, rt_ubase_t entry_point)
{
    return psci_cpu_on(PSCI_FNC_ID(0, 2, CPU_ON), cpuid, entry_point);
}

/* PSCI CPU_OFF: power down the calling CPU with the given state. */
static rt_uint32_t psci_cpu_off(rt_uint32_t func_id, rt_uint32_t state)
{
    return (rt_uint32_t)psci_call(func_id, state, 0, 0);
}

static rt_uint32_t psci_0_1_cpu_off(rt_uint32_t state)
{
    return psci_cpu_off(psci_0_1_func_ids.cpu_off, state);
}

static rt_uint32_t psci_0_2_cpu_off(rt_uint32_t state)
{
    return psci_cpu_off(PSCI_0_2_FN_CPU_OFF, state);
}

/* PSCI CPU_SUSPEND: enter a low-power state, resuming at entry_point. */
static rt_uint32_t psci_cpu_suspend(rt_uint32_t func_id, rt_uint32_t power_state, rt_ubase_t entry_point)
{
    return (rt_uint32_t)psci_call(func_id, power_state, entry_point, 0);
}

static rt_uint32_t psci_0_1_cpu_suspend(rt_uint32_t power_state, rt_ubase_t entry_point)
{
    return psci_cpu_suspend(psci_0_1_func_ids.cpu_suspend, power_state, entry_point);
}

static rt_uint32_t psci_0_2_cpu_suspend(rt_uint32_t power_state, rt_ubase_t entry_point)
{
    return psci_cpu_suspend(PSCI_FNC_ID(0, 2, CPU_SUSPEND), power_state, entry_point);
}
|
||||
|
||||
/* PSCI CPU_MIGRATE */
|
||||
/**
 * @brief PSCI MIGRATE: move the Trusted OS to the CPU given by @cpuid.
 *
 * @param func_id conduit function ID (fixed for 0.2+, DT-provided for 0.1)
 * @param cpuid   logical CPU index, must be in [0, RT_CPUS_NR)
 *
 * @return firmware return code, or -PSCI_RET_INVALID_PARAMETERS for a
 *         logical CPU index out of range.
 */
static rt_uint32_t psci_migrate(rt_uint32_t func_id, int cpuid)
{
    rt_uint32_t ret = -PSCI_RET_INVALID_PARAMETERS;

    /* Bound both ends: cpuid is signed, and `cpuid < RT_CPUS_NR` alone
     * let a negative index read rt_cpu_mpidr_table out of bounds. */
    if (cpuid >= 0 && cpuid < RT_CPUS_NR)
    {
        rt_ubase_t mpid = rt_cpu_mpidr_table[cpuid] & MPIDR_MASK;

        ret = (rt_uint32_t)psci_call(func_id, mpid, 0, 0);
    }

    return ret;
}
|
||||
|
||||
/* MIGRATE with the function ID read from the 0.1 device tree. */
static rt_uint32_t psci_0_1_migrate(int cpuid)
{
    return psci_migrate(psci_0_1_func_ids.migrate, cpuid);
}

/* MIGRATE with the architected 0.2 function ID. */
static rt_uint32_t psci_0_2_migrate(int cpuid)
{
    return psci_migrate(PSCI_FNC_ID(0, 2, MIGRATE), cpuid);
}
|
||||
|
||||
/* PSCI AFFINITY_INFO */
|
||||
/* PSCI AFFINITY_INFO: query the power state of an affinity instance. */
static rt_uint32_t psci_affinity_info(rt_ubase_t target_affinity, rt_ubase_t lowest_affinity_level)
{
    return (rt_uint32_t)psci_call(PSCI_FNC_ID(0, 2, AFFINITY_INFO), target_affinity, lowest_affinity_level, 0);
}

/* PSCI MIGRATE_INFO_TYPE: report the Trusted OS migration capability. */
static rt_uint32_t psci_migrate_info_type(void)
{
    return (rt_uint32_t)psci_call(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
}

/* PSCI SYSTEM_OFF: power the whole system down; does not return on success. */
void psci_system_off(void)
{
    psci_call(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}

/* PSCI SYSTEM_RESET: reboot, preferring SYSTEM_RESET2 when the firmware
 * advertises it via PSCI FEATURES. */
void psci_system_reboot(void)
{
    if (psci_get_features(PSCI_FNC_ID(1, 1, SYSTEM_RESET2)) != PSCI_RET_NOT_SUPPORTED)
    {
        /*
         * reset_type[31] = 0 (architectural)
         * reset_type[30:0] = 0 (SYSTEM_WARM_RESET)
         * cookie = 0 (ignored by the implementation)
         */
        psci_call(PSCI_FNC_ID(1, 1, SYSTEM_RESET2), 0, 0, 0);
    }
    else
    {
        psci_call(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
    }

}
|
||||
|
||||
/* Invoke _psci_ops.fn if installed; evaluates to the firmware return code
 * or PSCI_RET_NOT_SUPPORTED when the operation is absent. */
#define PSCI_CALL_FN_RET(fn, ...)           \
({                                          \
    rt_uint32_t rc;                         \
    rc = PSCI_RET_NOT_SUPPORTED;            \
    if (_psci_ops.fn)                       \
        rc = _psci_ops.fn(__VA_ARGS__);     \
    rc;                                     \
})

/* Invoke _psci_ops.fn if installed, discarding any result. */
#define PSCI_CALL_FN(fn, ...)               \
({                                          \
    if (_psci_ops.fn)                       \
        _psci_ops.fn(__VA_ARGS__);          \
})
|
||||
|
||||
/* Public PSCI API: thin dispatchers over the detected operation table.
 * Each returns PSCI_RET_NOT_SUPPORTED when the firmware lacks the call. */

rt_uint32_t rt_psci_get_version(void)
{
    return PSCI_CALL_FN_RET(get_version);
}

rt_uint32_t rt_psci_cpu_on(int cpuid, rt_ubase_t entry_point)
{
    return PSCI_CALL_FN_RET(cpu_on, cpuid, entry_point);
}

rt_uint32_t rt_psci_cpu_off(rt_uint32_t state)
{
    return PSCI_CALL_FN_RET(cpu_off, state);
}

rt_uint32_t rt_psci_cpu_suspend(rt_uint32_t power_state, rt_ubase_t entry_point)
{
    return PSCI_CALL_FN_RET(cpu_suspend, power_state, entry_point);
}

rt_uint32_t rt_psci_migrate(int cpuid)
{
    return PSCI_CALL_FN_RET(migrate, cpuid);
}

rt_uint32_t rt_psci_get_affinity_info(rt_ubase_t target_affinity, rt_ubase_t lowest_affinity_level)
{
    return PSCI_CALL_FN_RET(get_affinity_info, target_affinity, lowest_affinity_level);
}

rt_uint32_t rt_psci_migrate_info_type(void)
{
    return PSCI_CALL_FN_RET(migrate_info_type);
}
|
||||
|
||||
#undef PSCI_CALL_FN_RET
|
||||
#undef PSCI_CALL_FN
|
||||
|
||||
/* PSCI INIT */
|
||||
/**
 * @brief Initialize PSCI 0.1: function IDs come from device-tree properties.
 *
 * Each optional property ("cpu_on", "cpu_off", "cpu_suspend", "migrate")
 * that is present installs the matching operation; absent ones stay NULL.
 *
 * @param np the "arm,psci" firmware node
 * @return always RT_EOK — every property is optional in 0.1
 */
static rt_err_t psci_0_1_init(struct rt_ofw_node *np)
{
    rt_err_t err = RT_EOK;
    rt_uint32_t func_id;

    _psci_ops.get_version = psci_0_1_get_version;

    if (!rt_ofw_prop_read_u32(np, "cpu_on", &func_id))
    {
        psci_0_1_func_ids.cpu_on = func_id;
        _psci_ops.cpu_on = psci_0_1_cpu_on;
    }

    if (!rt_ofw_prop_read_u32(np, "cpu_off", &func_id))
    {
        psci_0_1_func_ids.cpu_off = func_id;
        _psci_ops.cpu_off = psci_0_1_cpu_off;
    }

    if (!rt_ofw_prop_read_u32(np, "cpu_suspend", &func_id))
    {
        psci_0_1_func_ids.cpu_suspend = func_id;
        _psci_ops.cpu_suspend = psci_0_1_cpu_suspend;
    }

    if (!rt_ofw_prop_read_u32(np, "migrate", &func_id))
    {
        psci_0_1_func_ids.migrate = func_id;
        _psci_ops.migrate = psci_0_1_migrate;
    }

    return err;
}
|
||||
|
||||
static rt_err_t psci_0_2_init(struct rt_ofw_node *np)
|
||||
{
|
||||
rt_err_t err = RT_EOK;
|
||||
rt_uint32_t version = psci_0_2_get_version();
|
||||
|
||||
if (version >= PSCI_VERSION(0, 2))
|
||||
{
|
||||
_psci_ops.get_version = psci_0_2_get_version;
|
||||
_psci_ops.cpu_on = psci_0_2_cpu_on;
|
||||
_psci_ops.cpu_off = psci_0_2_cpu_off;
|
||||
_psci_ops.cpu_suspend = psci_0_2_cpu_suspend;
|
||||
_psci_ops.migrate = psci_0_2_migrate;
|
||||
_psci_ops.get_affinity_info = psci_affinity_info;
|
||||
_psci_ops.migrate_info_type = psci_migrate_info_type;
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG_E("PSCI version detected");
|
||||
err = -RT_EINVAL;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/* PSCI 1.0 is backward compatible with 0.2: reuse its whole setup. */
static rt_err_t psci_1_0_init(struct rt_ofw_node *np)
{
    return psci_0_2_init(np);
}
|
||||
|
||||
/**
 * @brief Probe-time setup: pick the firmware conduit from the device tree,
 *        run the revision-specific init, and publish the operation table.
 *
 * @param pdev platform device whose OFW node is the "arm,psci*" node
 * @return RT_EOK on success, -RT_EINVAL for a bad "method" property or an
 *         unusable firmware, -RT_ENOSYS when "method" is missing.
 */
static rt_err_t psci_ofw_init(struct rt_platform_device *pdev)
{
    rt_err_t err = RT_EOK;
    const char *method;
    const struct rt_ofw_node_id *id = pdev->id;
    struct rt_ofw_node *np = pdev->parent.ofw_node;

    if (!rt_ofw_prop_read_string(np, "method", &method))
    {
        /* "method" selects how firmware is entered: SMC or HVC */
        if (!rt_strcmp(method, "smc"))
        {
            psci_call = psci_smc_call;
        }
        else if (!rt_strcmp(method, "hvc"))
        {
            psci_call = psci_hvc_call;
        }
        else
        {
            LOG_E("Invalid \"method\" property: %s", method);
            err = -RT_EINVAL;
        }

        if (!err)
        {
            /* revision-specific init selected by the compatible match */
            psci_init_ofw_handle psci_init = (psci_init_ofw_handle)id->data;

            err = psci_init(np);

            if (!err)
            {
                rt_uint32_t version = rt_psci_get_version();

                /* expose the ops table to consumers of this OFW node */
                rt_ofw_data(np) = &_psci_ops;

                RT_UNUSED(version);

                LOG_I("Using PSCI v%d.%d Function IDs", PSCI_VERSION_MAJOR(version), PSCI_VERSION_MINOR(version));
            }
        }
    }
    else
    {
        err = -RT_ENOSYS;
    }

    return err;
}
|
||||
|
||||
/* Platform-driver probe hook: all the work happens in psci_ofw_init(). */
static rt_err_t psci_probe(struct rt_platform_device *pdev)
{
    return psci_ofw_init(pdev);
}
|
||||
|
||||
/* OFW match table: each compatible selects the revision-specific init
 * routine via .data (consumed in psci_ofw_init). */
static const struct rt_ofw_node_id psci_ofw_ids[] =
{
    { .compatible = "arm,psci", .data = psci_0_1_init },
    { .compatible = "arm,psci-0.2", .data = psci_0_2_init },
    { .compatible = "arm,psci-1.0", .data = psci_1_0_init },
    { /* sentinel */ }
};

static struct rt_platform_driver psci_driver =
{
    .name = "arm-psci",
    .ids = psci_ofw_ids,

    .probe = psci_probe,
};

/* Register the driver during platform init so PSCI is ready before SMP. */
static int psci_drv_register(void)
{
    rt_platform_driver_register(&psci_driver);

    return 0;
}
INIT_PLATFORM_EXPORT(psci_drv_register);
|
||||
427
RT_Thread/libcpu/aarch64/common/setup.c
Normal file
427
RT_Thread/libcpu/aarch64/common/setup.c
Normal file
@ -0,0 +1,427 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2023, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2023-02-21 GuEe-GUI first version
|
||||
*/
|
||||
|
||||
#include <rtthread.h>
|
||||
|
||||
#define DBG_TAG "cpu.aa64"
|
||||
#define DBG_LVL DBG_INFO
|
||||
#include <rtdbg.h>
|
||||
#include <smp_call.h>
|
||||
#include <cpu.h>
|
||||
#include <mmu.h>
|
||||
#include <cpuport.h>
|
||||
#include <interrupt.h>
|
||||
#include <gtimer.h>
|
||||
#include <setup.h>
|
||||
#include <stdlib.h>
|
||||
#include <ioremap.h>
|
||||
#include <rtdevice.h>
|
||||
#include <gic.h>
|
||||
#include <gicv3.h>
|
||||
#include <mm_memblock.h>
|
||||
|
||||
#define SIZE_KB 1024
|
||||
#define SIZE_MB (1024 * SIZE_KB)
|
||||
#define SIZE_GB (1024 * SIZE_MB)
|
||||
|
||||
extern rt_ubase_t _start, _end;
|
||||
extern void _secondary_cpu_entry(void);
|
||||
extern size_t MMUTable[];
|
||||
extern void *system_vectors;
|
||||
|
||||
static void *fdt_ptr = RT_NULL;
|
||||
static rt_size_t fdt_size = 0;
|
||||
static rt_uint64_t initrd_ranges[3] = { };
|
||||
|
||||
#ifdef RT_USING_SMP
|
||||
extern struct cpu_ops_t cpu_psci_ops;
|
||||
extern struct cpu_ops_t cpu_spin_table_ops;
|
||||
#else
|
||||
extern int rt_hw_cpu_id(void);
|
||||
#endif
|
||||
|
||||
/* Per-logical-CPU MPIDR values; sized RT_CPUS_NR + 1, last entry zero.
 * Filled in by cpu_info_init() / secondary startup. */
rt_uint64_t rt_cpu_mpidr_table[] =
{
    [RT_CPUS_NR] = 0,
};

/* Boot methods tried in order when bringing up secondary CPUs. */
static struct cpu_ops_t *cpu_ops[] =
{
#ifdef RT_USING_SMP
    &cpu_psci_ops,
    &cpu_spin_table_ops,
#endif
};

/* Device-tree node of each logical CPU, indexed like rt_cpu_mpidr_table. */
static struct rt_ofw_node *cpu_np[RT_CPUS_NR] = { };
|
||||
|
||||
/**
 * @brief Record the bootloader-provided FDT blob for later use.
 *
 * Validates the header first; an invalid blob is silently ignored,
 * leaving fdt_ptr NULL and fdt_size 0.
 *
 * @param fdt physical pointer to the flattened device tree, may be NULL
 */
void rt_hw_fdt_install_early(void *fdt)
{
    if (fdt != RT_NULL && !fdt_check_header(fdt))
    {
        fdt_ptr = fdt;
        fdt_size = fdt_totalsize(fdt);
    }
}
|
||||
|
||||
#ifdef RT_USING_HWTIMER
|
||||
static rt_ubase_t loops_per_tick[RT_CPUS_NR];
|
||||
|
||||
/* Read the generic timer's physical counter (CNTPCT_EL0), a free-running
 * cycle counter at the frequency reported by CNTFRQ_EL0. */
static rt_ubase_t cpu_get_cycles(void)
{
    rt_ubase_t cycles;

    rt_hw_sysreg_read(cntpct_el0, cycles);

    return cycles;
}
|
||||
|
||||
static void cpu_loops_per_tick_init(void)
|
||||
{
|
||||
rt_ubase_t offset;
|
||||
volatile rt_ubase_t freq, step, cycles_end1, cycles_end2;
|
||||
volatile rt_uint32_t cycles_count1 = 0, cycles_count2 = 0;
|
||||
|
||||
rt_hw_sysreg_read(cntfrq_el0, freq);
|
||||
step = freq / RT_TICK_PER_SECOND;
|
||||
|
||||
cycles_end1 = cpu_get_cycles() + step;
|
||||
|
||||
while (cpu_get_cycles() < cycles_end1)
|
||||
{
|
||||
__asm__ volatile ("nop");
|
||||
__asm__ volatile ("add %0, %0, #1":"=r"(cycles_count1));
|
||||
}
|
||||
|
||||
cycles_end2 = cpu_get_cycles() + step;
|
||||
|
||||
while (cpu_get_cycles() < cycles_end2)
|
||||
{
|
||||
__asm__ volatile ("add %0, %0, #1":"=r"(cycles_count2));
|
||||
}
|
||||
|
||||
if ((rt_int32_t)(cycles_count2 - cycles_count1) > 0)
|
||||
{
|
||||
offset = cycles_count2 - cycles_count1;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Impossible, but prepared for any eventualities */
|
||||
offset = cycles_count2 / 4;
|
||||
}
|
||||
|
||||
loops_per_tick[rt_hw_cpu_id()] = offset;
|
||||
}
|
||||
|
||||
/**
 * @brief Busy-wait roughly @us microseconds using the calibrated
 *        loops_per_tick value for the current CPU.
 *
 * NOTE(review): 0x10c7 appears to be a fixed-point microseconds factor
 * combined with the >> 32 — derived from the calibration above; confirm
 * the scaling against cpu_loops_per_tick_init before relying on accuracy.
 *
 * @param us requested delay in microseconds (approximate)
 */
static void cpu_us_delay(rt_uint32_t us)
{
    volatile rt_base_t start = cpu_get_cycles(), cycles;

    cycles = ((us * 0x10c7UL) * loops_per_tick[rt_hw_cpu_id()] * RT_TICK_PER_SECOND) >> 32;

    while ((cpu_get_cycles() - start) < cycles)
    {
        rt_hw_cpu_relax();
    }
}
|
||||
#endif /* RT_USING_HWTIMER */
|
||||
|
||||
/* Default idle hook: halt the core until the next interrupt (overridable). */
rt_weak void rt_hw_idle_wfi(void)
{
    __asm__ volatile ("wfi");
}
|
||||
|
||||
/* Point VBAR at the kernel's exception vector table for this CPU. */
static void system_vectors_init(void)
{
    rt_hw_set_current_vbar((rt_ubase_t)&system_vectors);
}
|
||||
|
||||
/**
 * @brief Enumerate CPU nodes from the device tree, fill the MPIDR table,
 *        run each boot method's per-CPU init, and (optionally) calibrate
 *        the microsecond delay loop for the boot CPU.
 */
rt_inline void cpu_info_init(void)
{
    int i = 0;
    rt_uint64_t mpidr;
    struct rt_ofw_node *np;

    /* get boot cpu info */
    rt_hw_sysreg_read(mpidr_el1, mpidr);

    rt_ofw_foreach_cpu_node(np)
    {
        rt_uint64_t hwid = rt_ofw_get_cpu_hwid(np, 0);

        if ((mpidr & MPIDR_AFFINITY_MASK) != hwid)
        {
            /* Only save affinity and res make smp boot can check */
            hwid |= 1ULL << 31;
        }
        else
        {
            /* this node is the boot CPU: record its full MPIDR */
            hwid = mpidr;
        }

        cpu_np[i] = np;
        rt_cpu_mpidr_table[i] = hwid;

        /* stash the hwid on the node for later boot-method lookups */
        rt_ofw_data(np) = (void *)hwid;

        /* give every registered boot method a chance to init this CPU */
        for (int idx = 0; idx < RT_ARRAY_SIZE(cpu_ops); ++idx)
        {
            struct cpu_ops_t *ops = cpu_ops[idx];

            if (ops->cpu_init)
            {
                ops->cpu_init(i, np);
            }
        }

        if (++i >= RT_CPUS_NR)
        {
            break;
        }
    }

    /* secondaries read this table before their caches are on */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, rt_cpu_mpidr_table, sizeof(rt_cpu_mpidr_table));

#ifdef RT_USING_HWTIMER
    cpu_loops_per_tick_init();

    /* install our delay routine only if none is set yet */
    if (!rt_device_hwtimer_us_delay)
    {
        rt_device_hwtimer_us_delay = &cpu_us_delay;
    }
#endif /* RT_USING_HWTIMER */
}
|
||||
|
||||
/**
 * @brief Common boot-CPU platform setup: carve out the physical memory
 *        layout (kernel | heap | init pages | fdt), bring up the MMU,
 *        parse the device tree, and initialize interrupts, timer, console
 *        and (on SMP) the IPI plumbing.
 *
 * Layout is derived from the kernel image: heap follows the image, the
 * initial page pool follows the heap, and the relocated FDT follows that.
 */
void rt_hw_common_setup(void)
{
    rt_size_t kernel_start, kernel_end;
    rt_size_t heap_start, heap_end;
    rt_size_t init_page_start, init_page_end;
    rt_size_t fdt_start, fdt_end;
    rt_region_t init_page_region = { 0 };
    rt_region_t platform_mem_region = { 0 };
    static struct mem_desc platform_mem_desc;
    const rt_ubase_t pv_off = PV_OFFSET;

    system_vectors_init();

#ifdef RT_USING_SMART
    rt_hw_mmu_map_init(&rt_kernel_space, (void*)0xfffffffff0000000, 0x10000000, MMUTable, pv_off);
#else
    rt_hw_mmu_map_init(&rt_kernel_space, (void*)0xffffd0000000, 0x10000000, MMUTable, 0);
#endif

    /* physical extents of each region, page aligned */
    kernel_start = RT_ALIGN_DOWN((rt_size_t)rt_kmem_v2p((void *)&_start) - 64, ARCH_PAGE_SIZE);
    kernel_end = RT_ALIGN((rt_size_t)rt_kmem_v2p((void *)&_end), ARCH_PAGE_SIZE);
    heap_start = kernel_end;
    heap_end = RT_ALIGN(heap_start + ARCH_HEAP_SIZE, ARCH_PAGE_SIZE);
    init_page_start = heap_end;
    init_page_end = RT_ALIGN(init_page_start + ARCH_INIT_PAGE_SIZE, ARCH_PAGE_SIZE);
    fdt_start = init_page_end;
    fdt_end = RT_ALIGN(fdt_start + fdt_size, ARCH_PAGE_SIZE);

    platform_mem_region.start = kernel_start;
    platform_mem_region.end = fdt_end;

    /* keep the allocator away from the regions laid out above */
    rt_memblock_reserve_memory("kernel", kernel_start, kernel_end, MEMBLOCK_NONE);
    rt_memblock_reserve_memory("memheap", heap_start, heap_end, MEMBLOCK_NONE);
    rt_memblock_reserve_memory("init-page", init_page_start, init_page_end, MEMBLOCK_NONE);
    rt_memblock_reserve_memory("fdt", fdt_start, fdt_end, MEMBLOCK_NONE);

    /* To virtual address */
    fdt_ptr = (void *)(fdt_ptr - pv_off);
#ifdef KERNEL_VADDR_START
    /* FDT too far from the kernel window: map it temporarily instead */
    if ((rt_ubase_t)fdt_ptr + fdt_size - KERNEL_VADDR_START > SIZE_GB)
    {
        fdt_ptr = rt_ioremap_early(fdt_ptr + pv_off, fdt_size);

        RT_ASSERT(fdt_ptr != RT_NULL);
    }
#endif
    /* relocate the FDT into the reserved slot after the page pool */
    rt_memmove((void *)(fdt_start - pv_off), fdt_ptr, fdt_size);
    fdt_ptr = (void *)fdt_start - pv_off;

    rt_system_heap_init((void *)(heap_start - pv_off), (void *)(heap_end - pv_off));

    init_page_region.start = init_page_start - pv_off;
    init_page_region.end = init_page_end - pv_off;
    rt_page_init(init_page_region);

    /* create MMU mapping of kernel memory */
    platform_mem_region.start = RT_ALIGN_DOWN(platform_mem_region.start, ARCH_PAGE_SIZE);
    platform_mem_region.end = RT_ALIGN(platform_mem_region.end, ARCH_PAGE_SIZE);

    platform_mem_desc.paddr_start = platform_mem_region.start;
    platform_mem_desc.vaddr_start = platform_mem_region.start - pv_off;
    platform_mem_desc.vaddr_end = platform_mem_region.end - pv_off - 1;
    platform_mem_desc.attr = NORMAL_MEM;

    rt_hw_mmu_setup(&rt_kernel_space, &platform_mem_desc, 1);

    if (rt_fdt_prefetch(fdt_ptr))
    {
        /* Platform cannot be initialized */
        RT_ASSERT(0);
    }

    rt_fdt_scan_chosen_stdout();

    rt_fdt_scan_initrd(initrd_ranges);

    rt_fdt_scan_memory();

    rt_memblock_setup_memory_environment();

    /* earlycon addresses changed by the MMU setup: refresh them */
    rt_fdt_earlycon_kick(FDT_EARLYCON_KICK_UPDATE);

    rt_fdt_unflatten();

    cpu_info_init();

#ifdef RT_USING_PIC
    rt_pic_init();
    rt_pic_irq_init();
#else
    /* initialize hardware interrupt */
    rt_hw_interrupt_init();

    /* initialize uart */
    rt_hw_uart_init();
#endif

#ifndef RT_HWTIMER_ARM_ARCH
    /* initialize timer for os tick */
    rt_hw_gtimer_init();
#endif /* !RT_HWTIMER_ARM_ARCH */

#ifdef RT_USING_COMPONENTS_INIT
    rt_components_board_init();
#endif

#if defined(RT_USING_CONSOLE) && defined(RT_USING_DEVICE)
    rt_ofw_console_setup();
#endif

    rt_thread_idle_sethook(rt_hw_idle_wfi);

#ifdef RT_USING_SMP
    rt_smp_call_init();
    /* Install the IPI handle */
    rt_hw_ipi_handler_install(RT_SCHEDULE_IPI, rt_scheduler_ipi_handler);
    rt_hw_ipi_handler_install(RT_STOP_IPI, rt_scheduler_ipi_handler);
    rt_hw_ipi_handler_install(RT_SMP_CALL_IPI, rt_smp_call_ipi_handler);
    rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
    rt_hw_interrupt_umask(RT_STOP_IPI);
    rt_hw_interrupt_umask(RT_SMP_CALL_IPI);
#endif
}
|
||||
|
||||
#ifdef RT_USING_SMP
|
||||
/**
 * @brief Bring up every secondary CPU using the boot method named by its
 *        device-tree "enable-method" property (PSCI or spin-table).
 *
 * Each CPU is started at the physical address of _secondary_cpu_entry.
 * Failures are logged and the remaining CPUs are still attempted.
 */
rt_weak void rt_hw_secondary_cpu_up(void)
{
    int cpu_id = rt_hw_cpu_id();
    rt_uint64_t entry = (rt_uint64_t)rt_kmem_v2p(_secondary_cpu_entry);

    if (!entry)
    {
        LOG_E("Failed to translate '_secondary_cpu_entry' to physical address");
        RT_ASSERT(0);
    }

    /* Maybe we are not in the first cpu, so skip ourselves by id */
    for (int i = 0; i < RT_ARRAY_SIZE(cpu_np); ++i)
    {
        int err;
        const char *enable_method;

        if (!cpu_np[i] || i == cpu_id)
        {
            continue;
        }

        err = rt_ofw_prop_read_string(cpu_np[i], "enable-method", &enable_method);

        /* find the registered boot method matching "enable-method" */
        for (int idx = 0; !err && idx < RT_ARRAY_SIZE(cpu_ops); ++idx)
        {
            struct cpu_ops_t *ops = cpu_ops[idx];

            if (ops->method && !rt_strcmp(ops->method, enable_method) && ops->cpu_boot)
            {
                err = ops->cpu_boot(i, entry);

                break;
            }
        }

        if (err)
        {
            LOG_W("Call cpu %d on %s", i, "failed");
        }
    }
}
|
||||
|
||||
/**
 * @brief C entry point of a freshly booted secondary CPU: set up its
 *        vectors, MMU, interrupt controller and tick timer, then enter
 *        the scheduler. Does not return.
 */
rt_weak void rt_hw_secondary_cpu_bsp_start(void)
{
    int cpu_id = rt_hw_cpu_id();

    system_vectors_init();

    /* serialize secondary bring-up against the boot CPU */
    rt_hw_spin_lock(&_cpus_lock);

    /* Save all mpidr */
    rt_hw_sysreg_read(mpidr_el1, rt_cpu_mpidr_table[cpu_id]);

    /* adopt the shared kernel page tables */
    rt_hw_mmu_ktbl_set((unsigned long)MMUTable);

#ifdef RT_USING_PIC
    rt_pic_irq_init();
#else
    /* initialize vector table */
    rt_hw_vector_init();

    arm_gic_cpu_init(0, 0);
#ifdef BSP_USING_GICV3
    arm_gic_redist_init(0, 0);
#endif /* BSP_USING_GICV3 */
#endif

#ifndef RT_HWTIMER_ARM_ARCH
    /* initialize timer for os tick */
    rt_hw_gtimer_local_enable();
#endif /* !RT_HWTIMER_ARM_ARCH */

    rt_dm_secondary_cpu_init();

    /* accept the scheduling / stop / smp-call IPIs on this core */
    rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
    rt_hw_interrupt_umask(RT_STOP_IPI);
    rt_hw_interrupt_umask(RT_SMP_CALL_IPI);

    LOG_I("Call cpu %d on %s", cpu_id, "success");

#ifdef RT_USING_HWTIMER
    /* calibrate this core's delay loop if our routine is installed */
    if (rt_device_hwtimer_us_delay == &cpu_us_delay)
    {
        cpu_loops_per_tick_init();
    }
#endif

    rt_system_scheduler_start();
}
|
||||
|
||||
/* Idle body for secondary CPUs: sleep until an event (SEV/interrupt). */
rt_weak void rt_hw_secondary_cpu_idle_exec(void)
{
    rt_hw_wfe();
}
|
||||
#endif
|
||||
|
||||
/* Kernel console output hook: forward to the FDT early console. */
void rt_hw_console_output(const char *str)
{
    rt_fdt_earlycon_output(str);
}
|
||||
32
RT_Thread/libcpu/aarch64/common/smccc.S
Normal file
32
RT_Thread/libcpu/aarch64/common/smccc.S
Normal file
@ -0,0 +1,32 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2022, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
*/
|
||||
/**
|
||||
* SMCCC v0.2
|
||||
* ARM DEN0028E chapter 2.6
|
||||
*/
|
||||
/*
 * SMCCC conduit trampoline: issue an SMC or HVC per the SMC Calling
 * Convention (ARM DEN0028E, chapter 2.6) and store the four result
 * registers x0-x3 into the caller-provided result structure.
 *
 * NOTE(review): the result pointer is loaded from [sp, #16], i.e. the first
 * stack-passed argument (the 9th C argument after x0-x7) — confirm against
 * the arm_smccc_smc/hvc C prototypes.
 */
.macro SMCCC instr
    /* build a frame so the conduit call remains backtraceable */
    stp x29, x30, [sp, #-16]!
    mov x29, sp
    /* SMC/HVC immediate must be 0 for SMCCC calls */
    \instr #0
    // store in arm_smccc_res
    ldr x4, [sp, #16]
    stp x0, x1, [x4, #0]
    stp x2, x3, [x4, #16]
1:
    ldp x29, x30, [sp], #16
    ret
.endm

/* void arm_smccc_smc(...): SMCCC call via the SMC conduit (to EL3) */
.global arm_smccc_smc
arm_smccc_smc:
    SMCCC smc

/* void arm_smccc_hvc(...): SMCCC call via the HVC conduit (to EL2) */
.global arm_smccc_hvc
arm_smccc_hvc:
    SMCCC hvc
|
||||
63
RT_Thread/libcpu/aarch64/common/stack.c
Normal file
63
RT_Thread/libcpu/aarch64/common/stack.c
Normal file
@ -0,0 +1,63 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2023, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2021-05-12 RT-Thread init
|
||||
* 2023-07-13 GuEe-GUI append fpu: Q16 ~ Q31
|
||||
*/
|
||||
#include <board.h>
|
||||
#include <rtthread.h>
|
||||
#include <cpuport.h>
|
||||
|
||||
#include <armv8.h>
|
||||
|
||||
#define INITIAL_SPSR_EL1 (PSTATE_EL1 | SP_ELx)
|
||||
|
||||
/**
 * This function will initialize thread stack
 *
 * Builds the initial exception-return frame that the context restore path
 * unwinds on the thread's first switch-in: 32 zeroed 128-bit FPU registers,
 * the callee-saved GP registers, FPSR/FPCR, SPSR and the ELR (set to
 * _thread_start so the trampoline receives entry/exit/parameter in
 * x19/x20/x21).
 *
 * @param tentry the entry of thread
 * @param parameter the parameter of entry
 * @param stack_addr the beginning stack address
 * @param texit the function will be called when thread exit
 *
 * @return stack address
 */
rt_uint8_t *rt_hw_stack_init(void *tentry, void *parameter,
        rt_uint8_t *stack_addr, void *texit)
{
    rt_ubase_t *stk;

    /* The AAPCS64 requires 128-bit (16 byte) stack alignment */
    stk = (rt_ubase_t*)RT_ALIGN_DOWN((rt_ubase_t)stack_addr, 16);

    /* zero-fill Q0..Q31 so a fresh thread starts with clean FPU state */
    for (int i = 0; i < 32; ++i)
    {
        stk -= sizeof(rt_uint128_t) / sizeof(rt_ubase_t);

        *(rt_uint128_t *)stk = (rt_uint128_t) { 0 };
    }

    /*
     * Slot layout must mirror the restore order in the context-switch asm
     * (restored as LDP pairs; the decimal fill values mark which register
     * each unused slot lands in, to aid debugging).
     * NOTE(review): pairing/order is defined by _RESTORE_CONTEXT_SWITCH,
     * which is not visible here — verify against that macro when changing.
     */
    *(--stk) = (rt_ubase_t)texit;        /* X20, 2nd param */
    *(--stk) = (rt_ubase_t)tentry;       /* X19, 1st param */
    *(--stk) = (rt_ubase_t)22;           /* X22 */
    *(--stk) = (rt_ubase_t)parameter;    /* X21, 3rd param */
    *(--stk) = (rt_ubase_t)24;           /* X24 */
    *(--stk) = (rt_ubase_t)23;           /* X23 */
    *(--stk) = (rt_ubase_t)26;           /* X26 */
    *(--stk) = (rt_ubase_t)25;           /* X25 */
    *(--stk) = (rt_ubase_t)28;           /* X28 */
    *(--stk) = (rt_ubase_t)27;           /* X27 */
    *(--stk) = (rt_ubase_t)0;            /* sp_el0 */
    *(--stk) = (rt_ubase_t)0;            /* X29 - addr 0 as AAPCS64 specified */
    *(--stk) = (rt_ubase_t)0;            /* FPSR */
    *(--stk) = (rt_ubase_t)0;            /* FPCR */
    *(--stk) = INITIAL_SPSR_EL1;         /* Save Processor States */
    *(--stk) = (rt_ubase_t)_thread_start; /* Exception return address. */

    /* return task's current stack address */
    return (rt_uint8_t *)stk;
}
|
||||
29
RT_Thread/libcpu/aarch64/common/stack_gcc.S
Normal file
29
RT_Thread/libcpu/aarch64/common/stack_gcc.S
Normal file
@ -0,0 +1,29 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2024, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2021-05-18 Jesven the first version
|
||||
* 2023-06-24 Shell Support debug frame for user thread
|
||||
*/
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#define __ASSEMBLY__
|
||||
#endif
|
||||
|
||||
#include "rtconfig.h"
|
||||
#include "asm-generic.h"
|
||||
#include "asm-fpu.h"
|
||||
#include "armv8.h"
|
||||
|
||||
.section .text

/*
 * Common thread-entry trampoline. The initial frame built by
 * rt_hw_stack_init lands here via exception return with:
 *   x19 = thread entry function, x20 = thread exit handler,
 *   x21 = parameter for the entry function.
 */
START_POINT(_thread_start)
    mov x0, x21     /* pass the parameter to the entry function */
    blr x19         /* run the thread body */
    mov x29, #0     /* NULL frame pointer: terminate backtraces (AAPCS64) */
    blr x20         /* thread returned: invoke texit */
    b . /* never here */
START_POINT_END(_thread_start)
|
||||
18
RT_Thread/libcpu/aarch64/common/startup_gcc.S
Normal file
18
RT_Thread/libcpu/aarch64/common/startup_gcc.S
Normal file
@ -0,0 +1,18 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2018, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
*/
|
||||
|
||||
/*
 * Minimal startup stubs.
 * Reset_Handler: placeholder entry placed in the ".start" section;
 * NOTE(review): presumably the real boot path is elsewhere — this stub
 * only satisfies the linker, confirm against the link script.
 */
.global Reset_Handler
.section ".start", "ax"
Reset_Handler:
    nop

.text
/*
 * Weak default SVC handler (returns immediately); a strong definition
 * elsewhere (e.g. the syscall dispatcher) overrides it.
 */
.weak SVC_Handler
SVC_Handler:
    ret
|
||||
398
RT_Thread/libcpu/aarch64/common/trap.c
Normal file
398
RT_Thread/libcpu/aarch64/common/trap.c
Normal file
@ -0,0 +1,398 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2018, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2013-07-20 Bernard first version
|
||||
*/
|
||||
|
||||
#include <rtthread.h>
|
||||
#include <rthw.h>
|
||||
#include <board.h>
|
||||
|
||||
#include <armv8.h>
|
||||
#include "interrupt.h"
|
||||
#include "mm_aspace.h"
|
||||
|
||||
#define DBG_TAG "libcpu.trap"
|
||||
#define DBG_LVL DBG_LOG
|
||||
#include <rtdbg.h>
|
||||
|
||||
#ifdef RT_USING_FINSH
|
||||
extern long list_thread(void);
|
||||
#endif
|
||||
|
||||
#ifdef RT_USING_LWP
|
||||
#include <lwp.h>
|
||||
#include <lwp_arch.h>
|
||||
|
||||
#ifdef LWP_USING_CORE_DUMP
|
||||
#include <lwp_core_dump.h>
|
||||
#endif
|
||||
|
||||
/**
 * Report a fatal fault and, when it originated in user mode, dump core
 * (if enabled) and terminate the offending process group.
 *
 * @param regs   saved exception frame
 * @param pc_adj value subtracted from regs->pc when printing, so the report
 *               points at the faulting instruction
 * @param info   short description printed alongside the faulting PC
 */
static void _check_fault(struct rt_hw_exp_stack *regs, uint32_t pc_adj, char *info)
{
    uint32_t is_user_fault;
    rt_thread_t th;

    /* SPSR.M[4:0] == 0 means the exception was taken from EL0 (user mode) */
    is_user_fault = !(regs->cpsr & 0x1f);
    if (is_user_fault)
    {
        rt_kprintf("%s! pc = 0x%x\n", info, regs->pc - pc_adj);
    }

    /* user stack backtrace */
    th = rt_thread_self();
    if (th && th->lwp)
    {
        arch_backtrace_uthread(th);
    }

    if (is_user_fault)
    {
#ifdef LWP_USING_CORE_DUMP
        lwp_core_dump(regs, pc_adj);
#endif
        /* kill the whole process group; kernel faults fall through instead */
        sys_exit_group(-1);
    }
}
|
||||
|
||||
rt_inline int _get_type(unsigned long esr)
|
||||
{
|
||||
int ret;
|
||||
int fsc = ARM64_ESR_EXTRACT_FSC(esr);
|
||||
switch (fsc)
|
||||
{
|
||||
case ARM64_FSC_TRANSLATION_FAULT_LEVEL_0:
|
||||
case ARM64_FSC_TRANSLATION_FAULT_LEVEL_1:
|
||||
case ARM64_FSC_TRANSLATION_FAULT_LEVEL_2:
|
||||
case ARM64_FSC_TRANSLATION_FAULT_LEVEL_3:
|
||||
ret = MM_FAULT_TYPE_PAGE_FAULT;
|
||||
break;
|
||||
case ARM64_FSC_PERMISSION_FAULT_LEVEL_0:
|
||||
case ARM64_FSC_PERMISSION_FAULT_LEVEL_1:
|
||||
case ARM64_FSC_PERMISSION_FAULT_LEVEL_2:
|
||||
case ARM64_FSC_PERMISSION_FAULT_LEVEL_3:
|
||||
ret = MM_FAULT_TYPE_RWX_PERM;
|
||||
break;
|
||||
case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_0:
|
||||
case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_1:
|
||||
case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_2:
|
||||
case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_3:
|
||||
/* access flag fault, not handle currently */
|
||||
default:
|
||||
ret = MM_FAULT_TYPE_GENERIC;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Return nonzero when the saved PSTATE has the I (IRQ mask) bit set. */
rt_inline long _irq_is_disable(long cpsr)
{
    return (cpsr & 0x80) ? 1 : 0;
}
|
||||
|
||||
/**
 * Try to resolve a fault on behalf of the current user process (demand
 * paging, COW, etc.) through the address-space layer.
 *
 * @param esr  the ESR_EL1 value describing the fault
 * @param regs saved exception frame (currently unused here)
 * @return 1 if the fault was fixed and execution may resume, 0 otherwise
 */
static int user_fault_fixable(unsigned long esr, struct rt_hw_exp_stack *regs)
{
    rt_ubase_t level;
    enum rt_mm_fault_op fault_op;
    enum rt_mm_fault_type fault_type;
    struct rt_lwp *lwp;
    void *dfar;
    int ret = 0;
    unsigned char ec = ARM64_ESR_EXTRACT_EC(esr);
    rt_bool_t is_write = ARM64_ABORT_WNR(esr);

    /* classify the exception class into an MM-layer operation */
    switch (ec)
    {
    case ARM64_EC_INST_ABORT_FROM_LO_EXCEPTION:
        fault_op = MM_FAULT_OP_EXECUTE;
        fault_type = _get_type(esr);
        break;
    case ARM64_EC_INST_ABORT_WITHOUT_A_CHANGE:
    case ARM64_EC_DATA_ABORT_FROM_LO_EXCEPTION:
    case ARM64_EC_DATA_ABORT_WITHOUT_A_CHANGE:
        fault_op = is_write ? MM_FAULT_OP_WRITE : MM_FAULT_OP_READ;
        fault_type = _get_type(esr);
        break;
    default:
        /* non-fixable */
        /* 0 doubles as a "no op" sentinel tested below; fault_type is
         * deliberately left unset on this path and never read */
        fault_op = 0;
        break;
    }

    /* page fault exception only allow from user space */
    lwp = lwp_self();
    if (lwp && fault_op)
    {
        /* FAR_EL1 holds the faulting virtual address */
        __asm__ volatile("mrs %0, far_el1":"=r"(dfar));
        struct rt_aspace_fault_msg msg = {
            .fault_op = fault_op,
            .fault_type = fault_type,
            .fault_vaddr = dfar,
        };

        lwp_user_setting_save(rt_thread_self());
        /* save DAIF, then unmask IRQ/FIQ while the fix-up may block */
        __asm__ volatile("mrs %0, daif\nmsr daifclr, 0x3\nisb\n":"=r"(level));
        if (rt_aspace_fault_try_fix(lwp->aspace, &msg))
        {
            ret = 1;
        }
        /* restore the original interrupt mask state */
        __asm__ volatile("msr daif, %0\nisb\n"::"r"(level));
    }
    return ret;
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* this function will show registers of CPU
|
||||
*
|
||||
* @param regs the registers point
|
||||
*/
|
||||
void rt_hw_show_register(struct rt_hw_exp_stack *regs)
|
||||
{
|
||||
rt_kprintf("Execption:\n");
|
||||
rt_kprintf("X00:0x%16.16p X01:0x%16.16p X02:0x%16.16p X03:0x%16.16p\n", (void *)regs->x0, (void *)regs->x1, (void *)regs->x2, (void *)regs->x3);
|
||||
rt_kprintf("X04:0x%16.16p X05:0x%16.16p X06:0x%16.16p X07:0x%16.16p\n", (void *)regs->x4, (void *)regs->x5, (void *)regs->x6, (void *)regs->x7);
|
||||
rt_kprintf("X08:0x%16.16p X09:0x%16.16p X10:0x%16.16p X11:0x%16.16p\n", (void *)regs->x8, (void *)regs->x9, (void *)regs->x10, (void *)regs->x11);
|
||||
rt_kprintf("X12:0x%16.16p X13:0x%16.16p X14:0x%16.16p X15:0x%16.16p\n", (void *)regs->x12, (void *)regs->x13, (void *)regs->x14, (void *)regs->x15);
|
||||
rt_kprintf("X16:0x%16.16p X17:0x%16.16p X18:0x%16.16p X19:0x%16.16p\n", (void *)regs->x16, (void *)regs->x17, (void *)regs->x18, (void *)regs->x19);
|
||||
rt_kprintf("X20:0x%16.16p X21:0x%16.16p X22:0x%16.16p X23:0x%16.16p\n", (void *)regs->x20, (void *)regs->x21, (void *)regs->x22, (void *)regs->x23);
|
||||
rt_kprintf("X24:0x%16.16p X25:0x%16.16p X26:0x%16.16p X27:0x%16.16p\n", (void *)regs->x24, (void *)regs->x25, (void *)regs->x26, (void *)regs->x27);
|
||||
rt_kprintf("X28:0x%16.16p X29:0x%16.16p X30:0x%16.16p\n", (void *)regs->x28, (void *)regs->x29, (void *)regs->x30);
|
||||
rt_kprintf("SP_EL0:0x%16.16p\n", (void *)regs->sp_el0);
|
||||
rt_kprintf("SPSR :0x%16.16p\n", (void *)regs->cpsr);
|
||||
rt_kprintf("EPC :0x%16.16p\n", (void *)regs->pc);
|
||||
}
|
||||
|
||||
#ifndef RT_USING_PIC
|
||||
/**
 * Legacy IRQ dispatcher used when the generic PIC framework is disabled.
 *
 * Two builds: the BCM283x (Raspberry Pi legacy interrupt controller) path
 * polls the pending registers directly; the default path reads the active
 * interrupt from the GIC and dispatches through isr_table[].
 *
 * @param irq_context interrupt context pushed by rt_hw_trap_irq (unused here)
 */
static void _rt_hw_trap_irq(rt_interrupt_context_t irq_context)
{
#ifdef SOC_BCM283x
    extern rt_uint8_t core_timer_flag;
    void *param;
    uint32_t irq;
    rt_isr_handler_t isr_func;
    extern struct rt_irq_desc isr_table[];
    uint32_t value = 0;
    /* low 10 bits of the basic-pending register select the source below */
    value = IRQ_PEND_BASIC & 0x3ff;

    /* per-core timer interrupt is routed via the core IRQ source register */
    if(core_timer_flag != 0)
    {
        uint32_t cpu_id = rt_hw_cpu_id();
        uint32_t int_source = CORE_IRQSOURCE(cpu_id);
        if (int_source & 0x0f)
        {
            if (int_source & 0x08)
            {
                isr_func = isr_table[IRQ_ARM_TIMER].handler;
#ifdef RT_USING_INTERRUPT_INFO
                isr_table[IRQ_ARM_TIMER].counter++;
#endif
                if (isr_func)
                {
                    param = isr_table[IRQ_ARM_TIMER].param;
                    isr_func(IRQ_ARM_TIMER, param);
                }
            }
        }
    }

    /* local interrupt*/
    if (value)
    {
        /* bits 8/9 indicate a pending GPU interrupt in bank 1/2; otherwise
         * the low bits are basic (ARM-local) sources numbered from 64 */
        if (value & (1 << 8))
        {
            value = IRQ_PEND1;
            irq = __rt_ffs(value) - 1;
        }
        else if (value & (1 << 9))
        {
            value = IRQ_PEND2;
            irq = __rt_ffs(value) + 31;
        }
        else
        {
            value &= 0x0f;
            irq = __rt_ffs(value) + 63;
        }

        /* get interrupt service routine */
        isr_func = isr_table[irq].handler;
#ifdef RT_USING_INTERRUPT_INFO
        isr_table[irq].counter++;
#endif
        if (isr_func)
        {
            /* Interrupt for myself. */
            param = isr_table[irq].param;
            /* turn to interrupt service routine */
            isr_func(irq, param);
        }
    }
#else
    void *param;
    int ir, ir_self;
    rt_isr_handler_t isr_func;
    extern struct rt_irq_desc isr_table[];

    ir = rt_hw_interrupt_get_irq();

    if (ir == 1023)
    {
        /* Spurious interrupt */
        return;
    }

    /* bit 10~12 is cpuid, bit 0~9 is interrupt id */
    ir_self = ir & 0x3ffUL;

    /* get interrupt service routine */
    isr_func = isr_table[ir_self].handler;
#ifdef RT_USING_INTERRUPT_INFO
    isr_table[ir_self].counter++;
#ifdef RT_USING_SMP
    isr_table[ir_self].cpu_counter[rt_hw_cpu_id()]++;
#endif
#endif
    if (isr_func)
    {
        /* Interrupt for myself. */
        param = isr_table[ir_self].param;
        /* turn to interrupt service routine */
        isr_func(ir_self, param);
    }

    /* end of interrupt */
    rt_hw_interrupt_ack(ir);
#endif
}
|
||||
#else
|
||||
/* PIC-framework build: delegate the whole IRQ dispatch to the PIC layer. */
static void _rt_hw_trap_irq(struct rt_interrupt_context *this_ctx)
{
    rt_pic_do_traps();
}
|
||||
#endif
|
||||
|
||||
/**
 * Common IRQ trap entry called from the vector code.
 *
 * Wraps the saved exception frame in an interrupt context, pushes it onto
 * the per-CPU interrupt-context stack (so nested handlers can find the
 * active frame), dispatches, and pops it again.
 *
 * @param regs saved exception frame built by SAVE_IRQ_CONTEXT
 */
void rt_hw_trap_irq(struct rt_hw_exp_stack *regs)
{
    struct rt_interrupt_context this_ctx = {
        .context = regs,
        .node = RT_SLIST_OBJECT_INIT(this_ctx.node),
    };

    rt_interrupt_context_push(&this_ctx);
    _rt_hw_trap_irq(&this_ctx);
    rt_interrupt_context_pop();
}
|
||||
|
||||
#ifdef RT_USING_SMART
|
||||
#define DBG_CHECK_EVENT(regs, esr) dbg_check_event(regs, esr)
|
||||
#else
|
||||
#define DBG_CHECK_EVENT(regs, esr) (0)
|
||||
#endif
|
||||
|
||||
#ifndef RT_USING_PIC
|
||||
void rt_hw_trap_fiq(void)
|
||||
{
|
||||
void *param;
|
||||
int ir, ir_self;
|
||||
rt_isr_handler_t isr_func;
|
||||
extern struct rt_irq_desc isr_table[];
|
||||
|
||||
ir = rt_hw_interrupt_get_irq();
|
||||
|
||||
/* bit 10~12 is cpuid, bit 0~9 is interrup id */
|
||||
ir_self = ir & 0x3ffUL;
|
||||
|
||||
/* get interrupt service routine */
|
||||
isr_func = isr_table[ir_self].handler;
|
||||
param = isr_table[ir_self].param;
|
||||
|
||||
/* turn to interrupt service routine */
|
||||
isr_func(ir_self, param);
|
||||
|
||||
/* end of interrupt */
|
||||
rt_hw_interrupt_ack(ir);
|
||||
}
|
||||
#else
|
||||
/* PIC-framework build: delegate FIQ dispatch to the PIC layer. */
void rt_hw_trap_fiq(void)
{
    rt_pic_do_traps();
}
|
||||
#endif
|
||||
|
||||
void print_exception(unsigned long esr, unsigned long epc);
void SVC_Handler(struct rt_hw_exp_stack *regs);

/**
 * Synchronous exception entry.
 *
 * Dispatch order: debugger events first, then AArch64 SVC (syscalls), then
 * (RT_USING_SMART) an attempt to fix the fault via the address-space layer.
 * Anything left is fatal: dump state, optionally kill the faulting user
 * process, backtrace, and shut the CPU down.
 *
 * @param regs saved exception frame built by the vector code
 */
void rt_hw_trap_exception(struct rt_hw_exp_stack *regs)
{
    unsigned long esr;
    unsigned char ec;

    asm volatile("mrs %0, esr_el1":"=r"(esr));
    /* ESR_EL1[31:26] is the exception class (EC) */
    ec = (unsigned char)((esr >> 26) & 0x3fU);

    if (DBG_CHECK_EVENT(regs, esr))
    {
        return;
    }
    else if (ec == 0x15) /* is 64bit syscall ? */
    {
        SVC_Handler(regs);
        /* never return here */
    }

#ifdef RT_USING_SMART
    /**
     * Note: check_user_stack will take lock and it will possibly be a dead-lock
     * if exception comes from kernel.
     */
    if ((regs->cpsr & 0x1f) == 0)
    {
        /* fault taken from EL0 (user): safe to try a blocking fix-up */
        if (user_fault_fixable(esr, regs))
            return;
    }
    else
    {
        /* kernel-mode fault: only try the fix-up when scheduling is legal */
        if (_irq_is_disable(regs->cpsr))
        {
            LOG_E("Kernel fault from interrupt/critical section");
        }
        if (rt_critical_level() != 0)
        {
            LOG_E("scheduler is not available");
        }
        else if (user_fault_fixable(esr, regs))
            return;
    }
#endif
    print_exception(esr, regs->pc);
    rt_hw_show_register(regs);
    LOG_E("current thread: %s\n", rt_thread_self()->parent.name);

#ifdef RT_USING_FINSH
    list_thread();
#endif

#ifdef RT_USING_LWP
    /* restore normal execution environment */
    __asm__ volatile("msr daifclr, 0x3\ndmb ishst\nisb\n");
    _check_fault(regs, 0, "user fault");
#endif

    /* kernel-side backtrace from the faulting frame, then halt */
    struct rt_hw_backtrace_frame frame = {.fp = regs->x29, .pc = regs->pc};
    rt_backtrace_frame(rt_thread_self(), &frame);
    rt_hw_cpu_shutdown();
}
|
||||
|
||||
/**
 * SError (asynchronous system error) trap entry — always fatal: dump the
 * register frame and thread list, then shut the CPU down.
 *
 * @param regs saved exception frame
 */
void rt_hw_trap_serror(struct rt_hw_exp_stack *regs)
{
    rt_kprintf("SError\n");
    rt_hw_show_register(regs);
    rt_kprintf("current: %s\n", rt_thread_self()->parent.name);
#ifdef RT_USING_FINSH
    list_thread();
#endif
    rt_hw_cpu_shutdown();
}
|
||||
120
RT_Thread/libcpu/aarch64/common/up/context_gcc.S
Normal file
120
RT_Thread/libcpu/aarch64/common/up/context_gcc.S
Normal file
@ -0,0 +1,120 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2021-05-18 Jesven the first version
|
||||
* 2023-06-24 Shell Support backtrace for user thread
|
||||
* 2024-01-06 Shell Fix barrier on irq_disable/enable
|
||||
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
|
||||
*/
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#define __ASSEMBLY__
|
||||
#endif
|
||||
|
||||
#include "context_gcc.h"
|
||||
#include "../include/vector_gcc.h"
|
||||
|
||||
#include <rtconfig.h>
|
||||
#include <asm-generic.h>
|
||||
#include <asm-fpu.h>
|
||||
#include <armv8.h>
|
||||
|
||||
/**
 * Context switch status (UP build).
 * rt_interrupt_from_thread / rt_interrupt_to_thread hold the address of the
 * outgoing / incoming thread's saved-SP slot; rt_thread_switch_interrupt_flag
 * is set when a switch was requested from interrupt context and is consumed
 * on IRQ exit by rt_hw_vector_irq_sched.
 */
.section .bss
rt_interrupt_from_thread:
    .quad 0
rt_interrupt_to_thread:
    .quad 0
rt_thread_switch_interrupt_flag:
    .quad 0

.section .text

/*
 * void rt_hw_context_switch_to(rt_ubase_t to);
 * X0 --> to sp
 * First switch into a thread: there is no outgoing context to save.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    clrex                       /* drop any stale exclusive-monitor state */
    ldr x0, [x0]                /* fetch the thread's saved stack pointer */
    RESTORE_CONTEXT_SWITCH x0
    NEVER_RETURN

/*
 * void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to);
 * X0 --> from sp
 * X1 --> to sp
 * X2 --> to thread
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
    clrex
    SAVE_CONTEXT_SWITCH x19, x20

    mov x2, sp
    str x2, [x0]                // store sp in preempted tasks TCB
    ldr x0, [x1]                // get new task stack pointer

    RESTORE_CONTEXT_SWITCH x0
    NEVER_RETURN
|
||||
|
||||
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt

/*
 * void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to, rt_thread_t from_thread, rt_thread_t to_thread);
 *
 * Request a context switch from interrupt context: record from/to and raise
 * the pending flag; the actual switch happens on IRQ exit in
 * rt_hw_context_switch_interrupt_do. If a request is already pending, only
 * the "to" thread is retargeted ("from" stays the originally preempted one).
 */
rt_hw_context_switch_interrupt:
    ldr x6, =rt_thread_switch_interrupt_flag
    ldr x7, [x6]
    cmp x7, #1
    b.eq _reswitch              /* already pending: just update "to" */

    /* set rt_interrupt_from_thread */
    ldr x4, =rt_interrupt_from_thread
    str x0, [x4]

    /* set rt_thread_switch_interrupt_flag to 1 */
    mov x7, #1
    str x7, [x6]

    /* x1 and lr live across the C call below */
    stp x1, x30, [sp, #-0x10]!
#ifdef RT_USING_SMART
    mov x0, x2
    bl lwp_user_setting_save
#endif
    ldp x1, x30, [sp], #0x10
_reswitch:
    ldr x6, =rt_interrupt_to_thread // set rt_interrupt_to_thread
    str x1, [x6]
    ret

.globl rt_hw_context_switch_interrupt_do

/**
 * rt_hw_context_switch_interrupt_do(void)
 * Perform the switch that was requested from interrupt context; entered
 * from the IRQ exit path with the pending flag already cleared.
 */
rt_hw_context_switch_interrupt_do:
    clrex
    SAVE_CONTEXT_SWITCH_FAST

    ldr x3, =rt_interrupt_from_thread
    ldr x4, [x3]
    mov x0, sp
    str x0, [x4]                // store sp in preempted tasks's tcb

    ldr x3, =rt_interrupt_to_thread
    ldr x4, [x3]
    ldr x0, [x4]                // get new task's stack pointer

    RESTORE_CONTEXT_SWITCH x0
    NEVER_RETURN
|
||||
83
RT_Thread/libcpu/aarch64/common/up/context_gcc.h
Normal file
83
RT_Thread/libcpu/aarch64/common/up/context_gcc.h
Normal file
@ -0,0 +1,83 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2024, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2024-03-25 Shell Trimming unecessary ops and
|
||||
* improve the performance of ctx switch
|
||||
*/
|
||||
|
||||
#ifndef __ARM64_CONTEXT_H__
|
||||
#define __ARM64_CONTEXT_H__
|
||||
|
||||
#include "../include/context_gcc.h"
|
||||
|
||||
#include <rtconfig.h>
|
||||
#include <asm-generic.h>
|
||||
#include <asm-fpu.h>
|
||||
#include <armv8.h>
|
||||
|
||||
/* restore address space */
.macro RESTORE_ADDRESS_SPACE
#ifdef RT_USING_SMART
    bl rt_thread_self
    mov x19, x0                 /* keep the thread pointer across calls */
    bl lwp_aspace_switch        /* switch to the thread's user aspace */
    mov x0, x19
    bl lwp_user_setting_restore
#endif
.endm

/* Restore a thread saved by SAVE_CONTEXT_SWITCH and resume it. */
.macro RESTORE_CONTEXT_SWITCH using_sp
    /* Set the SP to point to the stack of the task being restored. */
    mov sp, \using_sp

    RESTORE_ADDRESS_SPACE

    _RESTORE_CONTEXT_SWITCH
.endm

/*
 * Unwind a full IRQ/exception frame (reverse of the save path) and ERET.
 * When RT_USING_SMART is on and the frame was taken from EL0
 * (SPSR.M[4:0] == 0, detected by the TST below), exit through
 * arch_ret_to_user instead of a plain ERET.
 */
.macro RESTORE_IRQ_CONTEXT
#ifdef RT_USING_SMART
    BL rt_thread_self
    MOV X19, X0
    BL lwp_aspace_switch
    MOV X0, X19
    BL lwp_user_setting_restore
#endif
    LDP X2, X3, [SP], #0x10 /* SPSR and ELR. */

    TST X3, #0x1f               /* Z flag set => returning to EL0 */
    MSR SPSR_EL1, X3
    MSR ELR_EL1, X2

    LDP X29, X30, [SP], #0x10
    MSR SP_EL0, X29
    LDP X28, X29, [SP], #0x10
    MSR FPCR, X28
    MSR FPSR, X29
    LDP X28, X29, [SP], #0x10
    LDP X26, X27, [SP], #0x10
    LDP X24, X25, [SP], #0x10
    LDP X22, X23, [SP], #0x10
    LDP X20, X21, [SP], #0x10
    LDP X18, X19, [SP], #0x10
    LDP X16, X17, [SP], #0x10
    LDP X14, X15, [SP], #0x10
    LDP X12, X13, [SP], #0x10
    LDP X10, X11, [SP], #0x10
    LDP X8, X9, [SP], #0x10
    LDP X6, X7, [SP], #0x10
    LDP X4, X5, [SP], #0x10
    LDP X2, X3, [SP], #0x10
    LDP X0, X1, [SP], #0x10
    RESTORE_FPU SP
#ifdef RT_USING_SMART
    BEQ arch_ret_to_user        /* condition computed by TST above */
#endif
    ERET
.endm
|
||||
|
||||
#endif /* __ARM64_CONTEXT_H__ */
|
||||
61
RT_Thread/libcpu/aarch64/common/up/vector_gcc.S
Normal file
61
RT_Thread/libcpu/aarch64/common/up/vector_gcc.S
Normal file
@ -0,0 +1,61 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
|
||||
*/
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#define __ASSEMBLY__
|
||||
#endif
|
||||
|
||||
#include "../include/vector_gcc.h"
|
||||
#include "context_gcc.h"
|
||||
|
||||
#include <rtconfig.h>
|
||||
#include <asm-generic.h>
|
||||
#include <asm-fpu.h>
|
||||
#include <armv8.h>
|
||||
|
||||
.section .text
|
||||
|
||||
/* FIQ entry: save a full frame, dispatch, then join the common IRQ exit. */
vector_fiq:
    .align 8
    .globl vector_fiq

    SAVE_IRQ_CONTEXT
    bl rt_hw_trap_fiq

    b rt_hw_irq_exit

.globl rt_thread_switch_interrupt_flag
.globl rt_hw_context_switch_interrupt_do

/**
 * void rt_hw_vector_irq_sched(void *eframe)
 * @brief do IRQ scheduling
 */
rt_hw_vector_irq_sched:
    .globl rt_hw_vector_irq_sched
    .align 8

    /**
     * if rt_thread_switch_interrupt_flag set, jump to
     * rt_hw_context_switch_interrupt_do and don't return
     */
    ldr x1, =rt_thread_switch_interrupt_flag
    ldr x2, [x1]
    cmp x2, #1
    bne 1f

    /* clear flag */
    mov x2, #0
    str x2, [x1]

    bl rt_hw_context_switch_interrupt_do

1:
    b rt_hw_irq_exit
|
||||
134
RT_Thread/libcpu/aarch64/common/vector_gcc.S
Normal file
134
RT_Thread/libcpu/aarch64/common/vector_gcc.S
Normal file
@ -0,0 +1,134 @@
|
||||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2018-10-06 ZhaoXiaowei the first version
|
||||
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
|
||||
*/
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#define __ASSEMBLY__
|
||||
#endif
|
||||
|
||||
#include <rtconfig.h>
|
||||
|
||||
.text
.globl system_vectors
.globl vector_exception
.globl vector_irq
.globl vector_fiq

/*
 * EL1 exception vector table (VBAR_EL1): 2KB-aligned, 16 entries spaced
 * 0x80 bytes apart. Only the "current EL with SP_ELx" and "lower EL,
 * aarch64" groups are supported; everything else (SP_EL0 stack usage,
 * aarch32 lower EL) is routed to vector_serror, which panics.
 */
system_vectors:
.align 11
    .set    VBAR, system_vectors
    .org    VBAR

    /* Exception from CurrentEL (EL1) with SP_EL0 (SPSEL=1) */
    .org (VBAR + 0x00 + 0)
    b vector_serror             /* Synchronous */
    .org (VBAR + 0x80 + 0)
    b vector_serror             /* IRQ/vIRQ */
    .org (VBAR + 0x100 + 0)
    b vector_serror             /* FIQ/vFIQ */
    .org (VBAR + 0x180 + 0)
    b vector_serror             /* Error/vError */

    /* Exception from CurrentEL (EL1) with SP_ELn */
    .org (VBAR + 0x200 + 0)
    b vector_exception          /* Synchronous */
    .org (VBAR + 0x280 + 0)
    b vector_irq                /* IRQ/vIRQ */
    .org (VBAR + 0x300 + 0)
    b vector_fiq                /* FIQ/vFIQ */
    .org (VBAR + 0x380 + 0)
    b vector_serror

    /* Exception from lower EL, aarch64 */
    .org (VBAR + 0x400 + 0)
    b vector_exception
    .org (VBAR + 0x480 + 0)
    b vector_irq
    .org (VBAR + 0x500 + 0)
    b vector_fiq
    .org (VBAR + 0x580 + 0)
    b vector_serror

    /* Exception from lower EL, aarch32 (unsupported) */
    .org (VBAR + 0x600 + 0)
    b vector_serror
    .org (VBAR + 0x680 + 0)
    b vector_serror
    .org (VBAR + 0x700 + 0)
    b vector_serror
    .org (VBAR + 0x780 + 0)
    b vector_serror
|
||||
|
||||
#include "include/vector_gcc.h"
/* callee-saved register holding the exception-frame pointer across C calls */
#define EFRAMEX x19

/* Synchronous exception: save frame, hand to rt_hw_trap_exception, resume. */
START_POINT(vector_exception)
    SAVE_IRQ_CONTEXT
    mov EFRAMEX, sp

    SAVE_USER_CTX EFRAMEX, x0

    mov x0, EFRAMEX
    bl rt_hw_trap_exception
    RESTORE_USER_CTX EFRAMEX, x0

    /* do exception switch for IRQ/exception handlers */
    EXCEPTION_SWITCH sp, x0

    RESTORE_IRQ_CONTEXT
    eret
START_POINT_END(vector_exception)

/* SError / unsupported vector: report and never return. */
START_POINT(vector_serror)
    SAVE_IRQ_CONTEXT
    mov EFRAMEX, sp

    SAVE_USER_CTX EFRAMEX, x0

    mov x0, EFRAMEX
    bl rt_hw_trap_serror

    RESTORE_USER_CTX EFRAMEX, x0

    NEVER_RETURN
START_POINT_END(vector_serror)

/* IRQ: save frame, dispatch, then run deferred IRQ-context scheduling. */
START_POINT(vector_irq)
    SAVE_IRQ_CONTEXT
    mov EFRAMEX, sp

    /* trace IRQ level */
    bl rt_interrupt_enter

    SAVE_USER_CTX EFRAMEX, x0

    /* handle the IRQ */
    mov x0, EFRAMEX
    bl rt_hw_trap_irq

    RESTORE_USER_CTX EFRAMEX, x0

    /* restore IRQ level */
    bl rt_interrupt_leave

    mov x0, EFRAMEX
    bl rt_hw_vector_irq_sched

    b rt_hw_irq_exit
START_POINT_END(vector_irq)

/* Common exit: optional exception-time switch, then unwind and ERET. */
rt_hw_irq_exit:
    .globl rt_hw_irq_exit

    /* do exception switch for IRQ/exception handlers */
    EXCEPTION_SWITCH sp, x0

    RESTORE_IRQ_CONTEXT
    eret
|
||||
Reference in New Issue
Block a user