原始版本

This commit is contained in:
冯佳
2025-06-19 21:56:46 +08:00
parent fe98e5f010
commit a4841450cf
4152 changed files with 1910684 additions and 0 deletions

View File

@ -0,0 +1,19 @@
# RT-Thread building script for bridge
#
# Aggregates the build groups of the 'common' directory and of the
# CPU-specific directory named by rtconfig.CPU (when it is not 'common').
import os
from building import *

Import('rtconfig')

cwd = GetCurrentDir()
group = []

# add common code files
group = group + SConscript(os.path.join('common', 'SConscript'))

# cpu porting code files
if rtconfig.CPU != 'common':
    group = group + SConscript(os.path.join(rtconfig.CPU, 'SConscript'))

Return('group')

View File

@ -0,0 +1,9 @@
menu "RT-Thread MIPS CPU"
config RT_USING_FPU
    bool "Using Floating Point Unit"
    default n
    help
        Use the Floating Point Unit (FPU) in code.
endmenu

View File

@ -0,0 +1,13 @@
# RT-Thread building script for component
#
# Collects every C, C++ and GAS assembly source in this directory into
# the 'libcpu' build group, exporting the directory as an include path.
from building import *

Import('rtconfig')

cwd = GetCurrentDir()

src = []
for pattern in ('*.c', '*.cpp', '*_gcc.S'):
    src += Glob(pattern)

CPPPATH = [cwd]
group = DefineGroup('libcpu', src, depend = [''], CPPPATH = CPPPATH)

Return('group')

View File

@ -0,0 +1,306 @@
/*
* Assembly Macros For MIPS
*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-12-04 Jiaxun Yang Initial version
*/
#ifndef __ASM_H__
#define __ASM_H__
#include <rtconfig.h>
/*
* LEAF - declare leaf routine
*/
#define LEAF(symbol) \
.globl symbol; \
.align 2; \
.type symbol,@function; \
.ent symbol,0; \
symbol: .frame sp,0,ra
/*
* NESTED - declare nested routine entry point
*/
#define NESTED(symbol, framesize, rpc) \
.globl symbol; \
.align 2; \
.type symbol,@function; \
.ent symbol,0; \
symbol: .frame sp, framesize, rpc
/*
* END - mark end of function
*/
#define END(function) \
.end function; \
.size function,.-function
/*
* EXPORT - export definition of symbol
*/
#define EXPORT(symbol) \
.globl symbol; \
symbol:
/*
* FEXPORT - export definition of a function symbol
*/
#define FEXPORT(symbol) \
.globl symbol; \
.type symbol,@function; \
symbol:
/*
* Global data declaration with size.
*/
#define EXPORTS(name,sz) \
.globl name; \
.type name,@object; \
.size name,sz; \
name:
/*
* Weak data declaration with size.
*/
#define WEXPORT(name,sz) \
.weakext name; \
.type name,@object; \
.size name,sz; \
name:
/*
* Global data reference with size.
*/
#define IMPORT(name, size) \
.extern name,size
/*
* Global zeroed data.
*/
#define BSS(name,size) \
.type name,@object; \
.comm name,size
/*
* Local zeroed data.
*/
#define LBSS(name,size) \
.lcomm name,size
/*
* ABS - export absolute symbol
*/
#define ABS(symbol,value) \
.globl symbol; \
symbol = value
#define TEXT(msg) \
.pushsection .data; \
8: .asciiz msg; \
.popsection;
/*
 * ENTRY - declare a routine entry point.
 * Note: the label must be written as `name:` (as in LEAF/NESTED/EXPORT
 * above); `name##:` attempts to paste an identifier with ':', which is
 * not a valid preprocessing token and is rejected by cpp when expanded.
 */
#define ENTRY(name) \
        .globl name; \
        .align 2; \
        .ent name,0; \
name:
/*
* Macros to handle different pointer/register sizes for 32/64-bit code
*/
#if defined ARCH_MIPS64
/*
* Size of a register
*/
#define SZREG 8
/*
* Use the following macros in assemblercode to load/store registers,
* pointers etc.
*/
#define REG_S sd
#define REG_L ld
#define REG_SUBU dsubu
#define REG_ADDU daddu
/*
* How to add/sub/load/store/shift C int variables.
*/
#define INT_ADD dadd
#define INT_ADDU daddu
#define INT_ADDI daddi
#define INT_ADDIU daddiu
#define INT_SUB dsub
#define INT_SUBU dsubu
#define INT_L ld
#define INT_S sd
#define INT_SLL dsll
#define INT_SLLV dsllv
#define INT_SRL dsrl
#define INT_SRLV dsrlv
#define INT_SRA dsra
#define INT_SRAV dsrav
/*
* Use the following macros in assemblercode to load/store registers,
* pointers etc.
*/
#define LONG_ADD dadd
#define LONG_ADDU daddu
#define LONG_ADDI daddi
#define LONG_ADDIU daddiu
#define LONG_SUB dsub
#define LONG_SUBU dsubu
#define LONG_L ld
#define LONG_S sd
#define LONG_SP sdp /* NOTE(review): 'sdp' is not a standard MIPS mnemonic (cf. LONG_S = sd); macro is unused here -- verify before use */
#define LONG_SLL dsll
#define LONG_SLLV dsllv
#define LONG_SRL dsrl
#define LONG_SRLV dsrlv
#define LONG_SRA dsra
#define LONG_SRAV dsrav
#define LONG .dword
#define LONGSIZE 8
#define LONGMASK 7
#define LONGLOG 3
/*
* How to add/sub/load/store/shift pointers.
*/
#define PTR_ADD dadd
#define PTR_ADDU daddu
#define PTR_ADDI daddi
#define PTR_ADDIU daddiu
#define PTR_SUB dsub
#define PTR_SUBU dsubu
#define PTR_L ld
#define PTR_S sd
#define PTR_LA dla
#define PTR_LI dli
#define PTR_SLL dsll
#define PTR_SLLV dsllv
#define PTR_SRL dsrl
#define PTR_SRLV dsrlv
#define PTR_SRA dsra
#define PTR_SRAV dsrav
#define PTR_SCALESHIFT 3
#define PTR .dword
#define PTRSIZE 8
#define PTRLOG 3
#define MFC0 dmfc0
#define MTC0 dmtc0
#else
/*
* Size of a register
*/
#define SZREG 4
/*
* Use the following macros in assemblercode to load/store registers,
* pointers etc.
*/
#define REG_S sw
#define REG_L lw
#define REG_SUBU subu
#define REG_ADDU addu
/*
* How to add/sub/load/store/shift C int variables.
*/
#define INT_ADD add
#define INT_ADDU addu
#define INT_ADDI addi
#define INT_ADDIU addiu
#define INT_SUB sub
#define INT_SUBU subu
#define INT_L lw
#define INT_S sw
#define INT_SLL sll
#define INT_SLLV sllv
#define INT_SRL srl
#define INT_SRLV srlv
#define INT_SRA sra
#define INT_SRAV srav
/*
* How to add/sub/load/store/shift C long variables.
*/
#define LONG_ADD add
#define LONG_ADDU addu
#define LONG_ADDI addi
#define LONG_ADDIU addiu
#define LONG_SUB sub
#define LONG_SUBU subu
#define LONG_L lw
#define LONG_S sw
#define LONG_SLL sll
#define LONG_SLLV sllv
#define LONG_SRL srl
#define LONG_SRLV srlv
#define LONG_SRA sra
#define LONG_SRAV srav
#define LONG .word
#define LONGSIZE 4
#define LONGMASK 3
#define LONGLOG 2
/*
* How to add/sub/load/store/shift pointers.
*/
#define PTR_ADD add
#define PTR_ADDU addu
#define PTR_ADDI addi
#define PTR_ADDIU addiu
#define PTR_SUB sub
#define PTR_SUBU subu
#define PTR_L lw
#define PTR_S sw
#define PTR_LA la
#define PTR_SLL sll
#define PTR_SLLV sllv
#define PTR_SRL srl
#define PTR_SRLV srlv
#define PTR_SRA sra
#define PTR_SRAV srav
#define PTR_SCALESHIFT 2
#define PTR .word
#define PTRSIZE 4
#define PTRLOG 2
/*
* Some cp0 registers were extended to 64bit for MIPS III.
*/
#define MFC0 mfc0
#define MTC0 mtc0
#endif
#define SSNOP sll zero, zero, 1
#endif /* end of __ASM_H__ */

View File

@ -0,0 +1,124 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-12-04 Jiaxun Yang Initial version
* 2020-07-26 lizhirui Fixed some problems
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include "mips_regs.h"
#include "stackframe.h"
.section ".text", "ax"
.set noreorder
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)
 * a0 --> from (address of the preempted thread's saved-SP slot)
 * a1 --> to   (address of the next thread's saved-SP slot)
 */
    .globl rt_hw_context_switch
rt_hw_context_switch:
    /* the caller's return address becomes the resume PC of this thread */
    MTC0    ra, CP0_EPC
    SAVE_ALL

    REG_S   sp, 0(a0)       /* store sp in preempted tasks TCB */
    REG_L   sp, 0(a1)       /* get new task stack pointer */
    RESTORE_ALL_AND_RET
/*
 * void rt_hw_context_switch_to(rt_uint32 to)
 * a0 --> to (address of the next thread's saved-SP slot)
 * Restores the destination context without saving the current one
 * (used for the very first switch into a thread).
 */
    .globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    REG_L   sp, 0(a0)       /* get new task stack pointer */
    RESTORE_ALL_AND_RET
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to)
 * Called while servicing an interrupt: only records the pending switch;
 * the actual switch is performed by mips_irq_handle on interrupt exit.
 */
    .globl rt_thread_switch_interrupt_flag
    .globl rt_interrupt_from_thread
    .globl rt_interrupt_to_thread
    .globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
    PTR_LA  t0, rt_thread_switch_interrupt_flag
    REG_L   t1, 0(t0)
    nop
    /* if a switch is already pending, only update the destination thread */
    bnez    t1, _reswitch
    nop
    li      t1, 0x01                        /* set rt_thread_switch_interrupt_flag to 1 */
    LONG_S  t1, 0(t0)
    PTR_LA  t0, rt_interrupt_from_thread    /* set rt_interrupt_from_thread */
    LONG_S  a0, 0(t0)
_reswitch:
    PTR_LA  t0, rt_interrupt_to_thread      /* set rt_interrupt_to_thread */
    LONG_S  a1, 0(t0)
    jr      ra
    nop
/*
 * mips_irq_handle - common low-level exception/interrupt entry.
 * Saves the interrupted context, runs the C dispatcher on the kernel
 * stack, then either restores the same context or switches to the
 * thread recorded by rt_hw_context_switch_interrupt().
 */
    .globl rt_interrupt_enter
    .globl rt_interrupt_leave
    .globl rt_general_exc_dispatch
    .globl mips_irq_handle
mips_irq_handle:
    SAVE_ALL
    /* let k0 keep the current context sp */
    move    k0, sp
    /* switch to kernel stack */
    PTR_LA  sp, _system_stack
    jal     rt_interrupt_enter
    nop
    /* get old SP from k0 as parameter in a0 */
    move    a0, k0
    jal     rt_general_exc_dispatch
    nop
    jal     rt_interrupt_leave
    nop
    /* switch sp back to thread context */
    move    sp, k0
    /*
     * if rt_thread_switch_interrupt_flag is set, perform the deferred
     * thread switch before restoring; otherwise restore the same context
     */
    PTR_LA  k0, rt_thread_switch_interrupt_flag
    LONG_L  k1, 0(k0)
    beqz    k1, spurious_interrupt
    nop
    LONG_S  zero, 0(k0) /* clear flag */
    nop
    /*
     * switch to the new thread
     */
    PTR_LA  k0, rt_interrupt_from_thread
    LONG_L  k1, 0(k0)
    nop
    LONG_S  sp, 0(k1)   /* store sp in preempted task TCB */
    PTR_LA  k0, rt_interrupt_to_thread
    LONG_L  k1, 0(k0)
    nop
    LONG_L  sp, 0(k1)   /* get new task stack pointer */
    /* NOTE(review): this jump is redundant -- it targets the next instruction */
    j       spurious_interrupt
    nop
spurious_interrupt:
    RESTORE_ALL_AND_RET
.set reorder

View File

@ -0,0 +1,64 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-12-04 Jiaxun Yang Initial version
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif

#include <mips.h>
#include <rtconfig.h>
#include "asm.h"

    .section ".start", "ax"
    .set noreorder

    /* the program entry */
    .globl _rtthread_entry
_rtthread_entry:
#ifndef RT_USING_SELF_BOOT
    .globl _start
_start:
#endif
    /* any fall-through/return restarts at the entry point */
    PTR_LA  ra, _rtthread_entry

    /* disable interrupt */
    MTC0    zero, CP0_CAUSE
    MTC0    zero, CP0_STATUS    /* Set CPU to disable interrupt. */
    ehb

#ifdef ARCH_MIPS64
    /* enable 64-bit kernel address segments */
    dli     t0, ST0_KX
    MTC0    t0, CP0_STATUS
#endif

    /* setup stack pointer */
    PTR_LA  sp, _system_stack
    PTR_LA  gp, _gp

    bal     rt_cpu_early_init
    nop

    /*
     * clear bss: test before storing so nothing is written when .bss is
     * empty, and __bss_end itself (exclusive end, per the usual linker
     * script convention) is never touched
     */
    PTR_LA  t0, __bss_start
    PTR_LA  t1, __bss_end
_clr_bss_loop:
    beq     t0, t1, _clr_bss_done
    nop
    sw      zero, 0(t0)
    b       _clr_bss_loop
    addu    t0, 4           /* branch delay slot: advance one word */
_clr_bss_done:

    /* jump to RT-Thread RTOS */
    jal     rtthread_startup
    nop

    /* restart, never die */
    j       _start
    nop

View File

@ -0,0 +1,212 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-12-04 Jiaxun Yang Initial version
*/
#include <rtthread.h>
#include <rthw.h>
#include "exception.h"
#include "mips_regs.h"
/**
* @addtogroup MIPS
*/
/*@{*/
extern rt_ubase_t __ebase_entry;
rt_ubase_t rt_interrupt_from_thread;
rt_ubase_t rt_interrupt_to_thread;
rt_ubase_t rt_thread_switch_interrupt_flag;
const char *exception_name[] = {
"Interrupt",
"(X)TLB Modify Exception",
"(X)TLB Read/Fetch Exception",
"(X)TLB Write Exception",
"Address Read/Fetch Exception",
"Address Write Exception",
"",
"",
"Syscall",
"Breakpoint",
"Reversed Instruction Exception",
"Coprocessor Unit Invalid",
"Overflow",
"Trap",
"FPU Exception in Vector Instruction",
"FPU Exception",
"Loongson Custom Exception",
"",
"",
"(X)TLB Read Denied Exception",
"(X)TLB Execute Denied Exception",
"Vector Module Disabled Exception",
"",
"",
"",
"",
"",
"",
"",
"",
"Cache Error Exception",
""
};
/**
 * Disable all maskable interrupts.
 *
 * @return the previous CP0 Status word, to be passed back to
 *         rt_hw_interrupt_enable() to restore the prior state.
 */
rt_base_t rt_hw_interrupt_disable(void)
{
    rt_base_t level;

    level = read_c0_status();   /* capture current Status for the caller */
    clear_c0_status(ST0_IE);    /* drop the global interrupt-enable bit */

    return level;
}
/**
 * Restore the CP0 Status word previously returned by
 * rt_hw_interrupt_disable(); interrupts become enabled again only if
 * they were enabled when that level was captured.
 */
void rt_hw_interrupt_enable(rt_base_t level)
{
    write_c0_status(level);
}
/**
* exception handle table
*/
#define RT_EXCEPTION_MAX 32
exception_func_t sys_exception_handlers[RT_EXCEPTION_MAX];
/**
 * Install a handler for exception code n.
 *
 * @param n    EXCCODE index into sys_exception_handlers[]
 * @param func new handler (must be non-null)
 * @return the handler previously installed, or 0 when n is out of
 *         range or func is null.
 */
exception_func_t rt_set_except_vector(int n, exception_func_t func)
{
    exception_func_t prev;

    /* reject out-of-range exception codes and null handlers */
    if (n < 0 || n >= RT_EXCEPTION_MAX || !func)
        return 0;

    prev = sys_exception_handlers[n];
    sys_exception_handlers[n] = func;

    return prev;
}
/**
 * Print all 32 saved general-purpose registers of *regs,
 * four per line, via rt_kprintf.
 */
void mips_dump_regs(struct pt_regs *regs)
{
    int idx;

    for (idx = 0; idx < 32; idx++)
    {
        rt_kprintf("%d: 0x%08x, ", idx, regs->regs[idx]);
        /* end the row after every fourth register */
        if ((idx & 3) == 3)
            rt_kprintf("\n");
    }
}
/* Fatal: unhandled TLB refill -- report the faulting PC and halt the CPU. */
void tlb_refill_handler(void)
{
    rt_kprintf("TLB-Miss Happens, EPC: 0x%08x\n", read_c0_epc());
    rt_hw_cpu_shutdown();
}
/* Fatal: cache error exception -- report the faulting PC and halt the CPU. */
void cache_error_handler(void)
{
    rt_kprintf("Cache Exception Happens, EPC: 0x%08x\n", read_c0_epc());
    rt_hw_cpu_shutdown();
}
/*
 * Default handler for exceptions with no registered handler: dump the
 * cause, EPC, exception name and register file, then halt the CPU.
 */
static void unhandled_exception_handle(struct pt_regs *regs)
{
    rt_kprintf("Unknown Exception, EPC: 0x%p, CAUSE: 0x%08x\n", read_c0_epc(), read_c0_cause());
    /* (cause >> 2) & 0x1f extracts the 5-bit EXCCODE field, which indexes exception_name[] */
    rt_kprintf("Exception Name:%s\n",exception_name[(read_c0_cause() >> 2) & 0x1f]);
#ifdef SOC_LS2K1000
    rt_kprintf("ExeCode = 0x%08x,BadAddr = 0x%p\n",(read_c0_cause() >> 2) & 0x1f,mmu_tlb_get_bad_vaddr());
#else
    rt_kprintf("ExeCode = 0x%08x\n",(read_c0_cause() >> 2) & 0x1f);
#endif
    rt_kprintf("ST0: 0x%08x ",regs->cp0_status);
    rt_kprintf("ErrorPC: 0x%p\n",read_c0_errorepc());
    mips_dump_regs(regs);
    rt_hw_cpu_shutdown();
}
/* Point every entry of the exception table at the catch-all handler. */
static void install_default_exception_handler(void)
{
    rt_int32_t idx;

    for (idx = 0; idx < RT_EXCEPTION_MAX; idx++)
    {
        sys_exception_handlers[idx] = (exception_func_t)unhandled_exception_handle;
    }
}
/**
 * Point CP0 EBase at our exception vector area and leave the CPU in
 * normal exception mode with all interrupt lines masked; handlers for
 * every exception code default to unhandled_exception_handle().
 *
 * @return RT_EOK always.
 */
int rt_hw_exception_init(void)
{
    rt_ubase_t ebase = (rt_ubase_t)&__ebase_entry;

#ifdef ARCH_MIPS64
    /* sign-extend the vector base into the 64-bit compatibility segment */
    ebase |= 0xffffffff00000000;
#endif
    write_c0_ebase(ebase);
    /* leave bootstrap/error/exception levels -> normal exception mode */
    clear_c0_status(ST0_BEV | ST0_ERL | ST0_EXL);
    /* mask all interrupt lines and keep interrupts globally disabled for now */
    clear_c0_status(ST0_IM | ST0_IE);
    /* allow coprocessor-0 access */
    set_c0_status(ST0_CU0);
    /* install the default exception handler */
    install_default_exception_handler();

    return RT_EOK;
}
/**
 * C-level dispatcher called from the low-level exception entry.
 * EXCCODE 0 means "interrupt": every line that is both pending
 * (Cause.IP) and unmasked (Status.IM) is serviced, in order 0..7.
 * Any other EXCCODE is routed through sys_exception_handlers[].
 */
void rt_general_exc_dispatch(struct pt_regs *regs)
{
    rt_ubase_t cause = read_c0_cause();
    rt_ubase_t exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;

    if (exccode != 0)
    {
        /* a genuine exception: hand it to the registered handler */
        if (sys_exception_handlers[exccode])
            sys_exception_handlers[exccode](regs);
        return;
    }

    {
        static const rt_ubase_t ip_bit[8] =
        {
            CAUSEF_IP0, CAUSEF_IP1, CAUSEF_IP2, CAUSEF_IP3,
            CAUSEF_IP4, CAUSEF_IP5, CAUSEF_IP6, CAUSEF_IP7,
        };
        rt_ubase_t pending = (cause & CAUSEF_IP) & (read_c0_status() & ST0_IM);
        int irq;

        for (irq = 0; irq < 8; irq++)
        {
            if (pending & ip_bit[irq])
                rt_do_mips_cpu_irq(irq);
        }
    }
}
/* Mask means disable the interrupt: clear the corresponding IM bit in CP0 Status. */
void mips_mask_cpu_irq(rt_uint32_t irq)
{
    clear_c0_status(1 << (STATUSB_IP0 + irq));
}
/* Unmask means enable the interrupt: set the corresponding IM bit in CP0 Status. */
void mips_unmask_cpu_irq(rt_uint32_t irq)
{
    set_c0_status(1 << (STATUSB_IP0 + irq));
}
/*@}*/

View File

@ -0,0 +1,28 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-12-04 Jiaxun Yang Initial version
*/
#ifndef __EXCEPTION_H__
#define __EXCEPTION_H__
#include "ptrace.h"
#ifndef __ASSEMBLY__
typedef void (* exception_func_t)(struct pt_regs *regs);
extern int rt_hw_exception_init(void);
extern exception_func_t sys_exception_handlers[];
extern void rt_do_mips_cpu_irq(rt_uint32_t ip);
exception_func_t rt_set_except_vector(int n, exception_func_t func);
extern void mips_mask_cpu_irq(rt_uint32_t irq);
extern void mips_unmask_cpu_irq(rt_uint32_t irq);
#endif
#endif /* end of __EXCEPTION_H__ */

View File

@ -0,0 +1,70 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-12-04 Jiaxun Yang Initial version
* 2020-07-26 lizhirui Add xtlb exception entry
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include <mips.h>
    /*
     * Exception vector area, placed at the CP0 EBase address; each vector
     * is a short trampoline at its architected offset.
     */
    .section ".exc_vectors", "ax"

    .extern tlb_refill_handler
    .extern cache_error_handler
    .extern mips_irq_handle

    /* 0x0 - TLB refill handler */
    .global tlb_refill_exception
    .type tlb_refill_exception,@function
ebase_start:
tlb_refill_exception:
    /* NOTE(review): refills fall through to the general handler; the
     * dedicated tlb_refill_handler declared above is not reached from
     * here -- verify this is intentional. */
    b _general_exception_handler
    nop

    /* 0x080 - XTLB refill handler (64-bit address spaces) */
    .org ebase_start + 0x080
    b _general_exception_handler
    nop

    /* 0x100 - Cache error handler */
    .org ebase_start + 0x100
    j cache_error_handler
    nop

    /* 0x180 - Exception/Interrupt handler */
    .global general_exception
    .type general_exception,@function
    .org ebase_start + 0x180
general_exception:
    b _general_exception_handler
    nop

    /* 0x200 - Special Exception Interrupt handler (when IV is set in CP0_CAUSE) */
    .global irq_exception
    .type irq_exception,@function
    .org ebase_start + 0x200
irq_exception:
    b _general_exception_handler
    nop

    /* general exception handler: trampoline into the common entry */
_general_exception_handler:
    .set noreorder
    PTR_LA  k0, mips_irq_handle
    jr      k0
    nop
    .set reorder

    /* interrupt handler
     * NOTE(review): identical to, and unreferenced instead of,
     * _general_exception_handler -- appears to be dead code. */
_irq_handler:
    .set noreorder
    PTR_LA  k0, mips_irq_handle
    jr      k0
    nop
    .set reorder

View File

@ -0,0 +1,23 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2016-09-07 Urey first version
*/
#ifndef _COMMON_MIPS_H_
#define _COMMON_MIPS_H_
#include "mips_cfg.h"
#include "ptrace.h"
#include "mips_types.h"
#include "asm.h"
#include "mips_regs.h"
#include "mips_addrspace.h"
#include "mips_cache.h"
#include "exception.h"
#endif /* _COMMON_MIPS_H_ */

View File

@ -0,0 +1,49 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2010-05-17 sangwei first version
*/
#ifndef __MIPS_INC__
#define __MIPS_INC__
#define zero $0 /* wired zero */
// #define at $1
#define v0 $2 /* return value */
#define v1 $3
#define a0 $4 /* argument registers */
#define a1 $5
#define a2 $6
#define a3 $7
#define t0 $8 /* caller saved */
#define t1 $9
#define t2 $10
#define t3 $11
#define t4 $12
#define t5 $13
#define t6 $14
#define t7 $15
#define s0 $16 /* callee saved */
#define s1 $17
#define s2 $18
#define s3 $19
#define s4 $20
#define s5 $21
#define s6 $22
#define s7 $23
#define t8 $24 /* caller saved */
#define t9 $25
#define jp $25 /* PIC jump register */
#define k0 $26 /* kernel scratch */
#define k1 $27
#define gp $28 /* global pointer */
#define sp $29 /* stack pointer */
#define fp $30 /* frame pointer */
#define s8 $30 /* same like fp! */
#define ra $31 /* return address */
#endif /* end of __MIPS_INC__ */

View File

@ -0,0 +1,165 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-12-04 Jiaxun Yang Initial version
*/
#ifndef _MIPS_ADDRSPACE_H_
#define _MIPS_ADDRSPACE_H_
/*
* Configure language
*/
#ifdef __ASSEMBLY__
#define _ATYPE_
#define _ATYPE32_
#define _ATYPE64_
#define _CONST64_(x) x
#else
#define _ATYPE_ __PTRDIFF_TYPE__
#define _ATYPE32_ int
#define _ATYPE64_ __s64
#ifdef ARCH_MIPS64
#define _CONST64_(x) x ## L
#else
#define _CONST64_(x) x ## LL
#endif
#endif
/*
* 32-bit MIPS address spaces
*/
#ifdef __ASSEMBLY__
#define _ACAST32_
#define _ACAST64_
#else
#define _ACAST32_ (_ATYPE_)(_ATYPE32_) /* widen if necessary */
#define _ACAST64_ (_ATYPE64_) /* do _not_ narrow */
#endif
/*
* Returns the kernel segment base of a given address
*/
#define KSEGX(a) ((_ACAST32_ (a)) & 0xe0000000)
/*
* Returns the physical address of a CKSEGx / XKPHYS address
*/
#define CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
#define XPHYSADDR(a) ((_ACAST64_(a)) & \
_CONST64_(0x000000ffffffffff))
#ifdef ARCH_MIPS64
/*
* Memory segments (64bit kernel mode addresses)
* The compatibility segments use the full 64-bit sign extended value. Note
* the R8000 doesn't have them so don't reference these in generic MIPS code.
*/
#define XKUSEG _CONST64_(0x0000000000000000)
#define XKSSEG _CONST64_(0x4000000000000000)
#define XKPHYS _CONST64_(0x8000000000000000)
#define XKSEG _CONST64_(0xc000000000000000)
#define CKSEG0 _CONST64_(0xffffffff80000000)
#define CKSEG1 _CONST64_(0xffffffffa0000000)
#define CKSSEG _CONST64_(0xffffffffc0000000)
#define CKSEG3 _CONST64_(0xffffffffe0000000)
#define CKSEG0ADDR(a) (CPHYSADDR(a) | CKSEG0)
#define CKSEG1ADDR(a) (CPHYSADDR(a) | CKSEG1)
#define CKSEG2ADDR(a) (CPHYSADDR(a) | CKSEG2)
#define CKSEG3ADDR(a) (CPHYSADDR(a) | CKSEG3)
#define KUSEGBASE 0xffffffff00000000
#define KSEG0BASE 0xffffffff80000000
#define KSEG1BASE 0xffffffffa0000000
#define KSEG2BASE 0xffffffffc0000000
#define KSEG3BASE 0xffffffffe0000000
#else
#define CKSEG0ADDR(a) (CPHYSADDR(a) | KSEG0BASE)
#define CKSEG1ADDR(a) (CPHYSADDR(a) | KSEG1BASE)
#define CKSEG2ADDR(a) (CPHYSADDR(a) | KSEG2BASE)
#define CKSEG3ADDR(a) (CPHYSADDR(a) | KSEG3BASE)
/*
* Map an address to a certain kernel segment
*/
#define KSEG0ADDR(a) (CPHYSADDR(a) | KSEG0BASE)
#define KSEG1ADDR(a) (CPHYSADDR(a) | KSEG1BASE)
#define KSEG2ADDR(a) (CPHYSADDR(a) | KSEG2BASE)
#define KSEG3ADDR(a) (CPHYSADDR(a) | KSEG3BASE)
#define CKUSEG 0x00000000
#define CKSEG0 0x80000000
#define CKSEG1 0xa0000000
#define CKSEG2 0xc0000000
#define CKSEG3 0xe0000000
/*
* Memory segments (32bit kernel mode addresses)
* These are the traditional names used in the 32-bit universe.
*/
#define KUSEGBASE 0x00000000
#define KSEG0BASE 0x80000000
#define KSEG1BASE 0xa0000000
#define KSEG2BASE 0xc0000000
#define KSEG3BASE 0xe0000000
#endif
/*
* Cache modes for XKPHYS address conversion macros
*/
#define K_CALG_COH_EXCL1_NOL2 0
#define K_CALG_COH_SHRL1_NOL2 1
#define K_CALG_UNCACHED 2
#define K_CALG_NONCOHERENT 3
#define K_CALG_COH_EXCL 4
#define K_CALG_COH_SHAREABLE 5
#define K_CALG_NOTUSED 6
#define K_CALG_UNCACHED_ACCEL 7
/*
* 64-bit address conversions
*/
#define PHYS_TO_XKSEG_UNCACHED(p) PHYS_TO_XKPHYS(K_CALG_UNCACHED, (p))
#define PHYS_TO_XKSEG_CACHED(p) PHYS_TO_XKPHYS(K_CALG_COH_SHAREABLE, (p))
#define XKPHYS_TO_PHYS(p) ((p) & TO_PHYS_MASK)
#define PHYS_TO_XKPHYS(cm, a) (_CONST64_(0x8000000000000000) | \
(_CONST64_(cm) << 59) | (a))
/*
* Returns the uncached address of a sdram address
*/
#ifndef __ASSEMBLY__
/*
* The ultimate limited of the 64-bit MIPS architecture: 2 bits for selecting
* the region, 3 bits for the CCA mode. This leaves 59 bits of which the
* R8000 implements most with its 48-bit physical address space.
*/
#define TO_PHYS_MASK _CONST64_(0x07ffffffffffffff) /* 2^^59 - 1 */
#define COMPAT_K1BASE32 _CONST64_(0xffffffffa0000000)
#define PHYS_TO_COMPATK1(x) ((x) | COMPAT_K1BASE32) /* 32-bit compat k1 */
#define KDM_TO_PHYS(x) (_ACAST64_ (x) & TO_PHYS_MASK)
#define PHYS_TO_K0(x) (_ACAST64_ (x) | CAC_BASE)
#endif
#ifndef __ASSEMBLY__
#define REG8( addr ) (*(volatile u8 *) (addr))
#define REG16( addr ) (*(volatile u16 *)(addr))
#define REG32( addr ) (*(volatile u32 *)(addr))
#define REG64( addr ) (*(volatile u64 *)(addr))
#endif
#endif /* _MIPS_ADDRSPACE_H_ */

View File

@ -0,0 +1,139 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2016-09-07 Urey the first version
*/
#include <rtthread.h>
#include "mips.h"
extern void cache_init(rt_ubase_t cache_size, rt_ubase_t cache_line_size);
/* Hook for cache initialisation; the geometry-driven init call is currently disabled. */
void r4k_cache_init(void)
{
    // cache_init(dcache_size, cpu_dcache_line_size);
}
/* Write back + invalidate the entire D-cache, then invalidate the entire I-cache. */
void r4k_cache_flush_all(void)
{
    blast_dcache16();
    blast_icache16();
}
/* Invalidate the entire instruction cache. */
void r4k_icache_flush_all(void)
{
    blast_icache16();
}
/**
 * Invalidate the I-cache lines covering [addr, addr + size).
 * A range spanning the whole cache is handled by blasting the entire
 * cache, which is cheaper than hitting each line.
 */
void r4k_icache_flush_range(rt_ubase_t addr, rt_ubase_t size)
{
    rt_ubase_t end, a;

    /* >= (was >) so a range exactly equal to the cache size also takes
     * the cheap whole-cache path, consistent with r4k_dcache_wback_inv() */
    if (size >= g_mips_core.icache_size)
    {
        blast_icache16();
    }
    else
    {
        rt_ubase_t ic_lsize = g_mips_core.icache_line_size;

        /* align both bounds down to a line boundary; 'end' is the last line (inclusive) */
        a = addr & ~(ic_lsize - 1);
        end = ((addr + size) - 1) & ~(ic_lsize - 1);
        while (1)
        {
            flush_icache_line(a);
            if (a == end)
                break;
            a += ic_lsize;
        }
    }
}
/* Fetch-and-lock the I-cache lines covering [addr, addr + size). */
void r4k_icache_lock_range(rt_ubase_t addr, rt_ubase_t size)
{
    rt_ubase_t lsize = g_mips_core.icache_line_size;
    rt_ubase_t line = addr & ~(lsize - 1);              /* first line (aligned down) */
    rt_ubase_t last = ((addr + size) - 1) & ~(lsize - 1); /* last line (inclusive) */

    for (;;)
    {
        lock_icache_line(line);
        if (line == last)
            break;
        line += lsize;
    }
}
/* Invalidate (without write-back) the D-cache lines covering [addr, addr + size). */
void r4k_dcache_inv(rt_ubase_t addr, rt_ubase_t size)
{
    rt_ubase_t lsize = g_mips_core.dcache_line_size;
    rt_ubase_t line = addr & ~(lsize - 1);              /* first line (aligned down) */
    rt_ubase_t last = ((addr + size) - 1) & ~(lsize - 1); /* last line (inclusive) */

    for (;;)
    {
        invalidate_dcache_line(line);
        if (line == last)
            break;
        line += lsize;
    }
}
/**
 * Write back and invalidate the D-cache lines covering [addr, addr + size);
 * when the range is at least the cache size, the whole D-cache is blasted
 * instead of walking it line by line.
 */
void r4k_dcache_wback_inv(rt_ubase_t addr, rt_ubase_t size)
{
    rt_ubase_t end, a;

    if (size >= g_mips_core.dcache_size)
    {
        blast_dcache16();
    }
    else
    {
        rt_ubase_t dc_lsize = g_mips_core.dcache_line_size;

        /* align both bounds down to a line boundary; 'end' is the last line (inclusive) */
        a = addr & ~(dc_lsize - 1);
        end = ((addr + size) - 1) & ~(dc_lsize - 1);
        while (1)
        {
            flush_dcache_line(a);
            if (a == end)
                break;
            a += dc_lsize;
        }
    }
}
#define dma_cache_wback_inv(start,size) \
do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size) \
do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size) \
do { (void) (start); (void) (size); } while (0)
/**
 * Make [addr, addr + size) coherent for a DMA transfer in the given direction.
 *
 * NOTE(review): DMA_FROM_DEVICE performs write-back + invalidate just like
 * DMA_TO_DEVICE; an invalidate-only (r4k_dcache_inv) is the usual choice
 * for from-device transfers -- confirm against the SoC's requirements.
 * NOTE(review): DMA_BIDIRECTIONAL maps to dma_cache_wback_inv(), defined
 * above as a no-op -- inconsistent with the other cases; verify.
 */
void r4k_dma_cache_sync(rt_ubase_t addr, rt_size_t size, enum dma_data_direction direction)
{
    switch (direction)
    {
    case DMA_TO_DEVICE:
        r4k_dcache_wback_inv(addr, size);
        break;

    case DMA_FROM_DEVICE:
        r4k_dcache_wback_inv(addr, size);
        break;

    case DMA_BIDIRECTIONAL:
        dma_cache_wback_inv(addr, size);
        break;

    default:
        RT_ASSERT(0) ;
    }
}

View File

@ -0,0 +1,216 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2016-09-07 Urey the first version
*/
#ifndef _MIPS_CACHE_H_
#define _MIPS_CACHE_H_
#ifndef __ASSEMBLER__
#include <rtdef.h>
#include <mips_cfg.h>
/*
* Cache Operations available on all MIPS processors with R4000-style caches
*/
#define INDEX_INVALIDATE_I 0x00
#define INDEX_WRITEBACK_INV_D 0x01
#define INDEX_LOAD_TAG_I 0x04
#define INDEX_LOAD_TAG_D 0x05
#define INDEX_STORE_TAG_I 0x08
#define INDEX_STORE_TAG_D 0x09
#define HIT_INVALIDATE_I 0x10
#define HIT_INVALIDATE_D 0x11
#define HIT_WRITEBACK_INV_D 0x15
/*
*The lock state is cleared by executing an Index
Invalidate, Index Writeback Invalidate, Hit
Invalidate, or Hit Writeback Invalidate
operation to the locked line, or via an Index
Store Tag operation with the lock bit reset in
the TagLo register.
*/
#define FETCH_AND_LOCK_I 0x1c
#define FETCH_AND_LOCK_D 0x1d
enum dma_data_direction
{
DMA_BIDIRECTIONAL = 0,
DMA_TO_DEVICE = 1,
DMA_FROM_DEVICE = 2,
DMA_NONE = 3,
};
/*
* R4000-specific cacheops
*/
#define CREATE_DIRTY_EXCL_D 0x0d
#define FILL 0x14
#define HIT_WRITEBACK_I 0x18
#define HIT_WRITEBACK_D 0x19
/*
* R4000SC and R4400SC-specific cacheops
*/
#define INDEX_INVALIDATE_SI 0x02
#define INDEX_WRITEBACK_INV_SD 0x03
#define INDEX_LOAD_TAG_SI 0x06
#define INDEX_LOAD_TAG_SD 0x07
#define INDEX_STORE_TAG_SI 0x0A
#define INDEX_STORE_TAG_SD 0x0B
#define CREATE_DIRTY_EXCL_SD 0x0f
#define HIT_INVALIDATE_SI 0x12
#define HIT_INVALIDATE_SD 0x13
#define HIT_WRITEBACK_INV_SD 0x17
#define HIT_WRITEBACK_SD 0x1b
#define HIT_SET_VIRTUAL_SI 0x1e
#define HIT_SET_VIRTUAL_SD 0x1f
/*
* R5000-specific cacheops
*/
#define R5K_PAGE_INVALIDATE_S 0x17
/*
* RM7000-specific cacheops
*/
#define PAGE_INVALIDATE_T 0x16
/*
* R10000-specific cacheops
*
* Cacheops 0x02, 0x06, 0x0a, 0x0c-0x0e, 0x16, 0x1a and 0x1e are unused.
* Most of the _S cacheops are identical to the R4000SC _SD cacheops.
*/
#define INDEX_WRITEBACK_INV_S 0x03
#define INDEX_LOAD_TAG_S 0x07
#define INDEX_STORE_TAG_S 0x0B
#define HIT_INVALIDATE_S 0x13
#define CACHE_BARRIER 0x14
#define HIT_WRITEBACK_INV_S 0x17
#define INDEX_LOAD_DATA_I 0x18
#define INDEX_LOAD_DATA_D 0x19
#define INDEX_LOAD_DATA_S 0x1b
#define INDEX_STORE_DATA_I 0x1c
#define INDEX_STORE_DATA_D 0x1d
#define INDEX_STORE_DATA_S 0x1f
#define cache_op(op, addr) \
__asm__ __volatile__( \
".set push\n" \
".set noreorder\n" \
".set mips3\n" \
"cache %0, %1\n" \
".set pop\n" \
: \
: "i" (op), "R" (*(unsigned char *)(addr)))
#define cache16_unroll32(base, op) \
__asm__ __volatile__( \
" .set noreorder \n" \
" .set mips3 \n" \
" cache %1, 0x000(%0); cache %1, 0x010(%0) \n" \
" cache %1, 0x020(%0); cache %1, 0x030(%0) \n" \
" cache %1, 0x040(%0); cache %1, 0x050(%0) \n" \
" cache %1, 0x060(%0); cache %1, 0x070(%0) \n" \
" cache %1, 0x080(%0); cache %1, 0x090(%0) \n" \
" cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" \
" cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" \
" cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" \
" cache %1, 0x100(%0); cache %1, 0x110(%0) \n" \
" cache %1, 0x120(%0); cache %1, 0x130(%0) \n" \
" cache %1, 0x140(%0); cache %1, 0x150(%0) \n" \
" cache %1, 0x160(%0); cache %1, 0x170(%0) \n" \
" cache %1, 0x180(%0); cache %1, 0x190(%0) \n" \
" cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" \
" cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" \
" cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" \
" .set mips0 \n" \
" .set reorder \n" \
: \
: "r" (base), \
"i" (op));
static inline void flush_icache_line_indexed(rt_ubase_t addr)
{
cache_op(INDEX_INVALIDATE_I, addr);
}
static inline void flush_dcache_line_indexed(rt_ubase_t addr)
{
cache_op(INDEX_WRITEBACK_INV_D, addr);
}
static inline void flush_icache_line(rt_ubase_t addr)
{
cache_op(HIT_INVALIDATE_I, addr);
}
static inline void lock_icache_line(rt_ubase_t addr)
{
cache_op(FETCH_AND_LOCK_I, addr);
}
static inline void lock_dcache_line(rt_ubase_t addr)
{
cache_op(FETCH_AND_LOCK_D, addr);
}
static inline void flush_dcache_line(rt_ubase_t addr)
{
cache_op(HIT_WRITEBACK_INV_D, addr);
}
static inline void invalidate_dcache_line(rt_ubase_t addr)
{
cache_op(HIT_INVALIDATE_D, addr);
}
static inline void blast_dcache16(void)
{
rt_ubase_t start = KSEG0BASE;
rt_ubase_t end = start + g_mips_core.dcache_size;
rt_ubase_t addr;
for (addr = start; addr < end; addr += g_mips_core.dcache_line_size)
cache16_unroll32(addr, INDEX_WRITEBACK_INV_D);
}
static inline void inv_dcache16(void)
{
rt_ubase_t start = KSEG0BASE;
rt_ubase_t end = start + g_mips_core.dcache_size;
rt_ubase_t addr;
for (addr = start; addr < end; addr += g_mips_core.dcache_line_size)
cache16_unroll32(addr, HIT_INVALIDATE_D);
}
/* Index-invalidate the entire I-cache.
 * NOTE(review): cache16_unroll32 touches 0x200 bytes per call, but the loop
 * advances by icache_line_size; for line sizes < 0x200 the same indices are
 * re-invalidated repeatedly (harmless but wasteful) -- verify the stride. */
static inline void blast_icache16(void)
{
    rt_ubase_t start = KSEG0BASE;
    rt_ubase_t end = start + g_mips_core.icache_size;
    rt_ubase_t addr;

    for (addr = start; addr < end; addr += g_mips_core.icache_line_size)
        cache16_unroll32(addr, INDEX_INVALIDATE_I);
}
void r4k_cache_init(void);
void r4k_cache_flush_all(void);
void r4k_icache_flush_all(void);
void r4k_icache_flush_range(rt_ubase_t addr, rt_ubase_t size);
void r4k_icache_lock_range(rt_ubase_t addr, rt_ubase_t size);
void r4k_dcache_inv(rt_ubase_t addr, rt_ubase_t size);
void r4k_dcache_wback_inv(rt_ubase_t addr, rt_ubase_t size);
void r4k_dma_cache_sync(rt_ubase_t addr, rt_size_t size, enum dma_data_direction direction);
#endif
#endif /* _MIPS_CACHE_H_ */

View File

@ -0,0 +1,34 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2016-09-07 Urey the first version
*/
#ifndef _MIPS_CFG_H_
#define _MIPS_CFG_H_
#ifndef __ASSEMBLY__
#include <stdint.h>
/*
 * Cache geometry and TLB capacity of the core (see the extern instance
 * g_mips_core). Sizes and line sizes are in bytes, as used by the
 * address arithmetic in mips_cache.h.
 * NOTE(review): uint16_t caps the size fields at 65535 -- a cache of
 * 64 KiB or more would overflow; verify for the target cores.
 */
typedef struct mips32_core_cfg
{
    uint16_t icache_line_size;      /* bytes per I-cache line */
    uint16_t icache_lines_per_way;
    uint16_t icache_ways;
    uint16_t icache_size;           /* total I-cache bytes */
    uint16_t dcache_line_size;      /* bytes per D-cache line */
    uint16_t dcache_lines_per_way;
    uint16_t dcache_ways;
    uint16_t dcache_size;           /* total D-cache bytes */
    uint16_t max_tlb_entries;       /* number of tlb entry */
} mips32_core_cfg_t;
extern mips32_core_cfg_t g_mips_core;
#endif /* __ASSEMBLY__ */
#endif /* _MIPS_CFG_H_ */

View File

@ -0,0 +1,46 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-12-04 Jiaxun Yang Initial version
*/
#ifndef _MIPS_FPU_H_
#define _MIPS_FPU_H_
#ifndef __ASSEMBLY__
#include <mips_regs.h>
/**
* init hardware FPU
*/
#ifdef RT_USING_FPU
/**
 * Enable and configure the hardware FPU: grant coprocessor-1 access
 * (ST0_CU1, with ST0_FR register-file mode), then program FCSR with
 * FS/FO/FN set, all exception enables cleared, and RN rounding.
 */
rt_inline void rt_hw_fpu_init(void)
{
    rt_uint32_t c0_status = 0;
    rt_uint32_t c1_status = 0;

    /* Enable CU1 */
    c0_status = read_c0_status();
    c0_status |= (ST0_CU1 | ST0_FR);
    write_c0_status(c0_status);

    /* FCSR Configs */
    c1_status = read_c1_status();
    c1_status |= (FPU_CSR_FS | FPU_CSR_FO | FPU_CSR_FN); /* Set FS, FO, FN */
    c1_status &= ~(FPU_CSR_ALL_E); /* Disable exception */
    c1_status = (c1_status & (~FPU_CSR_RM)) | FPU_CSR_RN; /* Set RN */
    write_c1_status(c1_status);

    return ;
}
#else
rt_inline void rt_hw_fpu_init(void){} /* Do nothing */
#endif
#endif
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,91 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2016-09-07 Urey the first version
*/
#ifndef _MIPS_TYPES_H_
#define _MIPS_TYPES_H_
#ifndef __ASSEMBLY__
typedef unsigned short umode_t;
/*
* __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
* header files exported to user space
*/
typedef __signed__ char __s8;
typedef unsigned char __u8;
typedef __signed__ short __s16;
typedef unsigned short __u16;
typedef __signed__ int __s32;
typedef unsigned int __u32;
#if (_MIPS_SZLONG == 64)
typedef __signed__ long __s64;
typedef unsigned long __u64;
#else
#if defined(__GNUC__)
__extension__ typedef __signed__ long long __s64;
__extension__ typedef unsigned long long __u64;
#endif
#endif
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
#define BITS_PER_LONG _MIPS_SZLONG
typedef __signed char s8;
typedef unsigned char u8;
typedef __signed short s16;
typedef unsigned short u16;
typedef __signed int s32;
typedef unsigned int u32;
#if (_MIPS_SZLONG == 64)
typedef __signed__ long s64;
typedef unsigned long u64;
#else
#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
typedef __signed__ long long s64;
typedef unsigned long long u64;
#endif
#endif
typedef u32 dma_addr_t;
typedef u32 phys_addr_t;
typedef u32 phys_size_t;
/*
* Don't use phys_t. You've been warned.
*/
typedef unsigned long phys_t;
#endif /* __ASSEMBLY__ */
#endif /* _MIPS_TYPES_H_ */

View File

@ -0,0 +1,145 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-12-04 Jiaxun Yang Initial version
*/
#ifndef _MIPS_PTRACE_H
#define _MIPS_PTRACE_H
#include "asm.h"
#include "mips_regs.h"
#define HI_LO_SIZE 4
#define FP_REG_SIZE 8
#define NUM_FPU_REGS 16
#ifndef __ASSEMBLY__
#include <rtthread.h>
/* Saved FPU context: the 16 even-numbered double registers plus FCSR.
 * (The stackframe macros store/load $f0,$f2,...,$f30 — NUM_FPU_REGS == 16.) */
struct mips_fpu_struct {
    rt_uint64_t fpr[NUM_FPU_REGS]; /* even double FP registers $f0..$f30 */
    rt_uint32_t fcr31;             /* FPU control/status register (FCSR) */
    rt_uint32_t pad;               /* keeps the struct a multiple of 8 bytes */
};
/* Full CPU context saved on exception entry / thread creation.
 * The field order must match the PT_* byte offsets defined below. */
struct pt_regs {
#ifndef ARCH_MIPS64
    /* Only O32 Need This! */
    /* Pad bytes for argument save space on the stack. */
    rt_uint32_t pad0[8];
    /* Saved main processor registers. */
    rt_uint32_t regs[32];
    /* Saved special registers. */
    rt_uint32_t cp0_status;    /* CP0 Status at exception time */
    rt_uint32_t hi;            /* multiply/divide HI */
    rt_uint32_t lo;            /* multiply/divide LO */
    rt_uint32_t cp0_badvaddr;  /* faulting virtual address, if any */
    rt_uint32_t cp0_cause;     /* CP0 Cause */
    rt_uint32_t cp0_epc;       /* exception return address */
#else
    /* Saved main processor registers. */
    unsigned long regs[32];
    /* Saved special registers. */
    rt_uint32_t cp0_status;
    rt_uint32_t hi;
    rt_uint32_t lo;
    unsigned long cp0_badvaddr;
    rt_uint32_t cp0_cause;
    unsigned long cp0_epc;
#endif
#ifdef RT_USING_FPU
    /* FPU Registers */
    /* Unlike Linux Kernel, we save these registers unconditionally,
     * so it should be a part of pt_regs */
    struct mips_fpu_struct fpu;
#endif
} __attribute__((aligned(8)));
#endif
/* Note: for the call stack, the o32 ABI reserves 0x8 words of shadow space here */
#ifdef ARCH_MIPS64
#define PT_R0 (0x0 * LONGSIZE) /* 0 */
#else
#define PT_R0 (0x8 * LONGSIZE) /* 0 */
#endif
#define PT_R1 ((PT_R0) + LONGSIZE) /* 1 */
#define PT_R2 ((PT_R1) + LONGSIZE) /* 2 */
#define PT_R3 ((PT_R2) + LONGSIZE) /* 3 */
#define PT_R4 ((PT_R3) + LONGSIZE) /* 4 */
#define PT_R5 ((PT_R4) + LONGSIZE) /* 5 */
#define PT_R6 ((PT_R5) + LONGSIZE) /* 6 */
#define PT_R7 ((PT_R6) + LONGSIZE) /* 7 */
#define PT_R8 ((PT_R7) + LONGSIZE) /* 8 */
#define PT_R9 ((PT_R8) + LONGSIZE) /* 9 */
#define PT_R10 ((PT_R9) + LONGSIZE) /* 10 */
#define PT_R11 ((PT_R10) + LONGSIZE) /* 11 */
#define PT_R12 ((PT_R11) + LONGSIZE) /* 12 */
#define PT_R13 ((PT_R12) + LONGSIZE) /* 13 */
#define PT_R14 ((PT_R13) + LONGSIZE) /* 14 */
#define PT_R15 ((PT_R14) + LONGSIZE) /* 15 */
#define PT_R16 ((PT_R15) + LONGSIZE) /* 16 */
#define PT_R17 ((PT_R16) + LONGSIZE) /* 17 */
#define PT_R18 ((PT_R17) + LONGSIZE) /* 18 */
#define PT_R19 ((PT_R18) + LONGSIZE) /* 19 */
#define PT_R20 ((PT_R19) + LONGSIZE) /* 20 */
#define PT_R21 ((PT_R20) + LONGSIZE) /* 21 */
#define PT_R22 ((PT_R21) + LONGSIZE) /* 22 */
#define PT_R23 ((PT_R22) + LONGSIZE) /* 23 */
#define PT_R24 ((PT_R23) + LONGSIZE) /* 24 */
#define PT_R25 ((PT_R24) + LONGSIZE) /* 25 */
#define PT_R26 ((PT_R25) + LONGSIZE) /* 26 */
#define PT_R27 ((PT_R26) + LONGSIZE) /* 27 */
#define PT_R28 ((PT_R27) + LONGSIZE) /* 28 */
#define PT_R29 ((PT_R28) + LONGSIZE) /* 29 */
#define PT_R30 ((PT_R29) + LONGSIZE) /* 30 */
#define PT_R31 ((PT_R30) + LONGSIZE) /* 31 */
/*
* Saved special registers
*/
#define PT_STATUS ((PT_R31) + LONGSIZE) /* 32 */
#define PT_HI ((PT_STATUS) + HI_LO_SIZE) /* 33 */
#define PT_LO ((PT_HI) + HI_LO_SIZE) /* 34 */
#define PT_BADVADDR ((PT_LO) + LONGSIZE) /* 35 */
#define PT_CAUSE ((PT_BADVADDR) + LONGSIZE) /* 36 */
#define PT_EPC ((PT_CAUSE) + LONGSIZE) /* 37 */
#define PT_REG_END ((PT_EPC) + LONGSIZE) /* Align already ensured manually */
#ifdef RT_USING_FPU
#define PT_FPU_R0 (PT_REG_END)
#define PT_FPU_R2 ((PT_FPU_R0) + FP_REG_SIZE)
#define PT_FPU_R4 ((PT_FPU_R2) + FP_REG_SIZE)
#define PT_FPU_R6 ((PT_FPU_R4) + FP_REG_SIZE)
#define PT_FPU_R8 ((PT_FPU_R6) + FP_REG_SIZE)
#define PT_FPU_R10 ((PT_FPU_R8) + FP_REG_SIZE)
#define PT_FPU_R12 ((PT_FPU_R10) + FP_REG_SIZE)
#define PT_FPU_R14 ((PT_FPU_R12) + FP_REG_SIZE)
#define PT_FPU_R16 ((PT_FPU_R14) + FP_REG_SIZE)
#define PT_FPU_R18 ((PT_FPU_R16) + FP_REG_SIZE)
#define PT_FPU_R20 ((PT_FPU_R18) + FP_REG_SIZE)
#define PT_FPU_R22 ((PT_FPU_R20) + FP_REG_SIZE)
#define PT_FPU_R24 ((PT_FPU_R22) + FP_REG_SIZE)
#define PT_FPU_R26 ((PT_FPU_R24) + FP_REG_SIZE)
#define PT_FPU_R28 ((PT_FPU_R26) + FP_REG_SIZE)
#define PT_FPU_R30 ((PT_FPU_R28) + FP_REG_SIZE)
#define PT_FPU_FCSR31 ((PT_FPU_R30) + FP_REG_SIZE)
#define PT_FPU_PAD0 ((PT_FPU_FCSR31) + 4)
#define PT_FPU_END ((PT_FPU_PAD0) + 4)
#define PT_SIZE PT_FPU_END
#else
#define PT_SIZE PT_REG_END
#endif
#endif

View File

@ -0,0 +1,61 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-12-04 Jiaxun Yang Initial version
*/
#include <rtthread.h>
#include "mips.h"
register rt_uint32_t $GP __asm__ ("$28");
/**
 * Build the initial stack frame for a new thread.
 *
 * A pt_regs context is placed at the (8-byte aligned) top of the stack so the
 * first context switch "restores" into the thread entry.
 *
 * @param tentry     thread entry function (becomes EPC)
 * @param parameter  argument passed to the entry (goes to a0)
 * @param stack_addr highest address of the thread stack
 * @param texit      routine invoked if the entry ever returns (goes to ra)
 * @return stack pointer the scheduler should start from
 *
 * Fix: removed the static locals wSR/wGP — they were never read, and as
 * statics they were also needlessly shared between calls.
 */
rt_uint8_t *rt_hw_stack_init(void *tentry, void *parameter, rt_uint8_t *stack_addr, void *texit)
{
    rt_uint8_t *stk;
    struct pt_regs *pt;
    rt_uint32_t i;

    /* Get stack aligned, then reserve room for the saved context */
    stk = (rt_uint8_t *)RT_ALIGN_DOWN((rt_ubase_t)stack_addr, 8);
    stk -= sizeof(struct pt_regs);
    pt = (struct pt_regs *)stk;

#ifndef ARCH_MIPS64
    /* Poison the o32 argument save area so stale reads are recognizable */
    for (i = 0; i < 8; ++i)
    {
        pt->pad0[i] = 0xdeadbeef;
    }
#endif

    /* Poison all GPRs, then fill in the ones that matter */
    for (i = 0; i < 32; ++i)
    {
        pt->regs[i] = 0xdeadbeef;
    }

    pt->regs[REG_SP] = (rt_ubase_t)stk;        /* initial stack pointer */
    pt->regs[REG_A0] = (rt_ubase_t)parameter;  /* first argument of entry */
    pt->regs[REG_GP] = (rt_ubase_t)$GP;        /* inherit current global pointer */
    pt->regs[REG_FP] = (rt_ubase_t)0x0;
    pt->regs[REG_RA] = (rt_ubase_t)texit;      /* exit hook if entry returns */

    pt->hi = 0x0;
    pt->lo = 0x0;

    /* Interrupts enabled, CP0 usable, all interrupt mask bits set */
    pt->cp0_status = (ST0_IE | ST0_CU0 | ST0_IM);
#ifdef RT_USING_FPU
    /* FPU usable with the 64-bit register model */
    pt->cp0_status |= (ST0_CU1 | ST0_FR);
#endif
    pt->cp0_cause = read_c0_cause();
    pt->cp0_epc = (rt_ubase_t)tentry;
    pt->cp0_badvaddr = 0x0;

    return stk;
}

View File

@ -0,0 +1,256 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-12-04 Jiaxun Yang Initial version
*/
#ifndef __STACKFRAME_H__
#define __STACKFRAME_H__
#include "asm.h"
#include "mips_regs.h"
#include "ptrace.h"
/* You MUST ensure FP is enabled before SAVE_FPU! */
.macro SAVE_FPU
.set push
.set noreorder
#ifdef RT_USING_FPU
/* Ensure CU1 (FPU) is enabled */
MFC0 v1, CP0_STATUS
lui t1, %hi(ST0_CU1)
addiu t1, t1, %lo(ST0_CU1)
or v1, v1, t1
MTC0 v1, CP0_STATUS
SSNOP
cfc1 v1, fcr31
/* Store as delay slot */
s.d $f0, PT_FPU_R0(sp)
s.d $f2, PT_FPU_R2(sp)
s.d $f4, PT_FPU_R4(sp)
s.d $f6, PT_FPU_R6(sp)
s.d $f8, PT_FPU_R8(sp)
s.d $f10, PT_FPU_R10(sp)
s.d $f12, PT_FPU_R12(sp)
s.d $f14, PT_FPU_R14(sp)
s.d $f16, PT_FPU_R16(sp)
s.d $f18, PT_FPU_R18(sp)
s.d $f20, PT_FPU_R20(sp)
s.d $f22, PT_FPU_R22(sp)
s.d $f24, PT_FPU_R24(sp)
s.d $f26, PT_FPU_R26(sp)
s.d $f28, PT_FPU_R28(sp)
s.d $f30, PT_FPU_R30(sp)
LONG_S v1, PT_FPU_FCSR31(sp)
#endif
.set reorder
.set pop
.endm
/* Save the assembler temporary register $1 (AT) into the frame. */
.macro SAVE_AT
    .set push
    .set noat            /* AT is saved manually; stop the assembler using it */
    LONG_S $1, PT_R1(sp)
    .set pop
.endm
.macro SAVE_TEMP
mfhi v1
LONG_S $8, PT_R8(sp)
LONG_S $9, PT_R9(sp)
sw v1, PT_HI(sp)
mflo v1
LONG_S $10, PT_R10(sp)
LONG_S $11, PT_R11(sp)
sw v1, PT_LO(sp)
LONG_S $12, PT_R12(sp)
LONG_S $13, PT_R13(sp)
LONG_S $14, PT_R14(sp)
LONG_S $15, PT_R15(sp)
LONG_S $24, PT_R24(sp)
.endm
.macro SAVE_STATIC
LONG_S $16, PT_R16(sp)
LONG_S $17, PT_R17(sp)
LONG_S $18, PT_R18(sp)
LONG_S $19, PT_R19(sp)
LONG_S $20, PT_R20(sp)
LONG_S $21, PT_R21(sp)
LONG_S $22, PT_R22(sp)
LONG_S $23, PT_R23(sp)
LONG_S $30, PT_R30(sp)
.endm
.macro SAVE_SOME
.set push
.set noat
.set reorder
move k1, sp
move k0, sp
PTR_SUBU sp, k1, PT_SIZE
LONG_S k0, PT_R29(sp)
LONG_S $3, PT_R3(sp)
LONG_S $0, PT_R0(sp)
MFC0 v1, CP0_STATUS
LONG_S $2, PT_R2(sp)
LONG_S v1, PT_STATUS(sp)
LONG_S $4, PT_R4(sp)
MFC0 v1, CP0_CAUSE
LONG_S $5, PT_R5(sp)
LONG_S v1, PT_CAUSE(sp)
LONG_S $6, PT_R6(sp)
MFC0 v1, CP0_EPC
LONG_S $7, PT_R7(sp)
LONG_S v1, PT_EPC(sp)
LONG_S $25, PT_R25(sp)
LONG_S $28, PT_R28(sp)
LONG_S $31, PT_R31(sp)
.set pop
.endm
/* Save the complete CPU context (plus FPU context when RT_USING_FPU). */
.macro SAVE_ALL
    SAVE_SOME    /* sp switch, $0,$2-$7,$25,$28,$31, CP0 status/cause/epc */
    SAVE_AT      /* $1 (AT) */
    SAVE_TEMP    /* $8-$15, $24, hi/lo */
    SAVE_FPU     /* even $f regs + FCSR (empty unless RT_USING_FPU) */
    SAVE_STATIC  /* $16-$23, $30 */
.endm
.macro RESTORE_FPU
.set push
.set noreorder
#ifdef RT_USING_FPU
/* Ensure CU1 (FPU) is enabled */
MFC0 v1, CP0_STATUS
lui t1, %hi(ST0_CU1)
addiu t1, t1, %lo(ST0_CU1)
or v1, v1, t1
MTC0 v1, CP0_STATUS
SSNOP
LONG_L v1, PT_FPU_FCSR31(sp)
ctc1 v1, fcr31
l.d $f0, PT_FPU_R0(sp)
l.d $f2, PT_FPU_R2(sp)
l.d $f4, PT_FPU_R4(sp)
l.d $f6, PT_FPU_R6(sp)
l.d $f8, PT_FPU_R8(sp)
l.d $f10, PT_FPU_R10(sp)
l.d $f12, PT_FPU_R12(sp)
l.d $f14, PT_FPU_R14(sp)
l.d $f16, PT_FPU_R16(sp)
l.d $f18, PT_FPU_R18(sp)
l.d $f20, PT_FPU_R20(sp)
l.d $f22, PT_FPU_R22(sp)
l.d $f24, PT_FPU_R24(sp)
l.d $f26, PT_FPU_R26(sp)
l.d $f28, PT_FPU_R28(sp)
l.d $f30, PT_FPU_R30(sp)
#endif
.set reorder
.set pop
.endm
.macro RESTORE_AT
.set push
.set noat
LONG_L $1, PT_R1(sp)
.set pop
.endm
.macro RESTORE_TEMP
lw $24, PT_LO(sp)
LONG_L $8, PT_R8(sp)
LONG_L $9, PT_R9(sp)
mtlo $24
lw $24, PT_HI(sp)
LONG_L $10, PT_R10(sp)
LONG_L $11, PT_R11(sp)
mthi $24
LONG_L $12, PT_R12(sp)
LONG_L $13, PT_R13(sp)
LONG_L $14, PT_R14(sp)
LONG_L $15, PT_R15(sp)
LONG_L $24, PT_R24(sp)
.endm
.macro RESTORE_STATIC
LONG_L $16, PT_R16(sp)
LONG_L $17, PT_R17(sp)
LONG_L $18, PT_R18(sp)
LONG_L $19, PT_R19(sp)
LONG_L $20, PT_R20(sp)
LONG_L $21, PT_R21(sp)
LONG_L $22, PT_R22(sp)
LONG_L $23, PT_R23(sp)
LONG_L $30, PT_R30(sp)
.endm
#define STATMASK 0x1f
.macro RESTORE_SOME
.set push
.set reorder
.set noat
mfc0 a0, CP0_STATUS
ori a0, STATMASK
xori a0, STATMASK
mtc0 a0, CP0_STATUS
li v1, (ST0_CU1 | ST0_FR | ST0_IM)
and a0, v1, a0
LONG_L v0, PT_STATUS(sp)
li v1, ~(ST0_CU1 | ST0_FR | ST0_IM)
and v0, v1
or v0, a0
li v1, (ST0_KX | ST0_SX | ST0_UX)
or v0, v1
mtc0 v0, CP0_STATUS
LONG_L v1, PT_EPC(sp)
MTC0 v1, CP0_EPC
LONG_L $31, PT_R31(sp)
LONG_L $28, PT_R28(sp)
LONG_L $25, PT_R25(sp)
LONG_L $7, PT_R7(sp)
LONG_L $6, PT_R6(sp)
LONG_L $5, PT_R5(sp)
LONG_L $4, PT_R4(sp)
LONG_L $3, PT_R3(sp)
LONG_L $2, PT_R2(sp)
.set pop
.endm
.macro RESTORE_SP_AND_RET
LONG_L sp, PT_R29(sp)
eret
nop
.endm
.macro RESTORE_SP
LONG_L sp, PT_R29(sp)
.endm
.macro RESTORE_ALL
RESTORE_TEMP
RESTORE_FPU
RESTORE_STATIC
RESTORE_AT
RESTORE_SOME
RESTORE_SP
.endm
.macro RESTORE_ALL_AND_RET
RESTORE_TEMP
RESTORE_FPU
RESTORE_STATIC
RESTORE_AT
RESTORE_SOME
RESTORE_SP_AND_RET
.endm
#endif /* end of __STACKFRAME_H__ */

View File

@ -0,0 +1,13 @@
# RT-Thread building script for component
from building import *

Import('rtconfig')

# Every C, C++ and GNU-assembler source in this directory joins the group.
sources = Glob('*.c') + Glob('*.cpp') + Glob('*_gcc.S')
include_dirs = [GetCurrentDir()]

group = DefineGroup('libcpu', sources, depend=[''], CPPPATH=include_dirs)

Return('group')

View File

@ -0,0 +1,231 @@
/*
* Cache Ops For Loongson GS232
*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2010-07-09 Bernard first version
* 2011-08-08 lgnq modified for LS1B
* 2015-07-08 chinesebear modified for loongson 1c
*/
#include <rtthread.h>
#include <mips.h>
#define K0BASE 0x80000000
#define PRID_LS1C 0x4220
extern void Clear_TagLo (void);
extern void Invalidate_Icache_Ls1c(unsigned int);
extern void Invalidate_Dcache_ClearTag_Ls1c(unsigned int);
extern void Invalidate_Dcache_Fill_Ls1c(unsigned int);
extern void Writeback_Invalidate_Dcache(unsigned int);
extern void enable_cpu_cache(void);
typedef struct cacheinfo_t
{
unsigned int icache_size;
unsigned int dcache_size;
unsigned int icacheline_size;
unsigned int dcacheline_size;
} cacheinfo_t ;
typedef struct cacheop_t
{
void (*Clear_TagLo) (void);
void (*Invalidate_Icache) (unsigned int);
void (*Invalidate_Dcache_Fill) (unsigned int);
void (*Invalidate_Dcache_ClearTag) (unsigned int);
void (*Init_Cache)(void);
} cacheop_t ;
static cacheop_t cacheop, *pcacheop;
static cacheinfo_t cacheinfo, *pcacheinfo;
/* Identify the CPU by its PRId register and bind the matching cache
 * operation callbacks. Halts the system on an unknown CPU.
 * Returns 0 on success (never returns otherwise). */
int identify_cpu(void)
{
    unsigned int cpu_id;

    pcacheop = &cacheop;
    pcacheinfo = &cacheinfo;

    rt_kprintf("CPU configure: 0x%08x\n", read_c0_config());
    cpu_id = read_c0_prid();
    switch (cpu_id)
    {
    case PRID_LS1C:
        rt_kprintf("CPU:Loongson 1C\n");
        /* Bind the LS1C-specific cache primitives (implemented in asm). */
        pcacheop->Clear_TagLo = Clear_TagLo;
        pcacheop->Invalidate_Icache = Invalidate_Icache_Ls1c;
        pcacheop->Invalidate_Dcache_Fill = Invalidate_Dcache_Fill_Ls1c;
        pcacheop->Invalidate_Dcache_ClearTag = Invalidate_Dcache_ClearTag_Ls1c;
        break;
    default:
        /* No safe cache ops for this core — stop here rather than corrupt memory. */
        rt_kprintf("Unknown CPU type, system halted!\n");
        while (1)
        {
            ;
        }
        break;
    }
    return 0;
}
/**
 * Probe I-/D-cache geometry from the CP0 Config1 register and record it
 * in *pcacheinfo.
 *
 * Fix: removed the dead `x = x;` self-assignment else-branches; when the
 * line-size field is 0 the size simply stays 0 (no cache present).
 */
void probe_cache(void)
{
    unsigned int config1 = read_c0_config1();
    unsigned int icache_size, icache_line_size, icache_sets, icache_ways;
    unsigned int dcache_size, dcache_line_size, dcache_sets, dcache_ways;

    /* Config1 IL field: 0 means no I-cache, otherwise line size = 2 << IL */
    icache_line_size = (config1 >> 19) & 7;
    if (icache_line_size)
        icache_line_size = 2 << icache_line_size;

    icache_sets = 64 << ((config1 >> 22) & 7);    /* IS field */
    icache_ways = 1 + ((config1 >> 16) & 7);      /* IA field */
    icache_size = icache_sets * icache_ways * icache_line_size;

    /* Config1 DL field, same encoding as IL */
    dcache_line_size = (config1 >> 10) & 7;
    if (dcache_line_size)
        dcache_line_size = 2 << dcache_line_size;

    dcache_sets = 64 << ((config1 >> 13) & 7);    /* DS field */
    dcache_ways = 1 + ((config1 >> 7) & 7);       /* DA field */
    dcache_size = dcache_sets * dcache_ways * dcache_line_size;

    rt_kprintf("DCache %2dkb, linesize %d bytes.\n", dcache_size >> 10, dcache_line_size);
    rt_kprintf("ICache %2dkb, linesize %d bytes.\n", icache_size >> 10, icache_line_size);

    pcacheinfo->icache_size = icache_size;
    pcacheinfo->dcache_size = dcache_size;
    pcacheinfo->icacheline_size = icache_line_size;
    pcacheinfo->dcacheline_size = dcache_line_size;
    return ;
}
/* Write back and invalidate the entire D-cache by sweeping one cache-sized
 * window of KSEG0, one line per step. */
void invalidate_writeback_dcache_all(void)
{
    unsigned int addr;
    const unsigned int limit = K0BASE + pcacheinfo->dcache_size;

    for (addr = K0BASE; addr < limit; addr += pcacheinfo->dcacheline_size)
    {
        Writeback_Invalidate_Dcache(addr); /* hit writeback invalidate */
    }
}
/**
 * Write back and invalidate the D-cache lines covering [addr, addr + size).
 *
 * Fixes:
 *  - the start address was rounded UP to the next line boundary, so a
 *    partial first line was skipped and could stay dirty; round DOWN instead
 *    (matching flush_cache below).
 *  - guard against size <= 0 which produced an empty-but-wrong range.
 * Assumes the line size is a power of two (as reported by CP0 Config1).
 */
void invalidate_writeback_dcache(unsigned long addr, int size)
{
    unsigned long start, end;
    unsigned long lsize = pcacheinfo->dcacheline_size;

    if (size <= 0)
        return;

    start = addr & ~(lsize - 1);                   /* first line containing addr */
    end = (addr + size + lsize - 1) & ~(lsize - 1); /* exclusive, rounded up */

    while (start < end)
    {
        Writeback_Invalidate_Dcache(start);
        start += lsize;
    }
}
/* Invalidate the entire I-cache through the bound cache primitive,
 * stepping one I-cache line at a time over KSEG0. */
void invalidate_icache_all(void)
{
    unsigned int addr;
    const unsigned int limit = K0BASE + pcacheinfo->icache_size;

    for (addr = K0BASE; addr < limit; addr += pcacheinfo->icacheline_size)
    {
        pcacheop->Invalidate_Icache(addr);
    }
}
void invalidate_dcache_all(void)
{
unsigned int start = K0BASE;
unsigned int end = (start + pcacheinfo->dcache_size);
while (start <end)
{
Invalidate_Dcache_Fill_Ls1c(start);
start += pcacheinfo->icacheline_size;
}
}
//with cache disabled
/* Clear every D-cache tag by index (cache must still be disabled). */
void init_dcache(void)
{
    unsigned int addr;
    const unsigned int limit = K0BASE + pcacheinfo->dcache_size;

    for (addr = K0BASE; addr < limit; addr += pcacheinfo->dcacheline_size)
    {
        pcacheop->Invalidate_Dcache_ClearTag(addr);
    }
}
/* Bring the caches into a known state and enable them.
 * The sequence (identify -> probe -> clear tags -> index-invalidate I,
 * then clear/fill/clear the D-cache -> enable) is order-sensitive and
 * runs with the caches still disabled until the final step. */
void rt_hw_cache_init(void)
{
    unsigned int start, end;

    /* 1. identify cpu and probe cache */
    identify_cpu();
    probe_cache();

    start = K0BASE;
    end = (start + pcacheinfo->icache_size);

    /*
     * 2. clear CP0 taglo/taghi register;
     */
    pcacheop->Clear_TagLo();

    /*
     * 3. invalidate instruction cache;
     */
    while (start < end)
    {
        pcacheop->Invalidate_Icache(start); /* index invalidate icache */
        start += pcacheinfo->icacheline_size;
    }

    /*
     * 4. invalidate data cache;
     */
    start = K0BASE;
    end = (start + pcacheinfo->dcache_size);
    /* pass 1: zero all tags */
    while (start < end)
    {
        pcacheop->Invalidate_Dcache_ClearTag(start);
        start += pcacheinfo->dcacheline_size;
    }
    /* pass 2: index writeback-invalidate */
    start = K0BASE;
    while (start < end)
    {
        pcacheop->Invalidate_Dcache_Fill(start); /* index invalidate dcache */
        start += pcacheinfo->dcacheline_size;
    }
    /* pass 3: zero the tags again so the cache starts empty */
    start = K0BASE;
    while (start < end)
    {
        pcacheop->Invalidate_Dcache_ClearTag(start);
        start += pcacheinfo->dcacheline_size;
    }

    /* enable cache */
    enable_cpu_cache();
    rt_kprintf("enable cpu cache done\n");
    return ;
}

View File

@ -0,0 +1,51 @@
/*
* Cache Ops For Loongson GS232
*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2010-07-09 Bernard first version
* 2011-08-08 lgnq modified for LS1B
* 2015-07-08 chinesebear modified for loongson 1c
*/
#ifndef __CACHE_H__
#define __CACHE_H__
/*
* Cache Operations
*/
/* MIPS CACHE instruction operation codes (op field encodings). */
#define Index_Invalidate_I 0x00
#define Index_Writeback_Inv_D 0x01
#define Index_Invalidate_SI 0x02
#define Index_Writeback_Inv_SD 0x03
#define Index_Load_Tag_I 0x04
#define Index_Load_Tag_D 0x05
#define Index_Load_Tag_SI 0x06
#define Index_Load_Tag_SD 0x07
#define Index_Store_Tag_I 0x08
#define Index_Store_Tag_D 0x09
#define Index_Store_Tag_SI 0x0A
#define Index_Store_Tag_SD 0x0B
#define Create_Dirty_Excl_D 0x0d
#define Create_Dirty_Excl_SD 0x0f
#define Hit_Invalidate_I 0x10
#define Hit_Invalidate_D 0x11
#define Hit_Invalidate_SI 0x12
#define Hit_Invalidate_SD 0x13
#define Fill 0x14
#define Hit_Writeback_Inv_D 0x15
/* 0x16 is unused */
#define Hit_Writeback_Inv_SD 0x17
#define Hit_Writeback_I 0x18
#define Hit_Writeback_D 0x19
/* 0x1a is unused */
#define Hit_Writeback_SD 0x1b
/* 0x1c is unused */
/* 0x1d is unused */
#define Hit_Set_Virtual_SI 0x1e
#define Hit_Set_Virtual_SD 0x1f
#endif

View File

@ -0,0 +1,218 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2010-05-17 swkyer first version
* 2010-09-11 bernard port to Loongson SoC3210
* 2011-08-08 lgnq port to Loongson LS1B
* 2015-07-08 chinesebear port to Loongson LS1C
* 2019-07-19 Zhou Yanjie clean up code
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include <mips.h>
#include "cache.h"
.ent cache_init
.global cache_init
.set noreorder
cache_init:
move t1,ra
####part 2####
cache_detect_4way:
mfc0 t4, CP0_CONFIG
andi t5, t4, 0x0e00
srl t5, t5, 9 #ic
andi t6, t4, 0x01c0
srl t6, t6, 6 #dc
addiu t8, $0, 1
addiu t9, $0, 2
#set dcache way
beq t6, $0, cache_d1way
addiu t7, $0, 1 #1 way
beq t6, t8, cache_d2way
addiu t7, $0, 2 #2 way
beq $0, $0, cache_d4way
addiu t7, $0, 4 #4 way
cache_d1way:
beq $0, $0, 1f
addiu t6, t6, 12 #1 way
cache_d2way:
beq $0, $0, 1f
addiu t6, t6, 11 #2 way
cache_d4way:
addiu t6, t6, 10 #4 way (10), 2 way(11), 1 way(12)
1: #set icache way
beq t5, $0, cache_i1way
addiu t3, $0, 1 #1 way
beq t5, t8, cache_i2way
addiu t3, $0, 2 #2 way
beq $0, $0, cache_i4way
addiu t3, $0, 4 #4 way
cache_i1way:
beq $0, $0, 1f
addiu t5, t5, 12
cache_i2way:
beq $0, $0, 1f
addiu t5, t5, 11
cache_i4way:
addiu t5, t5, 10 #4 way (10), 2 way(11), 1 way(12)
1: addiu t4, $0, 1
sllv t6, t4, t6
sllv t5, t4, t5
#if 0
la t0, memvar
sw t7, 0x0(t0) #ways
sw t5, 0x4(t0) #icache size
sw t6, 0x8(t0) #dcache size
#endif
####part 3####
.set mips3
lui a0, 0x8000
addu a1, $0, t5
addu a2, $0, t6
cache_init_d2way:
#a0=0x80000000, a1=icache_size, a2=dcache_size
#a3, v0 and v1 used as local registers
mtc0 $0, CP0_TAGHI
addu v0, $0, a0
addu v1, a0, a2
1: slt a3, v0, v1
beq a3, $0, 1f
nop
mtc0 $0, CP0_TAGLO
beq t7, 1, 4f
cache Index_Store_Tag_D, 0x0(v0) # 1 way
beq t7, 2 ,4f
cache Index_Store_Tag_D, 0x1(v0) # 2 way
cache Index_Store_Tag_D, 0x2(v0) # 4 way
cache Index_Store_Tag_D, 0x3(v0)
4: beq $0, $0, 1b
addiu v0, v0, 0x20
1:
cache_flush_i2way:
addu v0, $0, a0
addu v1, a0, a1
1: slt a3, v0, v1
beq a3, $0, 1f
nop
beq t3, 1, 4f
cache Index_Invalidate_I, 0x0(v0) # 1 way
beq t3, 2, 4f
cache Index_Invalidate_I, 0x1(v0) # 2 way
cache Index_Invalidate_I, 0x2(v0)
cache Index_Invalidate_I, 0x3(v0) # 4 way
4: beq $0, $0, 1b
addiu v0, v0, 0x20
1:
cache_flush_d2way:
addu v0, $0, a0
addu v1, a0, a2
1: slt a3, v0, v1
beq a3, $0, 1f
nop
beq t7, 1, 4f
cache Index_Writeback_Inv_D, 0x0(v0) #1 way
beq t7, 2, 4f
cache Index_Writeback_Inv_D, 0x1(v0) # 2 way
cache Index_Writeback_Inv_D, 0x2(v0)
cache Index_Writeback_Inv_D, 0x3(v0) # 4 way
4: beq $0, $0, 1b
addiu v0, v0, 0x20
1:
cache_init_finish:
jr t1
nop
.set reorder
.end cache_init
###########################
# Enable CPU cache #
###########################
LEAF(enable_cpu_cache)
.set noreorder
mfc0 t0, CP0_CONFIG
nop
and t0, ~0x03
or t0, 0x03
mtc0 t0, CP0_CONFIG
nop
.set reorder
j ra
END (enable_cpu_cache)
###########################
# disable CPU cache #
###########################
LEAF(disable_cpu_cache)
.set noreorder
mfc0 t0, CP0_CONFIG
nop
and t0, ~0x03
or t0, 0x2
mtc0 t0, CP0_CONFIG
nop
.set reorder
j ra
END (disable_cpu_cache)
/**********************************/
/* Invalidate Instruction Cache */
/**********************************/
LEAF(Clear_TagLo)
.set noreorder
mtc0 zero, CP0_TAGLO
nop
.set reorder
j ra
END(Clear_TagLo)
.set mips3
/**********************************/
/* Invalidate Instruction Cache */
/**********************************/
LEAF(Invalidate_Icache_Ls1c)
.set noreorder
cache Index_Invalidate_I,0(a0)
cache Index_Invalidate_I,1(a0)
cache Index_Invalidate_I,2(a0)
cache Index_Invalidate_I,3(a0)
.set reorder
j ra
END(Invalidate_Icache_Ls1c)
/**********************************/
/* Invalidate Data Cache */
/**********************************/
LEAF(Invalidate_Dcache_ClearTag_Ls1c)
.set noreorder
cache Index_Store_Tag_D, 0(a0) # BDSLOT: clear tag
cache Index_Store_Tag_D, 1(a0) # BDSLOT: clear tag
.set reorder
j ra
END(Invalidate_Dcache_ClearTag_Ls1c)
LEAF(Invalidate_Dcache_Fill_Ls1c)
.set noreorder
cache Index_Writeback_Inv_D, 0(a0) # BDSLOT: clear tag
cache Index_Writeback_Inv_D, 1(a0) # BDSLOT: clear tag
.set reorder
j ra
END(Invalidate_Dcache_Fill_Ls1c)
LEAF(Writeback_Invalidate_Dcache)
.set noreorder
cache Hit_Writeback_Inv_D, (a0)
.set reorder
j ra
END(Writeback_Invalidate_Dcache)
.set mips0

View File

@ -0,0 +1,23 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-12-04 Jiaxun Yang Initial version
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include <mips.h>
.section ".start", "ax"
.set noreorder
.globl rt_cpu_early_init
rt_cpu_early_init:
jr ra
nop

View File

@ -0,0 +1,60 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2010-07-09 Bernard first version
* 2010-09-11 Bernard add CPU reset implementation
* 2015-07-06 chinesebear modified for loongson 1c
*/
#include <rtthread.h>
#include "gs232.h"
/**
* @addtogroup Loongson GS232
*/
/*@{*/
/**
* this function will reset CPU
*
*/
/* Reset the CPU by arming the watchdog with a one-tick timeout and
 * spinning until it fires. Never returns. */
void rt_hw_cpu_reset(void)
{
    /* open the watch-dog */
    WDT_EN = 0x01;    /* watch dog enable */
    WDT_TIMER = 0x01; /* watch dog will be timeout after 1 tick */
    WDT_SET = 0x01;   /* watch dog start */

    rt_kprintf("reboot system...\n");
    /* wait here for the watchdog to reset the SoC */
    while (1);
}
#define Hit_Invalidate_I 0x10
#define Hit_Invalidate_D 0x11
#define CONFIG_SYS_CACHELINE_SIZE 32
#define Hit_Writeback_Inv_D 0x15
/**
 * Write back + invalidate the D-cache and invalidate the I-cache over
 * [start_addr, start_addr + size) — used after loading/patching code.
 *
 * Fix: with size == 0 the old `while (1)` loop's break condition
 * (addr == aend, where aend underflowed below addr) could only be met
 * after sweeping essentially the whole address space; bail out early.
 */
void flush_cache(unsigned long start_addr, unsigned long size)
{
    unsigned long lsize = CONFIG_SYS_CACHELINE_SIZE;
    unsigned long addr = start_addr & ~(lsize - 1);        /* first line */
    unsigned long aend = (start_addr + size - 1) & ~(lsize - 1); /* last line */

    if (size == 0)
        return;

    while (1) {
        cache_op(Hit_Writeback_Inv_D, addr);
        cache_op(Hit_Invalidate_I, addr);
        if (addr == aend)
            break;
        addr += lsize;
    }
}
/*@}*/

View File

@ -0,0 +1,74 @@
/*
* Misc define for GS232
*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-12-04 Jiaxun Yang Initial version
*/
#ifndef __GS232_H__
#define __GS232_H__
#include <mips.h>
#define INTC_BASE 0xBFD01040
#ifdef SOC_LS1B
#define GS232_INTC_CELLS 4
#endif
#ifdef SOC_LS1C300
#define GS232_INTC_CELLS 5
#endif
#define GS232_NR_IRQS (32 * GS232_INTC_CELLS)
#define GMAC0_BASE 0xBFE10000
#define GMAC0_DMA_BASE 0xBFE11000
#define GMAC1_BASE 0xBFE20000
#define GMAC1_DMA_BASE 0xBFE21000
#define I2C0_BASE 0xBFE58000
#define PWM0_BASE 0xBFE5C000
#define PWM1_BASE 0xBFE5C010
#define PWM2_BASE 0xBFE5C020
#define PWM3_BASE 0xBFE5C030
#define WDT_BASE 0xBFE5C060
#define RTC_BASE 0xBFE64000
#define I2C1_BASE 0xBFE68000
#define I2C2_BASE 0xBFE70000
#define AC97_BASE 0xBFE74000
#define NAND_BASE 0xBFE78000
#define SPI_BASE 0xBFE80000
#define CAN1_BASE 0xBF004300
#define CAN0_BASE 0xBF004400
#ifndef __ASSEMBLY__
#include <rthw.h>
/* Watch Dog registers */
#define WDT_EN HWREG32(WDT_BASE + 0x00)
#define WDT_SET HWREG32(WDT_BASE + 0x08)
#define WDT_TIMER HWREG32(WDT_BASE + 0x04)
#define PLL_FREQ HWREG32(0xbfe78030)
#define PLL_DIV_PARAM HWREG32(0xbfe78034)
struct gs232_intc_regs
{
volatile unsigned int int_isr;
volatile unsigned int int_en;
volatile unsigned int int_set;
volatile unsigned int int_clr; /* offset 0x10*/
volatile unsigned int int_pol;
volatile unsigned int int_edge; /* offset 0 */
};
extern void rt_hw_timer_init(void);
#endif
#endif

View File

@ -0,0 +1,161 @@
/*
* Interrupt handle for GS232
*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2010-10-15 Bernard first version
* 2010-10-15 lgnq modified for LS1B
* 2013-03-29 aozima Modify the interrupt interface implementations.
* 2015-07-06 chinesebear modified for loongson 1c
* 2019-12-04 Jiaxun Yang Generialize
*/
#include <rtthread.h>
#include <rthw.h>
#include "gs232.h"
#define MAX_INTR (GS232_NR_IRQS)
static struct rt_irq_desc irq_handle_table[MAX_INTR];
void rt_hw_timer_handler();
static struct gs232_intc_regs volatile *gs232_hw0_icregs
= (struct gs232_intc_regs volatile *)(INTC_BASE);
/**
* @addtogroup Loongson GS232
*/
/*@{*/
/* Default ISR: every vector without an installed handler lands here. */
static void rt_hw_interrupt_handler(int vector, void *param)
{
    rt_kprintf("Unhandled interrupt %d occured!!!\n", vector);
}
/**
* This function will initialize hardware interrupt
*/
/**
 * Initialize the GS232 interrupt controller: quiesce every INTC cell,
 * unmask the cascaded CPU interrupt lines, and point every vector at the
 * default "unhandled" ISR.
 *
 * Fix: removed the unused local `c0_status` (declared, initialized,
 * never read).
 */
void rt_hw_interrupt_init(void)
{
    rt_int32_t idx;
    rt_int32_t i;

    for (i = 0; i < GS232_INTC_CELLS; i++)
    {
        /* Disable */
        (gs232_hw0_icregs + i)->int_en = 0x0;
        /* Trigger active low */
        (gs232_hw0_icregs + i)->int_pol = -1; /* Must be done here */
        /* Make all interrupts level triggered */
        (gs232_hw0_icregs + i)->int_edge = 0x00000000;
        /* Mask all interrupts */
        (gs232_hw0_icregs + i)->int_clr = 0xffffffff;
        /* Cell i is cascaded onto CPU interrupt line i + 2 */
        mips_unmask_cpu_irq(i + 2);
    }

    /* Route every vector to the default handler until one is installed */
    rt_memset(irq_handle_table, 0x00, sizeof(irq_handle_table));
    for (idx = 0; idx < MAX_INTR; idx++)
    {
        irq_handle_table[idx].handler = rt_hw_interrupt_handler;
    }
}
/**
* This function will mask a interrupt.
* @param vector the interrupt number
*/
void rt_hw_interrupt_mask(int vector)
{
/* mask interrupt */
(gs232_hw0_icregs+(vector>>5))->int_en &= ~(1 << (vector&0x1f));
}
/**
* This function will un-mask a interrupt.
* @param vector the interrupt number
*/
void rt_hw_interrupt_umask(int vector)
{
(gs232_hw0_icregs+(vector>>5))->int_en |= (1 << (vector&0x1f));
}
/**
* This function will install a interrupt service routine to a interrupt.
* @param vector the interrupt number
* @param new_handler the interrupt service routine to be installed
* @param old_handler the old interrupt service routine
*/
/* Install an interrupt service routine for a vector.
 * @param vector  the interrupt number (silently ignored if out of range)
 * @param handler the ISR to install
 * @param param   opaque argument passed to the ISR
 * @param name    ISR name (recorded only with RT_USING_INTERRUPT_INFO)
 * @return the previously installed handler, or RT_NULL if vector invalid */
rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
        void *param, const char *name)
{
    rt_isr_handler_t old_handler = RT_NULL;

    if (vector >= 0 && vector < MAX_INTR)
    {
        old_handler = irq_handle_table[vector].handler;
#ifdef RT_USING_INTERRUPT_INFO
        rt_strncpy(irq_handle_table[vector].name, name, RT_NAME_MAX);
#endif /* RT_USING_INTERRUPT_INFO */
        irq_handle_table[vector].handler = handler;
        irq_handle_table[vector].param = param;
    }

    return old_handler;
}
/**
* Call ISR
* @IRQn ID of IRQ
*/
/**
 * Call ISR
 * @IRQn ID of IRQ
 *
 * Looks up the installed handler for IRQn and invokes it with its
 * registered parameter; vectors without a user ISR hit the default
 * "unhandled" handler installed by rt_hw_interrupt_init().
 */
void gs232_do_IRQ(int IRQn)
{
    rt_isr_handler_t irq_func;
    void *param;

    irq_func = irq_handle_table[IRQn].handler;
    param = irq_handle_table[IRQn].param;
    irq_func(IRQn, param);

#ifdef RT_USING_INTERRUPT_INFO
    /* per-vector statistics, only kept when interrupt info is enabled */
    irq_handle_table[IRQn].counter++;
#endif
    return ;
}
/* Dispatch a pending CPU interrupt line.
 * @param ip CPU interrupt line number: 7 is the CP0 timer; lines >= 2
 *           carry cascaded INTC cell (ip - 2). */
void rt_do_mips_cpu_irq(rt_uint32_t ip)
{
    rt_uint32_t intstatus, irq, n;

    if (ip == 7) {
        rt_hw_timer_handler();
    } else {
        n = ip - 2;
        /* Receive interrupt signal, compute the irq */
        intstatus = (gs232_hw0_icregs + n)->int_isr & (gs232_hw0_icregs + n)->int_en;
        if (0 == intstatus)
            return ;

        /* dispatch the lowest-numbered pending source first */
        irq = __rt_ffs(intstatus) - 1;
        gs232_do_IRQ((n << 5) + irq);

        /* ack interrupt */
        (gs232_hw0_icregs + n)->int_clr |= (1 << irq);
    }
}
/*@}*/

View File

@ -0,0 +1,76 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-08-08 lgnq first version
*/
#ifndef __LS1B_H__
#define __LS1B_H__
#include <gs232.h>
#define LS1B_ACPI_IRQ 0
#define LS1B_HPET_IRQ 1
#define LS1B_UART0_IRQ 2
#define LS1B_UART1_IRQ 3
#define LS1B_UART2_IRQ 4
#define LS1B_UART3_IRQ 5
#define LS1B_UART4_IRQ 29
#define LS1B_UART5_IRQ 30
#define LS1B_UART6_IRQ 2  /* shares LS1B_UART0_IRQ */
#define LS1B_UART7_IRQ 2
#define LS1B_UART8_IRQ 2
#define LS1B_UART9_IRQ 3  /* shares LS1B_UART1_IRQ */
#define LS1B_UART10_IRQ 3
#define LS1B_UART11_IRQ 3
#define LS1B_CAN0_IRQ 6
#define LS1B_CAN1_IRQ 7
#define LS1B_SPI0_IRQ 8
#define LS1B_SPI1_IRQ 9
#define LS1B_AC97_IRQ 10
#define LS1B_MS_IRQ 11
#define LS1B_KB_IRQ 12
#define LS1B_DMA0_IRQ 13
#define LS1B_DMA1_IRQ 14
#define LS1B_NAND_IRQ 15
#define LS1B_I2C0_IRQ 16
#define LS1B_I2C1_IRQ 17
#define LS1B_PWM0_IRQ 18
#define LS1B_PWM1_IRQ 19
#define LS1B_PWM2_IRQ 20
#define LS1B_PWM3_IRQ 21
#define LS1B_LPC_IRQ 22
#define LS1B_EHCI_IRQ 32
#define LS1B_OHCI_IRQ 33
#define LS1B_GMAC1_IRQ 34
#define LS1B_GMAC2_IRQ 35
#define LS1B_SATA_IRQ 36
#define LS1B_GPU_IRQ 37
#define LS1B_PCI_INTA_IRQ 38
#define LS1B_PCI_INTB_IRQ 39
#define LS1B_PCI_INTC_IRQ 40
#define LS1B_PCI_INTD_IRQ 41
#define LS1B_GPIO_IRQ 64
#define LS1B_GPIO_FIRST_IRQ 64
#define LS1B_GPIO_IRQ_COUNT 64
#define LS1B_GPIO_LAST_IRQ (LS1B_GPIO_FIRST_IRQ + LS1B_GPIO_IRQ_COUNT-1)
#define INT_PCI_INTA (1<<6)
#define INT_PCI_INTB (1<<7)
#define INT_PCI_INTC (1<<8)
#define INT_PCI_INTD (1<<9)
#define LS1B_LAST_IRQ 159
#define MIPS_CPU_TIMER_IRQ 167
#define LS1B_INTREG_BASE 0xbfd01040
#define LS1B_DMA_IRQ_BASE 168
#define LS1B_DMA_IRQ_COUNT 16
#endif

View File

@ -0,0 +1,81 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-08-08 lgnq first version
* 2015-07-06 chinesebear modified for loongson 1c
*/
#ifndef __LS1C_H__
#define __LS1C_H__
#include <gs232.h>
#define LS1C_ACPI_IRQ 0
#define LS1C_HPET_IRQ 1
//#define LS1C_UART0_IRQ 3 // Linux uses 3; before v1.4 the LS1C assignment was faulty — 2 is recommended to be safe
#define LS1C_UART1_IRQ 4
#define LS1C_UART2_IRQ 5
#define LS1C_CAN0_IRQ 6
#define LS1C_CAN1_IRQ 7
#define LS1C_SPI0_IRQ 8
#define LS1C_SPI1_IRQ 9
#define LS1C_AC97_IRQ 10
#define LS1C_MS_IRQ 11
#define LS1C_KB_IRQ 12
#define LS1C_DMA0_IRQ 13
#define LS1C_DMA1_IRQ 14
#define LS1C_DMA2_IRQ 15
#define LS1C_NAND_IRQ 16
#define LS1C_PWM0_IRQ 17
#define LS1C_PWM1_IRQ 18
#define LS1C_PWM2_IRQ 19
#define LS1C_PWM3_IRQ 20
#define LS1C_RTC_INT0_IRQ 21
#define LS1C_RTC_INT1_IRQ 22
#define LS1C_RTC_INT2_IRQ 23
#define LS1C_UART3_IRQ 29
#define LS1C_ADC_IRQ 30
#define LS1C_SDIO_IRQ 31
#define LS1C_EHCI_IRQ (32+0)
#define LS1C_OHCI_IRQ (32+1)
#define LS1C_OTG_IRQ (32+2)
#define LS1C_MAC_IRQ (32+3)
#define LS1C_CAM_IRQ (32+4)
#define LS1C_UART4_IRQ (32+5)
#define LS1C_UART5_IRQ (32+6)
#define LS1C_UART6_IRQ (32+7)
#define LS1C_UART7_IRQ (32+8)
#define LS1C_UART8_IRQ (32+9)
#define LS1C_UART9_IRQ (32+13)
#define LS1C_UART10_IRQ (32+14)
#define LS1C_UART11_IRQ (32+15)
#define LS1C_I2C2_IRQ (32+17)
#define LS1C_I2C1_IRQ (32+18)
#define LS1C_I2C0_IRQ (32+19)
#define LS1C_GPIO_IRQ 64
#define LS1C_GPIO_FIRST_IRQ 64
#define LS1C_GPIO_IRQ_COUNT 96
#define LS1C_GPIO_LAST_IRQ (LS1C_GPIO_FIRST_IRQ + LS1C_GPIO_IRQ_COUNT-1)
#define LS1C_LAST_IRQ 159
#define LS1C_INTREG_BASE 0xbfd01040
// LS1C interrupts are split into five groups of 32 sources each
#define LS1C_NR_IRQS (32*5)
// Conversion between a GPIO pin number and its IRQ number
#define LS1C_GPIO_TO_IRQ(GPIOn) (LS1C_GPIO_FIRST_IRQ + (GPIOn))
#define LS1C_IRQ_TO_GPIO(IRQn) ((IRQn) - LS1C_GPIO_FIRST_IRQ)
#endif

View File

@ -0,0 +1,77 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2010-05-27 swkyer first version
*/
#include <rtthread.h>
#include <mips.h>
/* Compile-time default core geometry (safe values for the Loongson 1C);
 * refined at runtime by mips32_cfg_init() when CP0 Config1 exists. */
mips32_core_cfg_t g_mips_core =
{
    16,         /* icache_line_size */
    256,        /* icache_lines_per_way */
    4,          /* icache_ways */
    16,         /* dcache_line_size */
    256,        /* dcache_lines_per_way */
    4,          /* dcache_ways */
    16,         /* max_tlb_entries */
};
/* Return b raised to the power n (unsigned 16-bit arithmetic, wraps). */
static rt_uint16_t m_pow(rt_uint16_t b, rt_uint16_t n)
{
    rt_uint16_t result;

    for (result = 1; n != 0; n--)
    {
        result *= b;
    }
    return result;
}
/* Return floor(log2(b)); caller must pass b >= 1. */
static rt_uint16_t m_log2(rt_uint16_t b)
{
    rt_uint16_t shifts;

    for (shifts = 0; b != 1; shifts++)
    {
        b /= 2;
    }
    return shifts;
}
/**
 * Probe the cache/TLB geometry of this core from CP0 Config1 and
 * overwrite the compile-time defaults in g_mips_core.
 *
 * Field decodings follow the MIPS32 PRA Config1 layout:
 * sets-per-way = 64 * 2^S, line size = 2 * 2^L,
 * associativity = A + 1, TLB entries = MMUSize + 1.
 */
void mips32_cfg_init(void)
{
    rt_uint16_t val;
    rt_uint32_t cp0_config1;
    cp0_config1 = read_c0_config();
    /* Config.M (bit 31) set means the Config1 register is implemented */
    if (cp0_config1 & 0x80000000)
    {
    cp0_config1 = read_c0_config1();
    val = (cp0_config1 & (7<<22))>>22;  /* IS: icache sets per way */
    g_mips_core.icache_lines_per_way = 64 * m_pow(2, val);
    val = (cp0_config1 & (7<<19))>>19;  /* IL: icache line size */
    g_mips_core.icache_line_size = 2 * m_pow(2, val);
    val = (cp0_config1 & (7<<16))>>16;  /* IA: icache associativity */
    g_mips_core.icache_ways = val + 1;
    val = (cp0_config1 & (7<<13))>>13;  /* DS: dcache sets per way */
    g_mips_core.dcache_lines_per_way = 64 * m_pow(2, val);
    val = (cp0_config1 & (7<<10))>>10;  /* DL: dcache line size */
    g_mips_core.dcache_line_size = 2 * m_pow(2, val);
    val = (cp0_config1 & (7<<7))>>7;    /* DA: dcache associativity */
    g_mips_core.dcache_ways = val + 1;
    val = (cp0_config1 & (0x3F<<25))>>25;   /* MMUSize: TLB entries - 1 */
    g_mips_core.max_tlb_entries = val + 1;
    }
}

View File

@ -0,0 +1,41 @@
/*
* Assembly Macros For MIPS
*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-12-04 Jiaxun Yang Initial version
*/
#include <rtthread.h>
#include <rthw.h>
#include <mips.h>
#include <board.h>
/**
 * Timer interrupt service routine for the CP0 Count/Compare timer.
 * Re-writing Compare acknowledges the pending timer interrupt and
 * zeroing Count restarts the tick period.
 */
void rt_hw_timer_handler(void)
{
    unsigned int count;
    count = read_c0_compare();
    /* writing Compare clears the timer interrupt request */
    write_c0_compare(count);
    /* restart the period: Count climbs from 0 back towards Compare */
    write_c0_count(0);
    /* increase an OS tick */
    rt_tick_increase();
}
/**
 * Initialize the OS tick timer: program Compare for one tick period,
 * zero Count, and unmask CPU interrupt 7 (the timer interrupt).
 */
void rt_hw_timer_init(void)
{
    /* CPU_HZ/2 -- presumably Count ticks at half the core clock on
       this SoC; TODO confirm for the target */
    write_c0_compare(CPU_HZ/2/RT_TICK_PER_SECOND);
    write_c0_count(0);
    mips_unmask_cpu_irq(7);
}

View File

@ -0,0 +1,13 @@
# RT-Thread building script for component
from building import *

Import('rtconfig')

cwd = GetCurrentDir()

# collect C, C++ and GCC assembly sources of this directory
src = []
for pattern in ('*.c', '*.cpp', '*_gcc.S'):
    src = src + Glob(pattern)

CPPPATH = [cwd]

group = DefineGroup('libcpu', src, depend=[''], CPPPATH=CPPPATH)

Return('group')

View File

@ -0,0 +1,70 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-03-29 quanzhao the first version
* 2020-07-26 lizhirui porting to ls2k
*/
#include <rthw.h>
#include <rtdef.h>
/* Return the instruction-cache line size in bytes.
 * Fix: the register query this was ported from (an ARM CTR read) was
 * dropped in the MIPS port, leaving `ctr` uninitialized -- reading it
 * was undefined behaviour. Initialize it to 0 so the function
 * deterministically reports the minimum (4-byte) line size. */
rt_inline rt_uint64_t rt_cpu_icache_line_size(void)
{
    rt_uint64_t ctr = 0;
    return 4 << (ctr & 0xF);
}
/* Return the data-cache line size in bytes.
 * Fix: `ctr` was read uninitialized (undefined behaviour) -- the
 * original ARM CTR query was never ported. Initialize to 0 so the
 * result is a deterministic minimum (4-byte) line size. */
rt_inline rt_uint64_t rt_cpu_dcache_line_size(void)
{
    rt_uint64_t ctr = 0;
    return 4 << ((ctr >> 16) & 0xF);
}
/* Invalidate the I-cache range [addr, addr+size).
 * Not implemented on this port: the original body only computed
 * start/end/line-size into dead locals and performed no cache
 * maintenance, so this is an explicit, documented no-op. */
void rt_hw_cpu_icache_invalidate(void *addr, int size)
{
    (void)addr;
    (void)size;
}
/* Invalidate the D-cache range [addr, addr+size).
 * Not implemented on this port: the original body only computed dead
 * locals and touched no cache -- keep it as a documented no-op. */
void rt_hw_cpu_dcache_invalidate(void *addr, int size)
{
    (void)addr;
    (void)size;
}
/* Write back (clean) the D-cache range [addr, addr+size).
 * Not implemented on this port: the original body only computed dead
 * locals and touched no cache -- keep it as a documented no-op. */
void rt_hw_cpu_dcache_clean(void *addr, int size)
{
    (void)addr;
    (void)size;
}
/* Dispatch an I-cache maintenance request; only invalidate is known. */
void rt_hw_cpu_icache_ops(int ops, void *addr, int size)
{
    switch (ops)
    {
    case RT_HW_CACHE_INVALIDATE:
        rt_hw_cpu_icache_invalidate(addr, size);
        break;
    default:
        break;
    }
}
/* Dispatch a D-cache maintenance request: flush (clean) or invalidate. */
void rt_hw_cpu_dcache_ops(int ops, void *addr, int size)
{
    switch (ops)
    {
    case RT_HW_CACHE_FLUSH:
        rt_hw_cpu_dcache_clean(addr, size);
        break;
    case RT_HW_CACHE_INVALIDATE:
        rt_hw_cpu_dcache_invalidate(addr, size);
        break;
    default:
        break;
    }
}
/* I-cache enable state is not tracked on this port; always report 0. */
rt_base_t rt_hw_cpu_icache_status(void)
{
    rt_base_t status = 0;
    return status;
}
/* D-cache enable state is not tracked on this port; always report 0. */
rt_base_t rt_hw_cpu_dcache_status(void)
{
    rt_base_t status = 0;
    return status;
}

View File

@ -0,0 +1,24 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-03-25 quanzhao the first version
*/
#ifndef __CACHE_H__
#define __CACHE_H__
/* Low-level CPU cache/MMU helpers implemented elsewhere in this port. */
unsigned long rt_cpu_get_smp_id(void);          /* current core identifier */
void rt_cpu_mmu_disable(void);
void rt_cpu_mmu_enable(void);
void rt_cpu_tlb_set(volatile unsigned long*);   /* install page-table base */
void rt_cpu_dcache_clean_flush(void);           /* write back + invalidate D-cache */
void rt_cpu_icache_flush(void);
void rt_cpu_vector_set_base(unsigned int addr); /* exception vector base */
#endif

View File

@ -0,0 +1,26 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020-04-05 bigmagic Initial version
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include <mips.h>
.section ".text", "ax"
.set noreorder
.globl rt_cpu_early_init
rt_cpu_early_init:
mfc0 t0, CP0_CONFIG
ori t0, 3
mtc0 t0, CP0_CONFIG
ehb
jr ra

View File

@ -0,0 +1,23 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef CPUPORT_H__
#define CPUPORT_H__
#ifdef RT_USING_SMP
/* Ticket spinlock storage: 'next' is the ticket handed to the next
 * acquirer, 'owner' is the ticket currently allowed in. The union
 * lets the whole lock be loaded/stored as a single word. */
typedef union {
    unsigned long slock;
    struct __arch_tickets {
        unsigned short owner;
        unsigned short next;
    } tickets;
} rt_hw_spinlock_t;
#endif
#endif /*CPUPORT_H__*/

View File

@ -0,0 +1,222 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020-07-26 lizhirui the first version
*/
#include <string.h>
#include <stdlib.h>
#include "mips.h"
#include "mips_mmu.h"
/* Initialize the MMU: enable 64-bit addressing for user, supervisor
 * and kernel mode (Status.UX/SX/KX) and flush the TLB and ITLB. */
void mmu_init()
{
    uint32_t status = read_c0_status();

    status |= 0x07 << 5;    /* set UX, SX and KX */
    write_c0_status(status);

    mmu_clear_tlb();
    mmu_clear_itlb();
}
/* Select the CPU privilege mode via the Status KSU field (bits 4:3). */
void mmu_set_cpu_mode(cpu_mode_t cpu_mode)
{
    uint32_t status;

    status = read_c0_status();
    status = (status & ~(0x03 << 3)) | (((uint32_t)cpu_mode & 0x03) << 3);
    write_c0_status(status);
}
/* Return the current privilege mode from the Status KSU field. */
cpu_mode_t mmu_get_cpu_mode()
{
    return (cpu_mode_t)((read_c0_status() >> 3) & 0x03);
}
/* Invalidate every TLB entry by writing an all-zero (invalid) item at
 * each index.
 * Fix: removed the unused local `uint64_t va = KSEG0BASE;` -- it was
 * never read (dead code left over from an earlier revision). */
void mmu_clear_tlb()
{
    uint32_t max_tlb_index = mmu_get_max_tlb_index();
    uint32_t entry;
    tlb_item_t tlb_item;

    for(entry = 0;entry <= max_tlb_index;entry++)
    {
        mmu_tlb_item_init(&tlb_item);
        mmu_tlb_write_indexed(entry,&tlb_item);
    }
}
/* Flush the instruction micro-TLB through the implementation-specific
 * CP0 Diag register ($22). */
void mmu_clear_itlb()
{
    uint32_t diag = read_c0_diag();
    write_c0_diag(diag | (0x01 << 2));//write ITLB bit
    /* dummy CP0 read -- apparently serves as a hazard barrier, see the
       note in mmu_tlb_find() */
    read_c0_entrylo0();
}
/* Return the largest valid TLB index (Config1 MMUSize field, bits 30:25). */
uint32_t mmu_get_max_tlb_index()
{
    return (read_c0_config1() >> 25) & 0x3F;
}
/**
 * Write *tlb_item into TLB slot 'index' (tlbwi).
 * The hardware treats an entry as global only when BOTH EntryLo G bits
 * agree, so the two G bits are forced equal first.
 */
void mmu_tlb_write_indexed(uint32_t index,tlb_item_t *tlb_item)
{
    tlb_item -> entry_lo[0].g |= tlb_item -> entry_lo[1].g;
    tlb_item -> entry_lo[1].g |= tlb_item -> entry_lo[0].g;
    mmu_tlb_set_index(index);
    write_c0_entrylo0(reg_type_convert(tlb_item -> entry_lo[0],uint64_t));
    write_c0_entrylo1(reg_type_convert(tlb_item -> entry_lo[1],uint64_t));
    write_c0_entryhi(reg_type_convert(tlb_item -> entry_hi,uint64_t));
    write_c0_pagemask(reg_type_convert(tlb_item -> page_mask,uint64_t));
    tlb_write_indexed();
    /* dummy CP0 read used as a hazard barrier -- see mmu_tlb_find() */
    read_c0_entrylo0();
}
/**
 * Write *tlb_item into a hardware-chosen TLB slot (tlbwr).
 * As in mmu_tlb_write_indexed(), the two EntryLo G bits are forced to
 * agree so the global attribute takes effect.
 */
void mmu_tlb_write_random(tlb_item_t *tlb_item)
{
    tlb_item -> entry_lo[0].g |= tlb_item -> entry_lo[1].g;
    tlb_item -> entry_lo[1].g |= tlb_item -> entry_lo[0].g;
    write_c0_entrylo0(reg_type_convert(tlb_item -> entry_lo[0],uint64_t));
    write_c0_entrylo1(reg_type_convert(tlb_item -> entry_lo[1],uint64_t));
    write_c0_entryhi(reg_type_convert(tlb_item -> entry_hi,uint64_t));
    write_c0_pagemask(reg_type_convert(tlb_item -> page_mask,uint64_t));
    tlb_write_random();
    /* dummy CP0 read used as a hazard barrier -- see mmu_tlb_find() */
    read_c0_entrylo0();
}
/**
 * Read TLB slot 'index' (tlbr) and convert the raw CP0 register images
 * into the bit-field structs of *tlb_item.
 */
void mmu_tlb_read(uint32_t index,tlb_item_t *tlb_item)
{
    mmu_tlb_set_index(index);
    tlb_read();
    uint64_t entrylo[2];
    uint64_t entryhi;
    uint64_t page_mask;
    entrylo[0] = read_c0_entrylo0();
    entrylo[1] = read_c0_entrylo1();
    entryhi = read_c0_entryhi();
    page_mask = read_c0_pagemask();
    tlb_item -> entry_lo[0] = reg_type_convert(entrylo[0],entry_lo_t);
    tlb_item -> entry_lo[1] = reg_type_convert(entrylo[1],entry_lo_t);
    tlb_item -> entry_hi = reg_type_convert(entryhi,entry_hi_t);
    tlb_item -> page_mask = reg_type_convert(page_mask,page_mask_t);
}
/**
 * Probe the TLB (tlbp) for a mapping of (vpn, asid).
 * On return *index holds the probed slot number; the return value is
 * non-zero when a match was found.
 * EntryLo0/1 are loaded with a dummy invalid entry before the probe;
 * per the original author's note below this is required for tlbp to
 * behave under QEMU.
 */
uint32_t mmu_tlb_find(uint64_t vpn,uint32_t asid,uint32_t *index)
{
    entry_hi_t entry_hi;
    entry_hi.r = (vpn >> 62) & 0x03;            /* address-region bits 63:62 */
    entry_hi.vpn2 = (vpn >> 13) & 0x7FFFFFFU;   /* even/odd page-pair number */
    entry_hi.asid = asid & 0xFFU;
    tlb_item_t tlb_item;
    //mmu_tlb_read(6,&tlb_item);
    //tlb_dump();
    mmu_tlb_item_init(&tlb_item);
    tlb_item.entry_lo[0].g = tlb_item.entry_lo[1].g = 1;
    read_c0_entrylo0();//i don't know why,but if i don't read any register of mmu,tlbp will fail in qemu.
    write_c0_entrylo0(reg_type_convert(tlb_item.entry_lo[0],uint64_t));
    write_c0_entrylo1(reg_type_convert(tlb_item.entry_lo[1],uint64_t));
    write_c0_entryhi(reg_type_convert(entry_hi,uint64_t));
    //__asm__ __volatile__("ehb");
    //read_c0_entryhi();
    //rt_kprintf("entry_hi = %p\n",read_c0_entryhi());
    tlb_probe();
    *index = mmu_tlb_get_index();
    return mmu_tlb_is_matched();
}
/* Reset *tlb_item to an all-invalid entry whose cache attribute is
 * set to 3 (cacheable) on both halves of the page pair. */
void mmu_tlb_item_init(tlb_item_t *tlb_item)
{
    memset(tlb_item, 0, sizeof(tlb_item_t));
    tlb_item->entry_lo[0].c = 0x03;
    tlb_item->entry_lo[1].c = 0x03;
}
/**
 * Install a vpn -> ppn translation of the given page size.
 * A MIPS TLB entry maps an even/odd pair of pages: the existing entry
 * for the pair is fetched (if present) and its old slot cleared, the
 * half selected by 'vpn' is filled in, then the entry is written back
 * with tlbwr.
 *
 * @param vpn       virtual page address
 * @param ppn       physical page address
 * @param page_mask page-size selector (PAGE_MASK_4KB .. PAGE_MASK_1GB)
 * @param asid      address-space id of the entry
 * @param global    non-zero makes the entry match any ASID
 */
void mmu_set_map(uint64_t vpn,uint64_t ppn,page_mask_enum_t page_mask,uint32_t asid,uint32_t global)
{
    uint64_t page_mask_v = (uint64_t)page_mask;
    /*if(page_mask_v & (1 << 13))
    {
        page_mask_v |= (1 << 12);
    }*/
    /* lb: the even/odd selector bit for this page size */
    uint64_t lb = lowbit((~(page_mask_v)) << 12);
    /* keep only the address bits meaningful at this page size */
    uint64_t pn_remained = ((~(page_mask_v)) << 12) | lb;
    vpn &= pn_remained;
    ppn &= pn_remained;
    uint64_t odd_vpn = vpn | lb;    /* NOTE(review): unused */
    uint64_t even_vpn = vpn & (~lb);
    uint32_t index;
    tlb_item_t tlb_item,tlb2_item;
    mmu_tlb_item_init(&tlb_item);
    mmu_tlb_item_init(&tlb2_item);
    tlb_item.page_mask.mask = page_mask;
    /* if the pair is already mapped, load it and invalidate the old
       slot so the refreshed entry can be re-inserted via tlbwr */
    if(mmu_tlb_find(vpn & (~lb),asid,&index))
    {
        mmu_tlb_read(index,&tlb_item);
        mmu_tlb_write_indexed(index,&tlb2_item);
    }
    /* fill the half of the pair addressed by vpn */
    entry_lo_t *entry_lo = &tlb_item.entry_lo[vpn == even_vpn ? 0 : 1];
    tlb_item.entry_lo[0].g = tlb_item.entry_lo[1].g = global;
    entry_lo -> d = 1;      /* dirty = writable */
    entry_lo -> ri = 0;     /* readable */
    entry_lo -> xi = 0;     /* executable */
    entry_lo -> v = 1;      /* valid */
    entry_lo -> pfn = ppn >> 12;
    tlb_item.entry_hi.r = (vpn >> 62) & 0x03;
    tlb_item.entry_hi.vpn2 = (vpn >> 13) & 0x7FFFFFFU;
    tlb_item.entry_hi.asid = asid & 0xFFU;
    mmu_tlb_write_random(&tlb_item);
}
/* Return the current value of the CP0 Random register. */
uint32_t mmu_tlb_get_random()
{
    uint32_t random_index = read_c0_random();
    return random_index;
}
/* Return the slot number held in the CP0 Index register (low 6 bits). */
uint32_t mmu_tlb_get_index()
{
    uint32_t index = read_c0_index();
    return index & 0x3F;
}
/* Select the TLB slot for tlbwi/tlbr via the CP0 Index register. */
void mmu_tlb_set_index(uint32_t index)
{
    index &= 0x3F;
    write_c0_index(index);
}
/* After tlbp: non-zero if a matching entry was found (Index.P, bit 31,
 * is clear on a hit). */
uint32_t mmu_tlb_is_matched()
{
    return !(read_c0_index() & 0x80000000);
}
/* Return the faulting virtual address from CP0 BadVAddr. */
uint64_t mmu_tlb_get_bad_vaddr()
{
    uint64_t bad_vaddr = read_c0_badvaddr();
    return bad_vaddr;
}
/* Debug aid: print every TLB entry via rt_kprintf. */
void tlb_dump()
{
    uint32_t max_index = mmu_get_max_tlb_index();
    //uint32_t max_index = 10;
    uint32_t entry;
    tlb_item_t tlb_item;
    for(entry = 0;entry <= max_index;entry++)
    {
        mmu_tlb_read(entry,&tlb_item);
        //mmu_tlb_write_indexed(entry,&tlb_item);
        //mmu_tlb_read(entry,&tlb_item);
        /* NOTE(review): the printed "vpn" ORs asid<<62 into the value;
           bits 63:62 are the EntryHi R field, not the ASID -- verify
           the intended format */
        rt_kprintf("vpn = 0x%p,ppn0 = 0x%p,ppn1 = 0x%p\n",(uint64_t)tlb_item.entry_hi.vpn2 << 13 | (uint64_t)tlb_item.entry_hi.asid << 62,(uint64_t)tlb_item.entry_lo[0].pfn << 12,(uint64_t)tlb_item.entry_lo[1].pfn << 12);
        rt_kprintf("v = %d,d = %d,g = %d,ri = %d,xi = %d,c = %d\n",tlb_item.entry_lo[0].v,tlb_item.entry_lo[0].d,tlb_item.entry_lo[0].g,tlb_item.entry_lo[0].ri,tlb_item.entry_lo[0].xi,tlb_item.entry_lo[0].c);
        rt_kprintf("v = %d,d = %d,g = %d,ri = %d,xi = %d,c = %d\n",tlb_item.entry_lo[1].v,tlb_item.entry_lo[1].d,tlb_item.entry_lo[1].g,tlb_item.entry_lo[1].ri,tlb_item.entry_lo[1].xi,tlb_item.entry_lo[1].c);
    }
}

View File

@ -0,0 +1,99 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020-07-26 lizhirui the first version
*/
#ifndef __MIPS_MMU_H__
#define __MIPS_MMU_H__
/* Privilege modes as encoded in the CP0 Status KSU field (bits 4:3). */
typedef enum cpu_mode_t
{
    CPU_MODE_KERNEL = 0x00,
    CPU_MODE_SUPERVISOR = 0x01,
    CPU_MODE_USER = 0x02
}cpu_mode_t;
/* Page sizes selectable per TLB entry (PageMask "mask" encodings). */
typedef enum page_mask_enum_t
{
    PAGE_MASK_4KB = 0x00,
    PAGE_MASK_16KB = 0x03,
    PAGE_MASK_64KB = 0x0F,
    PAGE_MASK_256KB = 0x3F,
    PAGE_MASK_1MB = 0xFF,
    PAGE_MASK_4MB = 0x3FF,
    PAGE_MASK_16MB = 0xFFF,
    PAGE_MASK_64MB = 0x3FFF,
    PAGE_MASK_256MB = 0xFFFF,
    PAGE_MASK_1GB = 0x3FFFF
}page_mask_enum_t;
/* Bit-field image of the CP0 PageMask register. */
typedef struct page_mask_t
{
    uint64_t : 11;
    uint64_t : 2;
    uint64_t mask : 18;     /* page-size selector (page_mask_enum_t) */
    uint64_t : 33;
}page_mask_t;
/* Bit-field image of CP0 EntryLo0/EntryLo1 (one half of a page pair). */
typedef struct entry_lo_t
{
    uint64_t g : 1;         /* global: match regardless of ASID */
    uint64_t v : 1;         /* valid */
    uint64_t d : 1;         /* dirty: page is writable */
    uint64_t c : 3;         /* cache coherency attribute */
    uint64_t pfn : 24;      /* physical frame number */
    uint64_t pfnx : 3;      /* PFN extension */
    uint64_t : 29;
    uint64_t xi : 1;        /* execute inhibit */
    uint64_t ri : 1;        /* read inhibit */
}entry_lo_t;
/* Bit-field image of CP0 EntryHi. */
typedef struct entry_hi_t
{
    uint64_t asid : 8;      /* address-space id */
    uint64_t : 5;
    uint64_t vpn2 : 27;     /* virtual page-pair number */
    uint64_t : 22;
    uint64_t r : 2;         /* address region (VA bits 63:62) */
}entry_hi_t;
/* One complete TLB entry: even/odd EntryLo pair + EntryHi + PageMask. */
typedef struct tlb_item_t
{
    entry_lo_t entry_lo[2];
    entry_hi_t entry_hi;
    page_mask_t page_mask;
}tlb_item_t;
/* CP0 registers not covered by the generic mips.h accessors. */
#define read_c0_diag() __read_32bit_c0_register($22, 0)
#define write_c0_diag(val) __write_32bit_c0_register($22, 0, val)
#define read_c0_badvaddr() __read_64bit_c0_register($8, 0)
#define read_c0_random() __read_32bit_c0_register($1, 0)
/* Reinterpret the bits of 'variable' as 'new_type' (no conversion). */
#define reg_type_convert(variable,new_type) *((new_type *)(&variable))
/* Lowest set bit of x. */
#define lowbit(x) ((x) & (-(x)))
void mmu_init();
void mmu_set_cpu_mode(cpu_mode_t cpu_mode);
cpu_mode_t mmu_get_cpu_mode();
void mmu_clear_tlb();
void mmu_clear_itlb();
uint32_t mmu_get_max_tlb_index();
void mmu_tlb_write_indexed(uint32_t index,tlb_item_t *tlb_item);
void mmu_tlb_write_random(tlb_item_t *tlb_item);
void mmu_tlb_read(uint32_t index,tlb_item_t *tlb_item);
uint32_t mmu_tlb_find(uint64_t vpn,uint32_t asid,uint32_t *index);
void mmu_tlb_item_init(tlb_item_t *tlb_item);
void mmu_set_map(uint64_t vpn,uint64_t ppn,page_mask_enum_t page_mask,uint32_t asid,uint32_t global);
uint32_t mmu_tlb_get_random();
uint32_t mmu_tlb_get_index();
void mmu_tlb_set_index(uint32_t index);
uint32_t mmu_tlb_is_matched();
uint64_t mmu_tlb_get_bad_vaddr();
void tlb_dump();
#endif

View File

@ -0,0 +1,79 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020-07-28 lizhirui first version
*/
#include <rtthread.h>
#include <mips.h>
/* Compile-time default core geometry for the LS2K port; refined at
 * runtime by mips32_cfg_init() when CP0 Config1 exists. */
mips32_core_cfg_t g_mips_core =
{
    64,     /* icache_line_size */
    128,    /* icache_lines_per_way */
    4,      /* icache_ways */
    32768,  /* presumably total icache size in bytes -- check mips32_core_cfg_t in mips.h */
    64,     /* dcache_line_size */
    128,    /* dcache_lines_per_way */
    4,      /* dcache_ways */
    32768,  /* presumably total dcache size in bytes -- check mips32_core_cfg_t in mips.h */
    64,     /* max_tlb_entries */
};
/* Return b raised to the power n (unsigned 16-bit arithmetic, wraps). */
static rt_uint16_t m_pow(rt_uint16_t b, rt_uint16_t n)
{
    rt_uint16_t result;

    for (result = 1; n != 0; n--)
    {
        result *= b;
    }
    return result;
}
/* Return floor(log2(b)); caller must pass b >= 1. */
static rt_uint16_t m_log2(rt_uint16_t b)
{
    rt_uint16_t shifts;

    for (shifts = 0; b != 1; shifts++)
    {
        b /= 2;
    }
    return shifts;
}
/**
 * Probe the cache/TLB geometry of this core from CP0 Config1 and
 * overwrite the compile-time defaults in g_mips_core.
 *
 * Field decodings follow the MIPS32 PRA Config1 layout:
 * sets-per-way = 64 * 2^S, line size = 2 * 2^L,
 * associativity = A + 1, TLB entries = MMUSize + 1.
 */
void mips32_cfg_init(void)
{
    rt_uint16_t val;
    rt_uint32_t cp0_config1;
    cp0_config1 = read_c0_config();
    /* Config.M (bit 31) set means the Config1 register is implemented */
    if (cp0_config1 & 0x80000000)
    {
        cp0_config1 = read_c0_config1();
        val = (cp0_config1 & (7<<22))>>22;  /* IS: icache sets per way */
        g_mips_core.icache_lines_per_way = 64 * m_pow(2, val);
        val = (cp0_config1 & (7<<19))>>19;  /* IL: icache line size */
        g_mips_core.icache_line_size = 2 * m_pow(2, val);
        val = (cp0_config1 & (7<<16))>>16;  /* IA: icache associativity */
        g_mips_core.icache_ways = val + 1;
        val = (cp0_config1 & (7<<13))>>13;  /* DS: dcache sets per way */
        g_mips_core.dcache_lines_per_way = 64 * m_pow(2, val);
        val = (cp0_config1 & (7<<10))>>10;  /* DL: dcache line size */
        g_mips_core.dcache_line_size = 2 * m_pow(2, val);
        val = (cp0_config1 & (7<<7))>>7;    /* DA: dcache associativity */
        g_mips_core.dcache_ways = val + 1;
        val = (cp0_config1 & (0x3F<<25))>>25;   /* MMUSize: TLB entries - 1 */
        g_mips_core.max_tlb_entries = val + 1;
    }
}

View File

@ -0,0 +1,821 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-01-10 bernard porting to AM1808
* 2020-07-26 lizhirui porting to ls2k
*/
#include <rtthread.h>
#include <rthw.h>
#include <board.h>
#include <stddef.h>
#include "cache.h"
#include "mips_mmu.h"
#include "mmu.h"
void *current_mmu_table = RT_NULL;
/* Return the page table most recently installed with switch_mmu(). */
void *mmu_table_get()
{
    return current_mmu_table;
}
/* Record a new active page table and flush the TLB and ITLB so no
 * stale translations survive the switch. */
void switch_mmu(void *mmu_table)
{
    current_mmu_table = mmu_table;

    mmu_clear_tlb();
    mmu_clear_itlb();
}
/* dump 2nd level page table */
/* Debug aid inherited from the ARM port: decode and print the 256
 * entries of an ARM short-descriptor level-2 table. Runs of unmapped
 * entries are compressed to one "fault" line per 16 entries. */
void rt_hw_cpu_dump_page_table_2nd(rt_uint32_t *ptb)
{
    int i;
    int fcnt = 0;
    for (i = 0; i < 256; i++)
    {
        rt_uint32_t pte2 = ptb[i];
        if ((pte2 & 0x3) == 0)
        {
            if (fcnt == 0)
                rt_kprintf("    ");
            rt_kprintf("%04x: ", i);
            fcnt++;
            if (fcnt == 16)
            {
                rt_kprintf("fault\n");
                fcnt = 0;
            }
            continue;
        }
        if (fcnt != 0)
        {
            rt_kprintf("fault\n");
            fcnt = 0;
        }
        rt_kprintf("    %04x: %x: ", i, pte2);
        if ((pte2 & 0x3) == 0x1)
        {
            /* large page descriptor */
            rt_kprintf("L,ap:%x,xn:%d,texcb:%02x\n",
                       ((pte2 >> 7) | (pte2 >> 4))& 0xf,
                       (pte2 >> 15) & 0x1,
                       ((pte2 >> 10) | (pte2 >> 2)) & 0x1f);
        }
        else
        {
            /* small page descriptor */
            rt_kprintf("S,ap:%x,xn:%d,texcb:%02x\n",
                       ((pte2 >> 7) | (pte2 >> 4))& 0xf, pte2 & 0x1,
                       ((pte2 >> 4) | (pte2 >> 2)) & 0x1f);
        }
    }
}
/* Debug aid inherited from the ARM port: decode and print all 4096
 * level-1 entries of an ARM short-descriptor page table (fault /
 * page-table / section / supersection). */
void rt_hw_cpu_dump_page_table(rt_uint32_t *ptb)
{
    int i;
    int fcnt = 0;
    rt_kprintf("page table@%p\n", ptb);
    for (i = 0; i < 1024*4; i++)
    {
        rt_uint32_t pte1 = ptb[i];
        if ((pte1 & 0x3) == 0)
        {
            rt_kprintf("%03x: ", i);
            fcnt++;
            if (fcnt == 16)
            {
                rt_kprintf("fault\n");
                fcnt = 0;
            }
            continue;
        }
        if (fcnt != 0)
        {
            rt_kprintf("fault\n");
            fcnt = 0;
        }
        rt_kprintf("%03x: %08x: ", i, pte1);
        if ((pte1 & 0x3) == 0x3)
        {
            rt_kprintf("LPAE\n");
        }
        else if ((pte1 & 0x3) == 0x1)
        {
            rt_kprintf("pte,ns:%d,domain:%d\n",
                       (pte1 >> 3) & 0x1, (pte1 >> 5) & 0xf);
            /*
             *rt_hw_cpu_dump_page_table_2nd((void*)((pte1 & 0xfffffc000)
             * - 0x80000000 + 0xC0000000));
             */
        }
        else if (pte1 & (1 << 18))
        {
            rt_kprintf("super section,ns:%d,ap:%x,xn:%d,texcb:%02x\n",
                       (pte1 >> 19) & 0x1,
                       ((pte1 >> 13) | (pte1 >> 10))& 0xf,
                       (pte1 >> 4) & 0x1,
                       ((pte1 >> 10) | (pte1 >> 2)) & 0x1f);
        }
        else
        {
            rt_kprintf("section,ns:%d,ap:%x,"
                       "xn:%d,texcb:%02x,domain:%d\n",
                       (pte1 >> 19) & 0x1,
                       ((pte1 >> 13) | (pte1 >> 10))& 0xf,
                       (pte1 >> 4) & 0x1,
                       (((pte1 & (0x7 << 12)) >> 10) |
                        ((pte1 & 0x0c) >> 2)) & 0x1f,
                       (pte1 >> 5) & 0xf);
        }
    }
}
/* level1 page table, each entry for 1MB memory. */
volatile unsigned long MMUTable[4*1024] __attribute__((aligned(16*1024)));
/**
 * Fill level-1 section entries mapping [vaddrStart, vaddrEnd] to the
 * physical range starting at paddrStart with attribute bits 'attr'.
 * One entry covers a 1 MiB section; both bounds are inclusive.
 */
void rt_hw_mmu_setmtt(rt_uint32_t vaddrStart,
                      rt_uint32_t vaddrEnd,
                      rt_uint32_t paddrStart,
                      rt_uint32_t attr)
{
    volatile rt_uint32_t *pTT;
    volatile int i, nSec;
    pTT = (rt_uint32_t *)MMUTable + (vaddrStart >> 20);
    nSec = (vaddrEnd >> 20) - (vaddrStart >> 20);
    /* <= : vaddrEnd is inclusive, so nSec+1 sections are written */
    for(i = 0; i <= nSec; i++)
    {
        *pTT = attr | (((paddrStart >> 20) + i) << 20);
        pTT++;
    }
}
/* On the ARM port this read-modify-wrote the CP15 domain access
 * control register; MIPS has no equivalent, so both accesses are
 * stubbed out below.
 * Fix: the stubbed version returned the uninitialized local
 * 'old_domain' (undefined behaviour) -- report 0 deterministically. */
unsigned long rt_hw_set_domain_register(unsigned long domain_val)
{
    unsigned long old_domain = 0;

    (void)domain_val;
    //asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (old_domain));
    //asm volatile ("mcr p15, 0, %0, c3, c0\n" : :"r" (domain_val) : "memory");
    return old_domain;
}
void rt_hw_cpu_dcache_clean(void *addr, int size);
/**
 * Populate the level-1 MMUTable from an array of 'size' memory
 * descriptors, then clean the table out of the D-cache so the
 * hardware walker sees it.
 */
void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size)
{
    /* set page table */
    for(; size > 0; size--)
    {
        rt_hw_mmu_setmtt(mdesc->vaddr_start, mdesc->vaddr_end,
                         mdesc->paddr_start, mdesc->attr);
        mdesc++;
    }
    rt_hw_cpu_dcache_clean((void*)MMUTable, sizeof MMUTable);
}
/**
 * Bring the CPU up on MMUTable: flush caches, disable cache and MMU,
 * program the domain register and page-table base, then re-enable
 * everything. Sequence inherited from the ARM port; several of the
 * called helpers are stubs on MIPS.
 */
void rt_hw_mmu_init(void)
{
    rt_cpu_dcache_clean_flush();
    rt_cpu_icache_flush();
    rt_hw_cpu_dcache_disable();
    rt_hw_cpu_icache_disable();
    rt_cpu_mmu_disable();
    /*rt_hw_cpu_dump_page_table(MMUTable);*/
    /* 0x55555555 = every domain set to "client" (access checked) */
    rt_hw_set_domain_register(0x55555555);
    rt_cpu_tlb_set(MMUTable);
    rt_cpu_mmu_enable();
    rt_hw_cpu_icache_enable();
    rt_hw_cpu_dcache_enable();
}
/*
mem map
*/
void rt_hw_cpu_dcache_clean(void *addr, int size);
/**
 * Initialize an rt_mmu_info descriptor over the virtual region
 * [v_address, v_address+size).
 *
 * @param vtable level-1 page table backing the region
 * @param pv_off physical-to-virtual offset of page-table pages
 * @return 0 on success; -1 on bad arguments, address wrap-around, or
 *         if the range already contains live level-1 entries
 */
int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void* v_address, size_t size, size_t *vtable, size_t pv_off)
{
    size_t l1_off, va_s, va_e;
    rt_base_t level;
    if (!mmu_info || !vtable)
    {
        return -1;
    }
    va_s = (size_t)v_address;
    va_e = (size_t)v_address + size - 1;
    if ( va_e < va_s)
    {
        return -1;
    }
    /* convert byte addresses to level-1 (section) indices */
    va_s >>= ARCH_SECTION_SHIFT;
    va_e >>= ARCH_SECTION_SHIFT;
    if (va_s == 0)
    {
        return -1;
    }
    level = rt_hw_interrupt_disable();
    /* refuse a range whose level-1 entries are already in use */
    for (l1_off = va_s; l1_off <= va_e; l1_off++)
    {
        size_t v = vtable[l1_off];
        if (v & ARCH_MMU_USED_MASK)
        {
            rt_kprintf("Error:vtable[%d] = 0x%p(is not zero),va_s = 0x%p,va_e = 0x%p!\n",l1_off,v,va_s,va_e);
            rt_hw_interrupt_enable(level);
            return -1;
        }
    }
    mmu_info->vtable = vtable;
    mmu_info->vstart = va_s;
    mmu_info->vend = va_e;
    mmu_info->pv_off = pv_off;
    rt_hw_interrupt_enable(level);
    return 0;
}
/**
 * Find a run of 'pages' consecutive unmapped pages inside the managed
 * virtual range and return its start address (0 if none / bad args).
 * Scans level-1 entries; a missing level-2 table counts as a whole
 * section (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE pages) of free space.
 */
static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
{
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t find_off = 0;
    size_t find_va = 0;
    int n = 0;      /* length of the current free run */
    if (!pages)
    {
        return 0;
    }
    if (!mmu_info)
    {
        return 0;
    }
    for (l1_off = mmu_info->vstart; l1_off <= mmu_info->vend; l1_off++)
    {
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            /* level-2 table exists: scan page by page */
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
            for (l2_off = 0; l2_off < (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE); l2_off++)
            {
                if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
                {
                    /* in use */
                    n = 0;
                }
                else
                {
                    if (!n)
                    {
                        /* remember the start of a new free run */
                        find_va = l1_off;
                        find_off = l2_off;
                    }
                    n++;
                    if (n >= pages)
                    {
                        return (find_va << ARCH_SECTION_SHIFT) + (find_off << ARCH_PAGE_SHIFT);
                    }
                }
            }
        }
        else
        {
            /* whole section unmapped */
            if (!n)
            {
                find_va = l1_off;
                find_off = 0;
            }
            n += (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
            if (n >= pages)
            {
                return (find_va << ARCH_SECTION_SHIFT) + (find_off << ARCH_PAGE_SHIFT);
            }
        }
    }
    return 0;
}
#ifdef RT_USING_SMART
/**
 * Verify that 'pages' pages starting at va are all unmapped.
 * @return 0 when the whole range is free, -1 otherwise (including
 *         bad arguments).
 */
static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
{
    size_t loop_va = (size_t)va & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    if (!pages)
    {
        return -1;
    }
    if (!mmu_info)
    {
        return -1;
    }
    while (pages--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
        /* a page is "used" only if its level-2 table exists AND the
           level-2 entry is populated */
        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
            if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
            {
                return -1;
            }
        }
        loop_va += ARCH_PAGE_SIZE;
    }
    return 0;
}
#endif
/**
 * Remove npages of mappings starting at v_addr (no locking, no TLB
 * flush -- callers handle both). Each level-2 table carries a
 * reference counter in the slot just past its last entry; when it
 * drops to zero the table page is freed and the level-1 entry cleared.
 */
static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t npages)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t *ref_cnt;
    if (!mmu_info)
    {
        return;
    }
    while (npages--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        /* stop at the first address outside the managed range */
        if (l1_off < mmu_info->vstart || l1_off > mmu_info->vend)
        {
            return;
        }
        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
        }
        else
        {
            return;
        }
        if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
        {
            *(mmu_l2 + l2_off) = 0;
            /* cache maintain */
            rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);
            /* per-table reference counter lives after the last entry */
            ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
            (*ref_cnt)--;
            if (!*ref_cnt)
            {
#ifdef RT_USING_SMART
                rt_pages_free(mmu_l2, 0);
#else
                rt_free_align(mmu_l2);
#endif
                *mmu_l1 = 0;
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l1, 4);
            }
        }
        loop_va += ARCH_PAGE_SIZE;
    }
}
/**
 * Map npages pages v_addr -> p_addr with attribute bits 'attr'
 * (no locking, no TLB flush -- callers handle both). Level-2 tables
 * are allocated on demand; each carries a reference counter in the
 * slot past its last entry. On allocation failure the pages mapped so
 * far are unwound and -1 is returned.
 */
static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void* v_addr, void* p_addr, size_t npages, size_t attr)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t loop_pa = (size_t)p_addr & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t *ref_cnt;
    if (!mmu_info)
    {
        return -1;
    }
    while (npages--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
        }
        else
        {
            /* no level-2 table yet: allocate and install one */
#ifdef RT_USING_SMART
            mmu_l2 = (size_t*)rt_pages_alloc(0);
#else
            mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
#endif
            if (mmu_l2)
            {
                rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l2, ARCH_PAGE_TBL_SIZE);
                *mmu_l1 = (((size_t)mmu_l2 + mmu_info->pv_off) | 0x1);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l1, 4);
            }
            else
            {
                /* error, unmap and quit */
                __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
                return -1;
            }
        }
        /* per-table reference counter lives after the last entry */
        ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
        (*ref_cnt)++;
        *(mmu_l2 + l2_off) = (loop_pa | attr);
        /* cache maintain */
        rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);
        loop_va += ARCH_PAGE_SIZE;
        loop_pa += ARCH_PAGE_SIZE;
    }
    return 0;
}
/* Drop every cached translation: flush the TLB, then the ITLB. */
static void rt_hw_cpu_tlb_invalidate(void)
{
    mmu_clear_tlb();
    mmu_clear_itlb();
}
#ifdef RT_USING_SMART
/**
 * Map 'size' bytes of physical memory at p_addr into this address
 * space. With v_addr == NULL a free virtual range is chosen via
 * find_vaddr(); otherwise v_addr is validated (same page offset as
 * p_addr, range currently unmapped). Returns the virtual address of
 * p_addr on success, 0 on failure.
 */
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
{
    size_t pa_s, pa_e;
    size_t vaddr;
    int pages;
    int ret;
    if (!size)
    {
        return 0;
    }
    /* page count covering [p_addr, p_addr+size) */
    pa_s = (size_t)p_addr;
    pa_e = (size_t)p_addr + size - 1;
    pa_s >>= ARCH_PAGE_SHIFT;
    pa_e >>= ARCH_PAGE_SHIFT;
    pages = pa_e - pa_s + 1;
    if (v_addr)
    {
        vaddr = (size_t)v_addr;
        pa_s = (size_t)p_addr;
        /* fixed mappings must share the in-page offset */
        if ((vaddr & ARCH_PAGE_MASK) != (pa_s & ARCH_PAGE_MASK))
        {
            return 0;
        }
        vaddr &= ~ARCH_PAGE_MASK;
        if (check_vaddr(mmu_info, (void*)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }
    if (vaddr) {
        ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void*)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
        }
    }
    return 0;
}
#else
/**
 * Map 'size' bytes of physical memory at p_addr into a free virtual
 * range chosen by find_vaddr() (non-SMART variant: no caller-supplied
 * virtual address). Returns the virtual address of p_addr on success,
 * 0 on failure.
 */
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t attr)
{
    size_t pa_s, pa_e;
    size_t vaddr;
    int pages;
    int ret;
    /* page count covering [p_addr, p_addr+size) */
    pa_s = (size_t)p_addr;
    pa_e = (size_t)p_addr + size - 1;
    pa_s >>= ARCH_PAGE_SHIFT;
    pa_e >>= ARCH_PAGE_SHIFT;
    pages = pa_e - pa_s + 1;
    vaddr = find_vaddr(mmu_info, pages);
    if (vaddr) {
        ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void*)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
        }
    }
    return 0;
}
#endif
#ifdef RT_USING_SMART
/**
 * Map npages pages at v_addr, allocating a fresh physical page for
 * each (no locking, no TLB flush). Returns 0 on success; on any
 * allocation failure the pages and mappings created so far are
 * released and -1 is returned.
 * NOTE(review): loop_pa is tested for 0 only AFTER pv_off was added,
 * and the error path unmaps using the already-decremented npages --
 * both look suspicious; verify against the allocator's contract.
 */
static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void* v_addr, size_t npages, size_t attr)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t loop_pa;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t *ref_cnt;
    if (!mmu_info)
    {
        return -1;
    }
    while (npages--)
    {
        loop_pa = (size_t)rt_pages_alloc(0) + mmu_info->pv_off;
        if (!loop_pa)
            goto err;
        //rt_kprintf("vaddr = %08x is mapped to paddr = %08x\n",v_addr,loop_pa);
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
        }
        else
        {
            /* no level-2 table yet: allocate and install one */
            //mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
            mmu_l2 = (size_t*)rt_pages_alloc(0);
            if (mmu_l2)
            {
                rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l2, ARCH_PAGE_TBL_SIZE);
                *mmu_l1 = (((size_t)mmu_l2 + mmu_info->pv_off) | 0x1);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l1, 4);
            }
            else
                goto err;
        }
        /* per-table reference counter lives after the last entry */
        ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
        (*ref_cnt)++;
        //loop_pa += mmu_info->pv_off;
        *(mmu_l2 + l2_off) = (loop_pa | attr);
        /* cache maintain */
        rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);
        loop_va += ARCH_PAGE_SIZE;
    }
    return 0;
err:
    {
        /* error, unmap and quit */
        int i;
        void *va, *pa;
        va = (void*)((size_t)v_addr & ~ARCH_PAGE_MASK);
        for (i = 0; i < npages; i++)
        {
            pa = rt_hw_mmu_v2p(mmu_info, va);
            pa -= mmu_info->pv_off;
            rt_pages_free(pa, 0);
            va += ARCH_PAGE_SIZE;
        }
        __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
        return -1;
    }
}
/**
 * Map 'size' bytes backed by freshly allocated physical pages.
 * With v_addr == NULL a free virtual range is chosen; otherwise the
 * requested range is validated first. Returns the mapped virtual
 * address (with the original in-page offset) or 0 on failure.
 */
void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
{
    size_t vaddr;
    size_t offset;
    int pages;
    int ret;
    if (!size)
    {
        return 0;
    }
    /* round the byte range up to whole pages, keeping the offset */
    offset = (size_t)v_addr & ARCH_PAGE_MASK;
    size += (offset + ARCH_PAGE_SIZE - 1);
    pages = (size >> ARCH_PAGE_SHIFT);
    if (v_addr)
    {
        vaddr = (size_t)v_addr;
        vaddr &= ~ARCH_PAGE_MASK;
        if (check_vaddr(mmu_info, (void*)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }
    if (vaddr) {
        ret = __rt_hw_mmu_map_auto(mmu_info, (void*)vaddr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void*)vaddr + offset;
        }
    }
    return 0;
}
#endif
/* Unmap the page range covering [v_addr, v_addr+size) and invalidate
 * the TLBs afterwards (no locking -- see rt_hw_mmu_unmap). */
void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size)
{
    size_t first_page = (size_t)v_addr >> ARCH_PAGE_SHIFT;
    size_t last_page = ((size_t)v_addr + size - 1) >> ARCH_PAGE_SHIFT;
    int page_count = last_page - first_page + 1;

    __rt_hw_mmu_unmap(mmu_info, v_addr, page_count);
    rt_hw_cpu_tlb_invalidate();
}
//va --> pa
/* NOTE(review): unimplemented stub -- always returns NULL regardless
 * of v_addr/size. */
void *rt_hw_kernel_virt_to_phys(void *v_addr, size_t size)
{
    void *p_addr = 0;
    return p_addr;
}
//pa --> va
/* Map a physical range into kernel virtual space. Without
 * RT_USING_SMART the port uses an identity mapping. */
void *rt_hw_kernel_phys_to_virt(void *p_addr, size_t size)
{
    void *v_addr = 0;
#ifdef RT_USING_SMART
    extern rt_mmu_info mmu_info;
    v_addr = rt_hw_mmu_map(&mmu_info, 0, p_addr, size, MMU_MAP_K_RW);
#else
    v_addr = p_addr;
#endif
    return v_addr;
}
#ifdef RT_USING_SMART
/* Interrupt-safe wrapper around _rt_hw_mmu_map(). */
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
{
    rt_base_t level = rt_hw_interrupt_disable();
    void *result = _rt_hw_mmu_map(mmu_info, v_addr, p_addr, size, attr);
    rt_hw_interrupt_enable(level);
    return result;
}
/* Interrupt-safe wrapper around _rt_hw_mmu_map_auto(). */
void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
{
    rt_base_t level = rt_hw_interrupt_disable();
    void *result = _rt_hw_mmu_map_auto(mmu_info, v_addr, size, attr);
    rt_hw_interrupt_enable(level);
    return result;
}
#endif
/* Interrupt-safe wrapper around _rt_hw_mmu_unmap(). */
void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size)
{
    rt_base_t level = rt_hw_interrupt_disable();
    _rt_hw_mmu_unmap(mmu_info, v_addr, size);
    rt_hw_interrupt_enable(level);
}
/**
 * Walk the page table and translate v_addr to its physical address
 * (no locking). Handles page-table and section level-1 entries;
 * large pages and supersections are unsupported. Returns NULL when
 * unmapped or unsupported.
 */
void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr)
{
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t tmp;
    size_t pa;
    l1_off = (size_t)v_addr >> ARCH_SECTION_SHIFT;
    if (!mmu_info)
    {
        return (void*)0;
    }
    mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
    tmp = *mmu_l1;
    switch (tmp & ARCH_MMU_USED_MASK)
    {
        case 0: /* not used */
            break;
        case 1: /* page table */
            mmu_l2 = (size_t *)((tmp & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
            l2_off = (((size_t)v_addr & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
            pa = *(mmu_l2 + l2_off);
            if (pa & ARCH_MMU_USED_MASK)
            {
                if ((pa & ARCH_MMU_USED_MASK) == 1)
                {
                    /* large page, not supported */
                    break;
                }
                /* small page: frame base + in-page offset */
                pa &= ~(ARCH_PAGE_MASK);
                pa += ((size_t)v_addr & ARCH_PAGE_MASK);
                return (void*)pa;
            }
            break;
        case 2:
        case 3:
            /* section */
            if (tmp & ARCH_TYPE_SUPERSECTION)
            {
                /* super section, not supported */
                break;
            }
            pa = (tmp & ~ARCH_SECTION_MASK);
            pa += ((size_t)v_addr & ARCH_SECTION_MASK);
            return (void*)pa;
    }
    return (void*)0;
}
/* Interrupt-safe wrapper around _rt_hw_mmu_v2p(). */
void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr)
{
    rt_base_t level = rt_hw_interrupt_disable();
    void *result = _rt_hw_mmu_v2p(mmu_info, v_addr);
    rt_hw_interrupt_enable(level);
    return result;
}
#ifdef RT_USING_SMART
/**
 * Build the boot level-1 table: map the kernel both at its virtual
 * link address and at its physical alias; every other 1 MiB section
 * is left unmapped (faults on access).
 */
void init_mm_setup(unsigned int *mtbl, unsigned int size, unsigned int pv_off)
{
    unsigned int va;
    for (va = 0; va < 0x1000; va++) {
        unsigned int vaddr = (va << 20);
        if (vaddr >= KERNEL_VADDR_START && vaddr - KERNEL_VADDR_START < size) {
            /* kernel link-address window -> its physical pages */
            mtbl[va] = ((va << 20) + pv_off) | NORMAL_MEM;
        } else if (vaddr >= (KERNEL_VADDR_START + pv_off) && vaddr - (KERNEL_VADDR_START + pv_off) < size) {
            /* physical alias of the kernel, mapped 1:1 */
            mtbl[va] = (va << 20) | NORMAL_MEM;
        } else {
            mtbl[va] = 0;   /* unmapped: any access faults */
        }
    }
}
#endif

View File

@ -0,0 +1,113 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-03-25 quanzhao the first version
* 2020-07-26 lizhirui porting to ls2k
*/
#ifndef __MMU_H_
#define __MMU_H_
#include <rtthread.h>
#define DESC_SEC (0x2)
#define MEMWBWA ((1<<12)|(3<<2)) /* write back, write allocate */
#define MEMWB (3<<2) /* write back, no write allocate */
#define MEMWT (2<<2) /* write through, no write allocate */
#define SHAREDEVICE (1<<2) /* shared device */
#define STRONGORDER (0<<2) /* strong ordered */
#define XN (1<<4) /* execute Never */
#ifdef RT_USING_SMART
#define AP_RW (1<<10) /* supervisor=RW, user=No */
#define AP_RO ((1<<10) |(1 << 15)) /* supervisor=RW, user=No */
#else
#define AP_RW (3<<10) /* supervisor=RW, user=RW */
/* fix: this read "((2<<10)" -- an unbalanced parenthesis that breaks
 * compilation at every expansion of AP_RO */
#define AP_RO (2<<10) /* supervisor=RW, user=RO */
#endif
#define SHARED (1<<16) /* shareable */
#define DOMAIN_FAULT (0x0)
#define DOMAIN_CHK (0x1)
#define DOMAIN_NOTCHK (0x3)
#define DOMAIN0 (0x0<<5)
#define DOMAIN1 (0x1<<5)
#define DOMAIN0_ATTR (DOMAIN_CHK<<0)
#define DOMAIN1_ATTR (DOMAIN_FAULT<<2)
/* device mapping type */
#define DEVICE_MEM (SHARED|AP_RW|DOMAIN0|SHAREDEVICE|DESC_SEC|XN)
/* normal memory mapping type */
#define NORMAL_MEM (SHARED|AP_RW|DOMAIN0|MEMWBWA|DESC_SEC)
#define STRONG_ORDER_MEM (SHARED|AP_RO|XN|DESC_SEC)
/* One contiguous mapping request consumed by rt_hw_init_mmu_table():
 * [vaddr_start, vaddr_end] -> paddr_start with section attributes. */
struct mem_desc
{
    rt_uint32_t vaddr_start;    /* first virtual address (inclusive) */
    rt_uint32_t vaddr_end;      /* last virtual address (inclusive) */
    rt_uint32_t paddr_start;    /* physical base the range maps to */
    rt_uint32_t attr;           /* section attribute bits (e.g. NORMAL_MEM) */
};
#define MMU_MAP_MTBL_XN (1<<0)
#define MMU_MAP_MTBL_A (1<<1)
#define MMU_MAP_MTBL_B (1<<2)
#define MMU_MAP_MTBL_C (1<<3)
#define MMU_MAP_MTBL_AP01(x) (x<<4)
#define MMU_MAP_MTBL_TEX(x) (x<<6)
#define MMU_MAP_MTBL_AP2(x) (x<<9)
#define MMU_MAP_MTBL_SHARE (1<<10)
#define MMU_MAP_K_RO (MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(1)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_K_RWCB (MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_K_RW (MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_K_DEVICE (MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_U_RO (MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(1)|MMU_MAP_MTBL_AP01(2)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_U_RWCB (MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(3)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_U_RW (MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(3)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_U_DEVICE (MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(3)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_SHARE)
#define ARCH_SECTION_SHIFT 20
#define ARCH_SECTION_SIZE (1 << ARCH_SECTION_SHIFT)
#define ARCH_SECTION_MASK (ARCH_SECTION_SIZE - 1)
#define ARCH_PAGE_SHIFT 14
#define ARCH_PAGE_SIZE (1 << ARCH_PAGE_SHIFT)
#define ARCH_PAGE_MASK (ARCH_PAGE_SIZE - 1)
#define ARCH_PAGE_TBL_SHIFT 10
#define ARCH_PAGE_TBL_SIZE (1 << ARCH_PAGE_TBL_SHIFT)
#define ARCH_PAGE_TBL_MASK (ARCH_PAGE_TBL_SIZE - 1)
#define ARCH_MMU_USED_MASK 3
#define ARCH_TYPE_SUPERSECTION (1 << 18)
typedef struct
{
size_t *vtable;
size_t vstart;
size_t vend;
size_t pv_off;
} rt_mmu_info;
void *mmu_table_get();
void switch_mmu(void *mmu_table);
int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void* v_address, size_t size, size_t *vtable, size_t pv_off);
#ifdef RT_USING_SMART
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr);
void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr);
#else
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t attr);
#endif
void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size);
void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr);
void *rt_hw_kernel_phys_to_virt(void *p_addr, size_t size);
void *rt_hw_kernel_virt_to_phys(void *v_addr, size_t size);
#endif

View File

@ -0,0 +1,13 @@
# RT-Thread build script: collect this component's C/C++ sources and
# GCC-syntax assembly files into a build group named 'libcpu'.
from building import *

Import('rtconfig')

current_dir = GetCurrentDir()

# Every C, C++ and *_gcc.S assembly source in this directory.
sources = Glob('*.c') + Glob('*.cpp') + Glob('*_gcc.S')

group = DefineGroup('libcpu', sources, depend=[''], CPPPATH=[current_dir])

Return('group')

View File

@ -0,0 +1,136 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-05-24 aozima first version
* 2019-07-19 Zhou Yanjie clean up code
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include <p32xxxx.h>
#include "../common/mips_def.h"
#include "../common/stackframe.h"
.section ".text", "ax"
.set noat
.set noreorder
/*
 * rt_base_t rt_hw_interrupt_disable()
 *
 * Globally disable interrupts and return the previous CP0 Status value
 * (in v0) so the caller can later restore it via rt_hw_interrupt_enable().
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    mfc0 v0, CP0_STATUS /* v0 = old Status -- this is the returned level */
    addiu v1, zero, -2 /* v1 = 0xFFFFFFFE, mask that clears Status.IE (bit 0) */
    and v1, v0, v1 /* v1 = Status with the IE bit cleared */
    mtc0 v1, CP0_STATUS /* write back: interrupts now disabled */
    jr ra
    nop /* branch delay slot */
/*
 * void rt_hw_interrupt_enable(rt_base_t level)
 *
 * Restore CP0 Status from the value previously returned by
 * rt_hw_interrupt_disable() (passed in a0).
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    mtc0 a0, CP0_STATUS /* Status = saved level (restores the IE bit) */
    jr ra
    nop /* branch delay slot */
/*
 * void rt_hw_context_switch_to(rt_uint32 to)
 * a0 --> to (address of the destination thread's saved-sp slot)
 *
 * Start the first thread: nothing to save, just load the new thread's
 * stack pointer and restore its full register frame.
 * (Fixed a stray "/*" inside this comment in the original.)
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    lw sp, 0(a0) /* get new task stack pointer */
    RESTORE_ALL_AND_RET
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)
 * a0 --> from (address of the preempted thread's saved-sp slot)
 * a1 --> to   (address of the destination thread's saved-sp slot)
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
    mtc0 ra, CP0_EPC /* resume point of "from" is our return address */
    SAVE_ALL
    sw sp, 0(a0) /* store sp in preempted task's TCB */
    lw sp, 0(a1) /* get new task stack pointer */
    RESTORE_ALL_AND_RET
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to)
 *
 * Request a context switch from interrupt context: record from/to and
 * raise software interrupt 0; CoreSW0Handler performs the real switch
 * once interrupt processing unwinds.
 * (Fixed a stray "/*" inside this comment in the original.)
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
    la t0, rt_thread_switch_interrupt_flag
    lw t1, 0(t0)
    nop
    bnez t1, _reswitch /* switch already pending: only update "to" below */
    nop
    li t1, 0x01 /* set rt_thread_switch_interrupt_flag to 1 */
    sw t1, 0(t0)
    la t0, rt_interrupt_from_thread /* set rt_interrupt_from_thread */
    sw a0, 0(t0)
_reswitch:
    la t0, rt_interrupt_to_thread /* set rt_interrupt_to_thread */
    sw a1, 0(t0)
    /* Pulse Cause.IP0 (bit 8) to request software interrupt 0.
     * NOTE(review): the request appears to be latched by the interrupt
     * controller (CoreSW0Handler clears it in IFS0CLR) -- confirm on target. */
    mfc0 t0, CP0_CAUSE /* t0 = Cause */
    ori t0, t0, (1<<8) /* t0 |= (1<<8): assert SW0 request */
    mtc0 t0, CP0_CAUSE /* cause = t0 */
    addiu t1, zero, -257 /* t1 = ~(1<<8) */
    and t0, t0, t1 /* t0 &= t1: deassert SW0 request bit */
    mtc0 t0, CP0_CAUSE /* cause = t0 */
    jr ra
    nop /* branch delay slot */
/*
 * void __ISR(_CORE_SOFTWARE_0_VECTOR, ipl2) CoreSW0Handler(void)
 *
 * Software-interrupt-0 handler: performs the thread context switch
 * requested by rt_hw_context_switch_interrupt().
 */
.section ".text", "ax"
.set noreorder
.set noat
.ent CoreSW0Handler
.globl CoreSW0Handler
CoreSW0Handler:
    SAVE_ALL
    /* mCS0ClearIntFlag(); acknowledge the core-software-0 interrupt */
    la t0, IFS0CLR /* t0 = IFS0CLR */
    addiu t1,zero,0x02 /* t1 = 0x02, i.e. bit 1 (original comment wrongly said (1<<2)) */
    sw t1, 0(t0) /* IFS0CLR = t1: clear the CS0 interrupt flag */
    la k0, rt_thread_switch_interrupt_flag
    sw zero, 0(k0) /* clear the pending-switch flag */
    /*
     * switch to the new thread: save current sp into the "from" TCB,
     * load sp from the "to" TCB, then restore its full frame
     */
    la k0, rt_interrupt_from_thread
    lw k1, 0(k0)
    nop
    sw sp, 0(k1) /* store sp in preempted task's TCB */
    la k0, rt_interrupt_to_thread
    lw k1, 0(k0)
    nop
    lw sp, 0(k1) /* get new task's stack pointer */
    RESTORE_ALL_AND_RET
.end CoreSW0Handler

View File

@ -0,0 +1,93 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
 * 2011-05-23 aozima the first version for PIC32.
 * 2011-09-05 aozima merge all of C source code into cpuport.c.
*/
#include <rtthread.h>
/**
* @addtogroup PIC32
*/
/*@{*/
/* Context-switch bookkeeping shared with context_gcc.S: addresses of the
 * "from"/"to" threads' saved-sp slots and the pending-switch flag tested
 * by rt_hw_context_switch_interrupt()/CoreSW0Handler.
 * (The original comment called this an "exception and interrupt handler
 * table", which does not match how the assembly code uses it.) */
rt_uint32_t rt_interrupt_from_thread, rt_interrupt_to_thread;
rt_uint32_t rt_thread_switch_interrupt_flag;
/* Return the current value of the MIPS global pointer register ($28/gp). */
rt_uint32_t __attribute__((nomips16)) _get_gp(void)
{
    rt_uint32_t gp_value;

    /* Copy $gp into a C variable with a single register move. */
    asm volatile("move %0, $28" : "=r"(gp_value));

    return gp_value;
}
/**
 * This function will initialize a thread's stack so that the first context
 * restore (RESTORE_ALL_AND_RET in context_gcc.S) starts the thread at
 * @p tentry with @p parameter in a0.
 *
 * The frame layout (pc, cause, badvaddr, lo, hi, status, then GPRs 31..0)
 * must match SAVE_ALL/RESTORE_ALL in ../common/stackframe.h -- do not
 * reorder these stores.  Most registers are seeded with their own register
 * number (e.g. s0 = 0x10) purely as a debugging aid.
 *
 * @param tentry the entry of thread
 * @param parameter the parameter of entry
 * @param stack_addr the beginning stack address; NOTE(review): the entry pc
 *        is stored at this address itself, so it must point at a valid,
 *        writable word at the top of the stack -- confirm against callers
 * @param texit the function will be called when thread exit
 *
 * @return stack address
 */
rt_uint8_t *rt_hw_stack_init(void *tentry, void *parameter, rt_uint8_t *stack_addr, void *texit)
{
    rt_uint32_t *stk;
    /** Start at stack top */
    stk = (rt_uint32_t *)stack_addr;
    *(stk) = (rt_uint32_t) tentry; /* pc: Entry Point */
    *(--stk) = (rt_uint32_t) 0x00800000; /* c0_cause: IV=1 (use the special interrupt vector) */
    *(--stk) = (rt_uint32_t) 0; /* c0_badvaddr */
    *(--stk) = (rt_uint32_t) 0; /* lo */
    *(--stk) = (rt_uint32_t) 0; /* hi */
    *(--stk) = (rt_uint32_t) 1; /* C0_SR: IE = En -- interrupts enabled once the thread runs */
    *(--stk) = (rt_uint32_t) texit; /* 31 ra: returning from tentry jumps to texit */
    *(--stk) = (rt_uint32_t) 0x0000001e; /* 30 s8 */
    *(--stk) = (rt_uint32_t) stack_addr; /* 29 sp: initial stack pointer */
    *(--stk) = (rt_uint32_t) _get_gp(); /* 28 gp: share the kernel's global pointer */
    *(--stk) = (rt_uint32_t) 0x0000001b; /* 27 k1 */
    *(--stk) = (rt_uint32_t) 0x0000001a; /* 26 k0 */
    *(--stk) = (rt_uint32_t) 0x00000019; /* 25 t9 */
    *(--stk) = (rt_uint32_t) 0x00000018; /* 24 t8 */
    *(--stk) = (rt_uint32_t) 0x00000017; /* 23 s7 */
    *(--stk) = (rt_uint32_t) 0x00000016; /* 22 s6 */
    *(--stk) = (rt_uint32_t) 0x00000015; /* 21 s5 */
    *(--stk) = (rt_uint32_t) 0x00000014; /* 20 s4 */
    *(--stk) = (rt_uint32_t) 0x00000013; /* 19 s3 */
    *(--stk) = (rt_uint32_t) 0x00000012; /* 18 s2 */
    *(--stk) = (rt_uint32_t) 0x00000011; /* 17 s1 */
    *(--stk) = (rt_uint32_t) 0x00000010; /* 16 s0 */
    *(--stk) = (rt_uint32_t) 0x0000000f; /* 15 t7 */
    *(--stk) = (rt_uint32_t) 0x0000000e; /* 14 t6 */
    *(--stk) = (rt_uint32_t) 0x0000000d; /* 13 t5 */
    *(--stk) = (rt_uint32_t) 0x0000000c; /* 12 t4 */
    *(--stk) = (rt_uint32_t) 0x0000000b; /* 11 t3 */
    *(--stk) = (rt_uint32_t) 0x0000000a; /* 10 t2 */
    *(--stk) = (rt_uint32_t) 0x00000009; /* 9 t1 */
    *(--stk) = (rt_uint32_t) 0x00000008; /* 8 t0 */
    *(--stk) = (rt_uint32_t) 0x00000007; /* 7 a3 */
    *(--stk) = (rt_uint32_t) 0x00000006; /* 6 a2 */
    *(--stk) = (rt_uint32_t) 0x00000005; /* 5 a1 */
    *(--stk) = (rt_uint32_t) parameter; /* 4 a0: first (only) thread argument */
    *(--stk) = (rt_uint32_t) 0x00000003; /* 3 v1 */
    *(--stk) = (rt_uint32_t) 0x00000002; /* 2 v0 */
    *(--stk) = (rt_uint32_t) 0x00000001; /* 1 at */
    *(--stk) = (rt_uint32_t) 0x00000000; /* 0 zero */
    /* return task's current stack address */
    return (rt_uint8_t *)stk;
}
/*@}*/

View File

@ -0,0 +1,98 @@
/*********************************************************************
*
* Generic Exception Handler
*
*********************************************************************
* FileName: exceptions.c
* Dependencies:
*
* Processor: PIC32
*
* Complier: MPLAB C32
* MPLAB IDE
* Company: Microchip Technology, Inc.
* Author: Darren Wenn
*
* Software License Agreement
*
* The software supplied herewith by Microchip Technology Incorporated
* (the “Company”) for its PIC32/PIC24 Microcontroller is intended
 * and supplied to you, the Company's customer, for use solely and
* exclusively on Microchip PIC32/PIC24 Microcontroller products.
* The software is owned by the Company and/or its supplier, and is
* protected under applicable copyright laws. All rights are reserved.
* Any use in violation of the foregoing restrictions may subject the
* user to criminal sanctions under applicable laws, as well as to
* civil liability for the breach of the terms and conditions of this
* license.
*
* THIS SOFTWARE IS PROVIDED IN AN “AS IS” CONDITION. NO WARRANTIES,
* WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT NOT LIMITED
* TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. THE COMPANY SHALL NOT,
* IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL OR
* CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
*
*
*
********************************************************************/
#include <p32xxxx.h>
// declared static in case exception condition would prevent
// auto variable being created
/* MIPS Cause.ExcCode values reported by _general_exception_handler(). */
static enum {
    EXCEP_IRQ = 0,      // interrupt
    EXCEP_AdEL = 4,     // address error exception (load or ifetch)
    EXCEP_AdES,         // address error exception (store)
    EXCEP_IBE,          // bus error (ifetch)
    EXCEP_DBE,          // bus error (load/store)
    EXCEP_Sys,          // syscall
    EXCEP_Bp,           // breakpoint
    EXCEP_RI,           // reserved instruction
    EXCEP_CpU,          // coprocessor unusable
    EXCEP_Overflow,     // arithmetic overflow
    EXCEP_Trap,         // trap (possible divide by zero)
    EXCEP_IS1 = 16,     // implementation specific 1
    EXCEP_CEU,          // CorExtend unusable
    EXCEP_C2E           // coprocessor 2
} _excep_code;
// NOTE(review): _epc_code is never referenced in this file -- dead variable?
static unsigned int _epc_code;
static unsigned int _excep_addr;    // EPC: address that caused the exception
#include <rtthread.h>
// this function overrides the normal _weak_ generic handler
/*
 * Fatal-exception reporter: reads CP0 Cause/EPC, prints the exception
 * code, faulting address and a human-readable description via rt_kprintf,
 * then spins forever so the state can be inspected with a debugger.
 * Fixed typos in the printed messages ("unkown", "specfic", "Unuseable").
 */
void _general_exception_handler(void)
{
    /* CP0 register 13 = Cause, register 14 = EPC (faulting PC). */
    asm volatile("mfc0 %0,$13" : "=r" (_excep_code));
    asm volatile("mfc0 %0,$14" : "=r" (_excep_addr));

    /* Cause.ExcCode lives in bits [6:2]. */
    _excep_code = (_excep_code & 0x0000007C) >> 2;

    rt_kprintf("\r\n_excep_code : %08X\r\n",_excep_code);
    rt_kprintf("_excep_addr : %08X\r\n",_excep_addr);

    switch(_excep_code)
    {
    case EXCEP_IRQ:rt_kprintf("interrupt\r\n");break;
    case EXCEP_AdEL:rt_kprintf("address error exception (load or ifetch)\r\n");break;
    case EXCEP_AdES:rt_kprintf("address error exception (store)\r\n");break;
    case EXCEP_IBE:rt_kprintf("bus error (ifetch)\r\n");break;
    case EXCEP_DBE:rt_kprintf("bus error (load/store)\r\n");break;
    case EXCEP_Sys:rt_kprintf("syscall\r\n");break;
    case EXCEP_Bp:rt_kprintf("breakpoint\r\n");break;
    case EXCEP_RI:rt_kprintf("reserved instruction\r\n");break;
    case EXCEP_CpU:rt_kprintf("coprocessor unusable\r\n");break;
    case EXCEP_Overflow:rt_kprintf("arithmetic overflow\r\n");break;
    case EXCEP_Trap:rt_kprintf("trap (possible divide by zero)\r\n");break;
    case EXCEP_IS1:rt_kprintf("implementation specific 1\r\n");break;
    case EXCEP_CEU:rt_kprintf("CorExtend Unusable\r\n");break;
    case EXCEP_C2E:rt_kprintf("coprocessor 2\r\n");break;
    default : rt_kprintf("unknown exception\r\n");break;
    }

    /* The exception is fatal: halt here for post-mortem inspection. */
    while (1) {
        // Examine _excep_code to identify the type of exception
        // Examine _excep_addr to find the address that caused the exception
    }
}