原始版本

This commit is contained in:
冯佳
2025-06-19 21:56:46 +08:00
parent fe98e5f010
commit a4841450cf
4152 changed files with 1910684 additions and 0 deletions

View File

@ -0,0 +1,84 @@
# Kernel testcase selection menu: each UTEST_*_TC option pulls one
# testcase source file into the build (see the accompanying SConscript).
# Options default to 'n' unless the test is cheap and broadly applicable.
menu "Kernel Testcase"

    config UTEST_MEMHEAP_TC
        bool "memheap stability test"
        default y
        depends on RT_USING_MEMHEAP

    config UTEST_SMALL_MEM_TC
        bool "mem test"
        default y
        depends on RT_USING_SMALL_MEM

    config UTEST_SLAB_TC
        bool "slab test"
        default n
        depends on RT_USING_SLAB

    config UTEST_IRQ_TC
        bool "IRQ test"
        default n

    config UTEST_SEMAPHORE_TC
        bool "semaphore test"
        default n
        depends on RT_USING_SEMAPHORE

    config UTEST_EVENT_TC
        bool "event test"
        default n
        depends on RT_USING_EVENT

    config UTEST_TIMER_TC
        bool "timer test"
        default n

    config UTEST_MESSAGEQUEUE_TC
        bool "message queue test"
        default n

    # selects (rather than depends on) its prerequisite: enabling the
    # test force-enables signal support
    config UTEST_SIGNAL_TC
        bool "signal test"
        select RT_USING_SIGNALS
        default n

    config UTEST_MUTEX_TC
        bool "mutex test"
        default n

    config UTEST_MAILBOX_TC
        bool "mailbox test"
        default n

    config UTEST_THREAD_TC
        bool "thread test"
        default n
        select RT_USING_TIMER_SOFT
        select RT_USING_THREAD

    config UTEST_DEVICE_TC
        bool "device test"
        default n

    config UTEST_ATOMIC_TC
        bool "atomic test"
        default n

    config UTEST_HOOKLIST_TC
        bool "hook list test"
        select RT_USING_HOOKLIST
        default n

    config UTEST_MTSAFE_KPRINT_TC
        bool "mtsafe kprint test"
        default n

    config UTEST_SCHEDULER_TC
        bool "scheduler test"
        default n

# SMP-only scheduler cases live in their own Kconfig
if RT_USING_SMP
rsource "smp/Kconfig"
endif

endmenu

View File

@ -0,0 +1,70 @@
# SConscript for the kernel utest cases: collects one source file per
# enabled UTEST_*_TC Kconfig option and recurses into any subdirectory
# that carries its own SConscript.
#
# Fixes: 'os' was used below without being imported (it only worked
# because 'from building import *' happened to leak it), and the
# builtin name 'list' was shadowed by a local variable.
import os

Import('rtconfig')
from building import *

cwd = GetCurrentDir()
src = []
CPPPATH = [cwd]

if GetDepend(['UTEST_MEMHEAP_TC']):
    src += ['memheap_tc.c']
if GetDepend(['UTEST_SMALL_MEM_TC']):
    src += ['mem_tc.c']
if GetDepend(['UTEST_SLAB_TC']):
    src += ['slab_tc.c']
if GetDepend(['UTEST_IRQ_TC']):
    src += ['irq_tc.c']
if GetDepend(['UTEST_SEMAPHORE_TC']):
    src += ['semaphore_tc.c']
if GetDepend(['UTEST_EVENT_TC']):
    src += ['event_tc.c']
if GetDepend(['UTEST_TIMER_TC']):
    src += ['timer_tc.c']
if GetDepend(['UTEST_MESSAGEQUEUE_TC']):
    src += ['messagequeue_tc.c']
if GetDepend(['UTEST_SIGNAL_TC']):
    src += ['signal_tc.c']
if GetDepend(['UTEST_MUTEX_TC']):
    src += ['mutex_tc.c', 'mutex_pi_tc.c']
if GetDepend(['UTEST_MAILBOX_TC']):
    src += ['mailbox_tc.c']
if GetDepend(['UTEST_THREAD_TC']):
    src += ['thread_tc.c']
if GetDepend(['UTEST_DEVICE_TC']):
    src += ['device_tc.c']
if GetDepend(['UTEST_ATOMIC_TC']):
    src += ['atomic_tc.c']
if GetDepend(['UTEST_HOOKLIST_TC']):
    src += ['hooklist_tc.c']
if GetDepend(['UTEST_MTSAFE_KPRINT_TC']):
    src += ['mtsafe_kprint_tc.c']

# Stressful testcase for scheduler (MP/UP)
if GetDepend(['UTEST_SCHEDULER_TC']):
    src += ['sched_timed_sem_tc.c']
    src += ['sched_timed_mtx_tc.c']
    src += ['sched_mtx_tc.c']
    src += ['sched_sem_tc.c', 'sched_thread_tc.c']

group = DefineGroup('utestcases', src, depend = ['RT_USING_UTESTCASES'], CPPPATH = CPPPATH)

# Pull in nested testcase groups from subdirectories.
for item in os.listdir(cwd):
    if os.path.isfile(os.path.join(cwd, item, 'SConscript')):
        group = group + SConscript(os.path.join(item, 'SConscript'))

Return('group')

View File

@ -0,0 +1,173 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-07-27 flybreak the first version
* 2023-03-21 WangShun add atomic test
* 2023-09-15 xqyjlj change stack size in cpu64
*/
#include <rtthread.h>
#include "utest.h"
#include "rtatomic.h"
#include <rthw.h>
#define THREAD_PRIORITY 25
#define THREAD_TIMESLICE 1
#define THREAD_STACKSIZE UTEST_THR_STACK_SIZE
/* convenience macro - return either 64-bit or 32-bit value */
#define ATOMIC_WORD(val_if_64, val_if_32) \
((rt_atomic_t)((sizeof(void *) == sizeof(uint64_t)) ? (val_if_64) : (val_if_32)))
static rt_atomic_t count = 0;
static rt_sem_t sem_t;
/* API smoke test: exercise every rt_atomic_* primitive on a single local
 * variable, checking both the returned old value and the new contents. */
static void test_atomic_api(void)
{
    rt_atomic_t base;
    rt_atomic_t oldval;
    rt_atomic_t result;

    /* rt_atomic_t: must be exactly one machine word wide */
    uassert_true(sizeof(rt_atomic_t) == ATOMIC_WORD(sizeof(uint64_t), sizeof(uint32_t)));

    /* rt_atomic_add: returns the value *before* the addition */
    base = 0;
    result = rt_atomic_add(&base, 10);
    uassert_true(base == 10);
    uassert_true(result == 0);

    /* rt_atomic_add negative */
    base = 2;
    result = rt_atomic_add(&base, -4);
    uassert_true(base == -2);
    uassert_true(result == 2);

    /* rt_atomic_sub: also returns the pre-operation value */
    base = 11;
    result = rt_atomic_sub(&base, 10);
    uassert_true(base == 1);
    uassert_true(result == 11);

    /* rt_atomic_sub negative */
    base = 2;
    result = rt_atomic_sub(&base, -5);
    uassert_true(base == 7);
    uassert_true(result == 2);

    /* rt_atomic_or */
    base = 0xFF00;
    result = rt_atomic_or(&base, 0x0F0F);
    uassert_true(base == 0xFF0F);
    uassert_true(result == 0xFF00);

    /* rt_atomic_xor */
    base = 0xFF00;
    result = rt_atomic_xor(&base, 0x0F0F);
    uassert_true(base == 0xF00F);
    uassert_true(result == 0xFF00);

    /* rt_atomic_and */
    base = 0xFF00;
    result = rt_atomic_and(&base, 0x0F0F);
    uassert_true(base == 0x0F00);
    uassert_true(result == 0xFF00);

    /* rt_atomic_exchange: swap in a new value, return the old one */
    base = 0xFF00;
    result = rt_atomic_exchange(&base, 0x0F0F);
    uassert_true(base == 0x0F0F);
    uassert_true(result == 0xFF00);

    /* rt_atomic_flag_test_and_set (Flag 0): sets the flag, reports it was clear */
    base = 0x0;
    result = rt_atomic_flag_test_and_set(&base);
    uassert_true(base == 0x1);
    uassert_true(result == 0x0);

    /* rt_atomic_flag_test_and_set (Flag 1): flag stays set, reports it was set */
    base = 0x1;
    result = rt_atomic_flag_test_and_set(&base);
    uassert_true(base == 0x1);
    uassert_true(result == 0x1);

    /* rt_atomic_flag_clear */
    base = 0x1;
    rt_atomic_flag_clear(&base);
    uassert_true(base == 0x0);

    /* rt_atomic_load */
    base = 0xFF00;
    result = rt_atomic_load(&base);
    uassert_true(base == 0xFF00);
    uassert_true(result == 0xFF00);

    /* rt_atomic_store */
    base = 0xFF00;
    rt_atomic_store(&base, 0x0F0F);
    uassert_true(base == 0x0F0F);

    /* rt_atomic_compare_exchange_strong (equal): exchange succeeds */
    base = 10;
    oldval = 10;
    result = rt_atomic_compare_exchange_strong(&base, &oldval, 11);
    uassert_true(base == 11);
    uassert_true(result == 0x1);

    /* rt_atomic_compare_exchange_strong (not equal): exchange is refused */
    base = 10;
    oldval = 5;
    result = rt_atomic_compare_exchange_strong(&base, &oldval, 11);
    uassert_true(base == 10);
    uassert_true(result == 0x0);
}
/* Worker thread: atomically bump the shared counter one million times,
 * then signal completion on the semaphore. */
static void ture_entry(void *parameter)
{
    int remaining = 1000000;

    while (remaining-- > 0)
    {
        rt_atomic_add(&count, 1);
    }

    rt_sem_release(sem_t);
}
/* Concurrency test: three threads each atomically add 1 to 'count'
 * 1,000,000 times; the final value must be exactly 3,000,000 if
 * rt_atomic_add is truly atomic.
 *
 * Fixes: the semaphore was created but never deleted (kernel object
 * leak on every run), and creation results were not checked. */
static void test_atomic_add(void)
{
    rt_thread_t thread;
    size_t i;

    sem_t = rt_sem_create("atomic_sem", 0, RT_IPC_FLAG_PRIO);
    uassert_not_null(sem_t);

    rt_atomic_store(&count, 0);

    thread = rt_thread_create("t1", ture_entry, RT_NULL, THREAD_STACKSIZE, THREAD_PRIORITY, THREAD_TIMESLICE);
    uassert_not_null(thread);
    rt_thread_startup(thread);

    thread = rt_thread_create("t2", ture_entry, RT_NULL, THREAD_STACKSIZE, THREAD_PRIORITY, THREAD_TIMESLICE);
    uassert_not_null(thread);
    rt_thread_startup(thread);

    thread = rt_thread_create("t3", ture_entry, RT_NULL, THREAD_STACKSIZE, THREAD_PRIORITY, THREAD_TIMESLICE);
    uassert_not_null(thread);
    rt_thread_startup(thread);

    /* wait until all three workers have finished */
    for (i = 0; i < 3; i++)
    {
        rt_sem_take(sem_t, RT_WAITING_FOREVER);
    }

    i = rt_atomic_load(&count);
    uassert_true(i == 3000000);

    /* fix: release the semaphore instead of leaking it */
    rt_sem_delete(sem_t);
    sem_t = RT_NULL;
}
/* No per-run setup required; workers reset 'count' themselves. */
static rt_err_t utest_tc_init(void)
{
    return RT_EOK;
}

static rt_err_t utest_tc_cleanup(void)
{
    return RT_EOK;
}

/* Register the atomic test units with the utest framework. */
static void testcase(void)
{
    UTEST_UNIT_RUN(test_atomic_api);
    UTEST_UNIT_RUN(test_atomic_add);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.atomic_tc", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,59 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-05-20 Shell the first version
*/
#include <rtthread.h>
#include <stdlib.h>
#include "utest.h"
/* Verify that rt_device_find() resolves the console device through
 * several equivalent name lookups: a NUL-terminated copy of its name,
 * the configured console name, and the object's own name field. */
static void test_rt_device_find(void)
{
    char name_buf[RT_NAME_MAX + 1] = {0};
    rt_device_t console_dev;
    rt_device_t by_copied_name, by_config_name, by_parent_name;

    console_dev = rt_console_get_device();
    uassert_not_null(console_dev);

    /* object names are not guaranteed NUL-terminated at RT_NAME_MAX;
     * copy into a buffer one byte larger */
    rt_memcpy(name_buf, console_dev->parent.name, RT_NAME_MAX);

    /* lookup through the copied name must yield the console itself */
    by_copied_name = rt_device_find(name_buf);
    uassert_true(by_copied_name == console_dev);

    /* lookup through the configured console name */
    by_config_name = rt_device_find(RT_CONSOLE_DEVICE_NAME);
    if (rt_strcmp(RT_CONSOLE_DEVICE_NAME, name_buf) == 0)
    {
        uassert_true(by_config_name == by_copied_name);
    }
    else
    {
        /* console was redirected: the configured device still exists,
         * but is a different object */
        uassert_not_null(by_config_name);
        uassert_true(by_config_name != by_copied_name);
    }

    /* lookup straight through the object's own name field */
    by_parent_name = rt_device_find(console_dev->parent.name);
    uassert_true(by_copied_name == by_parent_name);
}
static rt_err_t utest_tc_init(void)
{
    return RT_EOK;
}

static rt_err_t utest_tc_cleanup(void)
{
    return RT_EOK;
}

/* Register the device-find unit with the utest framework. */
static void testcase(void)
{
    UTEST_UNIT_RUN(test_rt_device_find);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.device.find", utest_tc_init, utest_tc_cleanup, 5);

View File

@ -0,0 +1,346 @@
/*
* Copyright (c) 2006-2019, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-08-15 liukang the first version
* 2023-09-15 xqyjlj change stack size in cpu64
*/
#include <rtthread.h>
#include "utest.h"
#include <stdlib.h>
#define THREAD_STACKSIZE UTEST_THR_STACK_SIZE
#define EVENT_FLAG3 (1 << 3)
#define EVENT_FLAG5 (1 << 5)
static struct rt_event static_event = {0};
#ifdef RT_USING_HEAP
static rt_event_t dynamic_event = RT_NULL;
static rt_uint32_t dynamic_event_recv_thread_finish = 0, dynamic_event_send_thread_finish = 0;
rt_align(RT_ALIGN_SIZE)
static char thread3_stack[UTEST_THR_STACK_SIZE];
static struct rt_thread thread3;
rt_align(RT_ALIGN_SIZE)
static char thread4_stack[UTEST_THR_STACK_SIZE];
static struct rt_thread thread4;
#endif /* RT_USING_HEAP */
static rt_uint32_t recv_event_times1 = 0, recv_event_times2 = 0;
static rt_uint32_t static_event_recv_thread_finish = 0, static_event_send_thread_finish = 0;
rt_align(RT_ALIGN_SIZE)
static char thread1_stack[UTEST_THR_STACK_SIZE];
static struct rt_thread thread1;
rt_align(RT_ALIGN_SIZE)
static char thread2_stack[UTEST_THR_STACK_SIZE];
static struct rt_thread thread2;
#define THREAD_PRIORITY 9
#define THREAD_TIMESLICE 5
/* Init/detach a statically allocated event with PRIO and then FIFO
 * waiting modes.
 *
 * Fix: on failure the original recorded the failure but kept going and
 * detached an event that never initialized; abort early instead. */
static void test_event_init(void)
{
    rt_err_t result;

    result = rt_event_init(&static_event, "event", RT_IPC_FLAG_PRIO);
    if (result != RT_EOK)
    {
        uassert_false(1);
        return; /* do not detach an event that failed to init */
    }
    result = rt_event_detach(&static_event);
    if (result != RT_EOK)
    {
        uassert_false(1);
        return;
    }

    result = rt_event_init(&static_event, "event", RT_IPC_FLAG_FIFO);
    if (result != RT_EOK)
    {
        uassert_false(1);
        return;
    }
    result = rt_event_detach(&static_event);
    if (result != RT_EOK)
    {
        uassert_false(1);
        return;
    }

    uassert_true(1);
}
/* Single init/detach round trip on the static event (PRIO mode). */
static void test_event_detach(void)
{
    rt_err_t ret;

    ret = rt_event_init(&static_event, "event", RT_IPC_FLAG_PRIO);
    if (ret != RT_EOK)
    {
        uassert_false(1);
    }

    ret = rt_event_detach(&static_event);
    if (ret != RT_EOK)
    {
        uassert_false(1);
    }

    uassert_true(1);
}
/* Receiver side of the static-event test (runs at higher priority).
 * First waits OR-style on FLAG3|FLAG5 (satisfied by the first flag
 * sent), then waits AND-style until both flags are pending at once. */
static void thread1_recv_static_event(void *param)
{
    rt_uint32_t e;

    if (rt_event_recv(&static_event, (EVENT_FLAG3 | EVENT_FLAG5),
                      RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR,
                      RT_WAITING_FOREVER, &e) != RT_EOK)
    {
        return; /* leave the finish flag unset so the test times out visibly */
    }
    recv_event_times1 = e; /* expected: EVENT_FLAG3 (first flag sent) */

    rt_thread_mdelay(50); /* let the sender queue both flags */

    if (rt_event_recv(&static_event, (EVENT_FLAG3 | EVENT_FLAG5),
                      RT_EVENT_FLAG_AND | RT_EVENT_FLAG_CLEAR,
                      RT_WAITING_FOREVER, &e) != RT_EOK)
    {
        return;
    }
    recv_event_times2 = e; /* expected: EVENT_FLAG3 | EVENT_FLAG5 */

    static_event_recv_thread_finish = 1;
}

/* Sender side: FLAG3, then FLAG5, then FLAG3 again, 10 ms apart. */
static void thread2_send_static_event(void *param)
{
    rt_event_send(&static_event, EVENT_FLAG3);
    rt_thread_mdelay(10);
    rt_event_send(&static_event, EVENT_FLAG5);
    rt_thread_mdelay(10);
    rt_event_send(&static_event, EVENT_FLAG3);
    static_event_send_thread_finish = 1;
}
/* Full send/receive scenario on the static event: thread1 (higher
 * priority) receives OR- then AND-style while thread2 sends the flags.
 *
 * Fix: the detach call was duplicated in both result branches; detach
 * exactly once, then assert on the received values. */
static void test_static_event_send_recv(void)
{
    rt_err_t result;

    result = rt_event_init(&static_event, "event", RT_IPC_FLAG_PRIO);
    if (result != RT_EOK)
    {
        uassert_false(1);
    }

    rt_thread_init(&thread1,
                   "thread1",
                   thread1_recv_static_event,
                   RT_NULL,
                   &thread1_stack[0],
                   sizeof(thread1_stack),
                   THREAD_PRIORITY - 1, THREAD_TIMESLICE);
    rt_thread_startup(&thread1);

    rt_thread_init(&thread2,
                   "thread2",
                   thread2_send_static_event,
                   RT_NULL,
                   &thread2_stack[0],
                   sizeof(thread2_stack),
                   THREAD_PRIORITY, THREAD_TIMESLICE);
    rt_thread_startup(&thread2);

    /* poll until both worker threads report completion */
    while (static_event_recv_thread_finish != 1 || static_event_send_thread_finish != 1)
    {
        rt_thread_delay(1);
    }

    if (rt_event_detach(&static_event) != RT_EOK)
    {
        uassert_false(1);
    }

    if (recv_event_times1 == EVENT_FLAG3 && recv_event_times2 == (EVENT_FLAG3 | EVENT_FLAG5))
    {
        uassert_true(1);
    }
    else
    {
        uassert_false(1);
    }
}
#ifdef RT_USING_HEAP
/* Create/delete a heap-allocated event (FIFO mode) and check both calls. */
static void test_event_create(void)
{
    rt_err_t result = RT_EOK;

    dynamic_event = rt_event_create("dynamic_event", RT_IPC_FLAG_FIFO);
    if (dynamic_event == RT_NULL)
    {
        uassert_false(1);
    }
    result = rt_event_delete(dynamic_event);
    if (result != RT_EOK)
    {
        uassert_false(1);
    }
    uassert_true(1);
}

/* Same round trip kept as a separate unit so a delete failure is
 * reported under its own test name. */
static void test_event_delete(void)
{
    rt_err_t result;

    dynamic_event = rt_event_create("dynamic_event", RT_IPC_FLAG_FIFO);
    if (dynamic_event == RT_NULL)
    {
        uassert_false(1);
    }
    result = rt_event_delete(dynamic_event);
    if (result != RT_EOK)
    {
        uassert_false(1);
    }
    uassert_true(1);
}
/* Receiver side of the dynamic-event test; mirrors the static variant:
 * OR-wait on the first flag, then AND-wait for both flags together. */
static void thread3_recv_dynamic_event(void *param)
{
    rt_uint32_t e;

    if (rt_event_recv(dynamic_event, (EVENT_FLAG3 | EVENT_FLAG5),
                      RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR,
                      RT_WAITING_FOREVER, &e) != RT_EOK)
    {
        return;
    }
    recv_event_times1 = e; /* expected: EVENT_FLAG3 */

    rt_thread_mdelay(50); /* let the sender queue both flags */

    if (rt_event_recv(dynamic_event, (EVENT_FLAG3 | EVENT_FLAG5),
                      RT_EVENT_FLAG_AND | RT_EVENT_FLAG_CLEAR,
                      RT_WAITING_FOREVER, &e) != RT_EOK)
    {
        return;
    }
    recv_event_times2 = e; /* expected: EVENT_FLAG3 | EVENT_FLAG5 */

    dynamic_event_recv_thread_finish = 1;
}

/* Sender side: FLAG3, FLAG5, FLAG3 with 10 ms gaps. */
static void thread4_send_dynamic_event(void *param)
{
    rt_event_send(dynamic_event, EVENT_FLAG3);
    rt_thread_mdelay(10);
    rt_event_send(dynamic_event, EVENT_FLAG5);
    rt_thread_mdelay(10);
    rt_event_send(dynamic_event, EVENT_FLAG3);
    dynamic_event_send_thread_finish = 1;
}
/* Same send/receive scenario as the static test, but with a
 * heap-allocated event object.
 *
 * Fix: the delete call was duplicated in both result branches; delete
 * exactly once, then assert on the received values. */
static void test_dynamic_event_send_recv(void)
{
    dynamic_event = rt_event_create("dynamic_event", RT_IPC_FLAG_PRIO);
    if (dynamic_event == RT_NULL)
    {
        uassert_false(1);
    }

    rt_thread_init(&thread3,
                   "thread3",
                   thread3_recv_dynamic_event,
                   RT_NULL,
                   &thread3_stack[0],
                   sizeof(thread3_stack),
                   THREAD_PRIORITY - 1, THREAD_TIMESLICE);
    rt_thread_startup(&thread3);

    rt_thread_init(&thread4,
                   "thread4",
                   thread4_send_dynamic_event,
                   RT_NULL,
                   &thread4_stack[0],
                   sizeof(thread4_stack),
                   THREAD_PRIORITY, THREAD_TIMESLICE);
    rt_thread_startup(&thread4);

    /* poll until both worker threads report completion */
    while (dynamic_event_recv_thread_finish != 1 || dynamic_event_send_thread_finish != 1)
    {
        rt_thread_delay(1);
    }

    if (rt_event_delete(dynamic_event) != RT_EOK)
    {
        uassert_false(1);
    }

    if (recv_event_times1 == EVENT_FLAG3 && recv_event_times2 == (EVENT_FLAG3 | EVENT_FLAG5))
    {
        uassert_true(1);
    }
    else
    {
        uassert_false(1);
    }
}
#endif
/* Reset per-run completion flags so the testcase can run repeatedly. */
static rt_err_t utest_tc_init(void)
{
    static_event_recv_thread_finish = 0;
    static_event_send_thread_finish = 0;
#ifdef RT_USING_HEAP
    dynamic_event_recv_thread_finish = 0;
    dynamic_event_send_thread_finish = 0;
#endif
    return RT_EOK;
}

static rt_err_t utest_tc_cleanup(void)
{
    return RT_EOK;
}

/* Register the event units; dynamic-event units require the heap. */
static void testcase(void)
{
    UTEST_UNIT_RUN(test_event_init);
    UTEST_UNIT_RUN(test_event_detach);
    UTEST_UNIT_RUN(test_static_event_send_recv);
#ifdef RT_USING_HEAP
    UTEST_UNIT_RUN(test_event_create);
    UTEST_UNIT_RUN(test_event_delete);
    UTEST_UNIT_RUN(test_dynamic_event_send_recv);
#endif
}
UTEST_TC_EXPORT(testcase, "src.ipc.event_tc", utest_tc_init, utest_tc_cleanup, 60);

View File

@ -0,0 +1,98 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-12-22 Shell Support hook list
*/
#include <rtthread.h>
#include "rtconfig.h"
#include "utest.h"
#include "utest_assert.h"
/* Invocation counters for the two registered thread-inited hooks. */
static int hooker1_ent_count;
static int hooker2_ent_count;
/* Statically allocated thread whose rt_thread_init() triggers the hooks. */
static struct rt_thread thr_tobe_inited;

/* First hook: counts every rt_thread_init() call while registered. */
static void thread_inited_hooker1(rt_thread_t thread)
{
    LOG_D("%s: count %d", __func__, hooker1_ent_count);
    hooker1_ent_count += 1;
}
RT_OBJECT_HOOKLIST_DEFINE_NODE(rt_thread_inited, hooker1_node, thread_inited_hooker1);

/* Second hook: same counting, removed halfway through the test. */
static void thread_inited_hooker2(rt_thread_t thread)
{
    LOG_D("%s: count %d", __func__, hooker2_ent_count);
    hooker2_ent_count += 1;
}
RT_OBJECT_HOOKLIST_DEFINE_NODE(rt_thread_inited, hooker2_node, thread_inited_hooker2);

static char _thr_stack[UTEST_THR_STACK_SIZE];

/* Entry of the test thread; the hooks fire at init time, not here. */
static void thr_tobe_inited_entry(void *param)
{
    rt_kprintf("Hello!\n");
}
/* Register two thread-inited hooks, init a thread and expect both to
 * fire once; remove hook2, init again and expect only hook1 to fire. */
static void hooklist_test(void)
{
    hooker1_ent_count = 0;
    hooker2_ent_count = 0;
    rt_thread_inited_sethook(&hooker1_node);
    rt_thread_inited_sethook(&hooker2_node);

    /* run 1: both hooks registered */
    rt_thread_init(&thr_tobe_inited,
                   "thr_tobe_inited",
                   thr_tobe_inited_entry,
                   NULL,
                   _thr_stack,
                   sizeof(_thr_stack),
                   25,
                   100);
    uassert_int_equal(hooker1_ent_count, 1);
    uassert_int_equal(hooker2_ent_count, 1);

    rt_thread_detach(&thr_tobe_inited);
    rt_thread_mdelay(1); /* wait recycling done */

    /* run 2: hook2 removed, only hook1 should fire again */
    rt_thread_inited_rmhook(&hooker2_node);
    rt_thread_init(&thr_tobe_inited,
                   "thr_tobe_inited",
                   thr_tobe_inited_entry,
                   NULL,
                   _thr_stack,
                   sizeof(_thr_stack),
                   25,
                   100);
    uassert_int_equal(hooker1_ent_count, 2);
    uassert_int_equal(hooker2_ent_count, 1);
}
static rt_err_t utest_tc_init(void)
{
    hooker1_ent_count = 0;
    hooker2_ent_count = 0;
    return RT_EOK;
}

/* Detach the thread left initialized by run 2 and make sure no hook
 * node stays registered after the testcase finishes. */
static rt_err_t utest_tc_cleanup(void)
{
    rt_thread_detach(&thr_tobe_inited);
    rt_thread_inited_rmhook(&hooker1_node);
    rt_thread_inited_rmhook(&hooker2_node);
    return RT_EOK;
}

static void testcase(void)
{
    UTEST_UNIT_RUN(hooklist_test);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.hooklist_tc", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,78 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-08-15 supperthomas add irq_test
*/
#include <rtthread.h>
#include "utest.h"
#include "rthw.h"
#define UTEST_NAME "irq_tc"
static volatile uint32_t irq_count = 0;
static volatile uint32_t max_get_nest_count = 0;
/* Hook invoked on every interrupt enter/leave: counts invocations and
 * records the deepest interrupt nesting level observed.
 *
 * Fix: declared with an empty K&R-style parameter list '()', which in
 * pre-C23 C means "unspecified arguments"; use '(void)' to match the
 * void(*)(void) hook prototype exactly. */
static void irq_callback(void)
{
    if (rt_interrupt_get_nest() > max_get_nest_count)
    {
        max_get_nest_count = rt_interrupt_get_nest();
    }
    irq_count++;
}
/* Install the enter/leave hooks, sleep 2 ms, then check that at least
 * one interrupt fired (the tick) and that nesting was observed. */
static void irq_test(void)
{
    irq_count = 0;
    rt_interrupt_enter_sethook(irq_callback);
    rt_interrupt_leave_sethook(irq_callback);
    rt_thread_mdelay(2); /* at least one tick interrupt should occur */
    LOG_D("%s test irq_test! irq_count %d max_get_nest_count %d\n", UTEST_NAME, irq_count, max_get_nest_count);
    uassert_int_not_equal(0, irq_count);
    uassert_int_not_equal(0, max_get_nest_count);
    /* uninstall the hooks before leaving the unit */
    rt_interrupt_enter_sethook(RT_NULL);
    rt_interrupt_leave_sethook(RT_NULL);
    LOG_D("irq_test OK!\n");
}
/* Reset the shared counters before each run of the testcase. */
static rt_err_t utest_tc_init(void)
{
    irq_count = 0;
    max_get_nest_count = 0;
    return RT_EOK;
}

static rt_err_t utest_tc_cleanup(void)
{
    return RT_EOK;
}
/* Verify that no interrupt hook fires while interrupts are globally
 * disabled: busy-wait inside the critical section, then check the
 * counter stayed at zero.
 *
 * Fix: the original asserted (which prints to the console) while
 * interrupts were still disabled; snapshot the counter inside the
 * critical section and assert after re-enabling. */
static void interrupt_test(void)
{
    rt_base_t level;
    uint32_t i = 1000;
    uint32_t count_in_critical;

    rt_interrupt_enter_sethook(irq_callback);
    rt_interrupt_leave_sethook(irq_callback);

    irq_count = 0;
    level = rt_hw_interrupt_disable();
    while (i)
    {
        i--;
    }
    /* capture before re-enabling: interrupts may fire immediately after */
    count_in_critical = irq_count;
    rt_hw_interrupt_enable(level);

    uassert_int_equal(0, count_in_critical);

    rt_interrupt_enter_sethook(RT_NULL);
    rt_interrupt_leave_sethook(RT_NULL);
}
/* Register both IRQ test units with the utest framework. */
static void testcase(void)
{
    UTEST_UNIT_RUN(irq_test);
    UTEST_UNIT_RUN(interrupt_test);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.irq_tc", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,374 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-09-08 liukang the first version
* 2023-09-15 xqyjlj change stack size in cpu64
*/
#include <rtthread.h>
#include "utest.h"
#include <stdlib.h>
#define THREAD_STACKSIZE UTEST_THR_STACK_SIZE
static struct rt_mailbox test_static_mb;
static char mb_pool[128];
static rt_mailbox_t test_dynamic_mb;
static uint8_t static_mb_recv_thread_finish, static_mb_send_thread_finish;
static uint8_t dynamic_mb_recv_thread_finish, dynamic_mb_send_thread_finish;
rt_align(RT_ALIGN_SIZE)
static char thread1_stack[UTEST_THR_STACK_SIZE];
static struct rt_thread thread1;
rt_align(RT_ALIGN_SIZE)
static char thread2_stack[UTEST_THR_STACK_SIZE];
static struct rt_thread thread2;
#define THREAD_PRIORITY 9
#define THREAD_TIMESLICE 5
static rt_thread_t mb_send = RT_NULL;
static rt_thread_t mb_recv = RT_NULL;
static rt_uint8_t mb_send_str1[] = "this is first mail!";
static rt_uint8_t mb_send_str2[] = "this is second mail!";
static rt_uint8_t mb_send_str3[] = "this is thirdy mail!";
static rt_uint8_t *mb_recv_str1;
static rt_uint8_t *mb_recv_str2;
static rt_uint8_t *mb_recv_str3;
static void test_mailbox_init(void)
{
rt_err_t result;
result = rt_mb_init(&test_static_mb, "mbt", &mb_pool[0], sizeof(mb_pool) / 4, RT_IPC_FLAG_FIFO);
if (result != RT_EOK)
{
uassert_false(1);
}
result = rt_mb_detach(&test_static_mb);
if (result != RT_EOK)
{
uassert_false(1);
}
result = rt_mb_init(&test_static_mb, "mbt", &mb_pool[0], sizeof(mb_pool) / 4, RT_IPC_FLAG_PRIO);
if (result != RT_EOK)
{
uassert_false(1);
}
result = rt_mb_detach(&test_static_mb);
if (result != RT_EOK)
{
uassert_false(1);
}
uassert_true(1);
}
static void test_mailbox_deatch(void)
{
rt_err_t result;
result = rt_mb_init(&test_static_mb, "mbt", &mb_pool[0], sizeof(mb_pool) / 4, RT_IPC_FLAG_FIFO);
if (result != RT_EOK)
{
uassert_false(1);
}
result = rt_mb_detach(&test_static_mb);
if (result != RT_EOK)
{
uassert_false(1);
}
result = rt_mb_init(&test_static_mb, "mbt", &mb_pool[0], sizeof(mb_pool) / 4, RT_IPC_FLAG_PRIO);
if (result != RT_EOK)
{
uassert_false(1);
}
result = rt_mb_detach(&test_static_mb);
if (result != RT_EOK)
{
uassert_false(1);
}
uassert_true(1);
}
/* Create/delete a dynamic mailbox with FIFO then PRIO mode. For
 * rt_mb_create() the size argument is a mail count and the kernel
 * allocates the pool itself, so the byte-based expression only affects
 * how many mails are requested, not memory safety. */
static void test_mailbox_create(void)
{
    rt_err_t result;

    test_dynamic_mb = rt_mb_create("test_dynamic_mb", sizeof(mb_pool) / 4, RT_IPC_FLAG_FIFO);
    if (test_dynamic_mb == RT_NULL)
    {
        uassert_false(1);
    }
    result = rt_mb_delete(test_dynamic_mb);
    if (result != RT_EOK)
    {
        uassert_false(1);
    }

    test_dynamic_mb = rt_mb_create("test_dynamic_mb", sizeof(mb_pool) / 4, RT_IPC_FLAG_PRIO);
    if (test_dynamic_mb == RT_NULL)
    {
        uassert_false(1);
    }
    result = rt_mb_delete(test_dynamic_mb);
    if (result != RT_EOK)
    {
        uassert_false(1);
    }
    uassert_true(1);
}

/* Same round trip kept as a separate unit so a delete failure is
 * reported under its own test name. */
static void test_mailbox_delete(void)
{
    rt_err_t result;

    test_dynamic_mb = rt_mb_create("test_dynamic_mb", sizeof(mb_pool) / 4, RT_IPC_FLAG_FIFO);
    if (test_dynamic_mb == RT_NULL)
    {
        uassert_false(1);
    }
    result = rt_mb_delete(test_dynamic_mb);
    if (result != RT_EOK)
    {
        uassert_false(1);
    }

    test_dynamic_mb = rt_mb_create("test_dynamic_mb", sizeof(mb_pool) / 4, RT_IPC_FLAG_PRIO);
    if (test_dynamic_mb == RT_NULL)
    {
        uassert_false(1);
    }
    result = rt_mb_delete(test_dynamic_mb);
    if (result != RT_EOK)
    {
        uassert_false(1);
    }
    uassert_true(1);
}
/* Sender: exercise all three send paths (send, send_wait, urgent) with
 * 100 ms gaps so the higher-priority receiver drains each mail in turn.
 * Each mail's payload is the *address* of a string buffer. */
static void thread2_send_static_mb(void *arg)
{
    rt_err_t res = RT_EOK;

    res = rt_mb_send(&test_static_mb, (rt_ubase_t)&mb_send_str1);
    if (res != RT_EOK)
    {
        uassert_false(1);
    }
    rt_thread_mdelay(100);
    res = rt_mb_send_wait(&test_static_mb, (rt_ubase_t)&mb_send_str2, 10);
    if (res != RT_EOK)
    {
        uassert_false(1);
    }
    rt_thread_mdelay(100);
    res = rt_mb_urgent(&test_static_mb, (rt_ubase_t)&mb_send_str3);
    if (res != RT_EOK)
    {
        uassert_false(1);
    }
    static_mb_send_thread_finish = 1;
}

/* Receiver: blocks on the mailbox three times and checks that each
 * received pointer carries the expected string content. */
static void thread1_recv_static_mb(void *arg)
{
    rt_err_t result = RT_EOK;

    result = rt_mb_recv(&test_static_mb, (rt_ubase_t *)&mb_recv_str1, RT_WAITING_FOREVER);
    if (result != RT_EOK || rt_strcmp((const char *)mb_recv_str1, (const char *)mb_send_str1) != 0)
    {
        uassert_false(1);
    }
    result = rt_mb_recv(&test_static_mb, (rt_ubase_t *)&mb_recv_str2, RT_WAITING_FOREVER);
    if (result != RT_EOK || rt_strcmp((const char *)mb_recv_str2, (const char *)mb_send_str2) != 0)
    {
        uassert_false(1);
    }
    result = rt_mb_recv(&test_static_mb, (rt_ubase_t *)&mb_recv_str3, RT_WAITING_FOREVER);
    if (result != RT_EOK || rt_strcmp((const char *)mb_recv_str3, (const char *)mb_send_str3) != 0)
    {
        uassert_false(1);
    }
    static_mb_recv_thread_finish = 1;
}
static void test_static_mailbox_send_recv(void)
{
rt_err_t result;
result = rt_mb_init(&test_static_mb, "mbt", &mb_pool[0], sizeof(mb_pool) / 4, RT_IPC_FLAG_FIFO);
if (result != RT_EOK)
{
uassert_false(1);
}
rt_thread_init(&thread1,
"thread1",
thread1_recv_static_mb,
RT_NULL,
&thread1_stack[0],
sizeof(thread1_stack),
THREAD_PRIORITY - 1, THREAD_TIMESLICE);
rt_thread_startup(&thread1);
rt_thread_init(&thread2,
"thread2",
thread2_send_static_mb,
RT_NULL,
&thread2_stack[0],
sizeof(thread2_stack),
THREAD_PRIORITY, THREAD_TIMESLICE);
rt_thread_startup(&thread2);
while (static_mb_recv_thread_finish != 1 || static_mb_send_thread_finish != 1)
{
rt_thread_delay(1);
}
if (rt_mb_detach(&test_static_mb) != RT_EOK)
{
uassert_false(1);
}
uassert_true(1);
}
/* Sender for the dynamic mailbox: same three send paths as the static
 * variant (send, send_wait, urgent) with 100 ms gaps. */
static void thread4_send_dynamic_mb(void *arg)
{
    rt_err_t res = RT_EOK;

    res = rt_mb_send(test_dynamic_mb, (rt_ubase_t)&mb_send_str1);
    if (res != RT_EOK)
    {
        uassert_false(1);
    }
    rt_thread_mdelay(100);
    res = rt_mb_send_wait(test_dynamic_mb, (rt_ubase_t)&mb_send_str2, 10);
    if (res != RT_EOK)
    {
        uassert_false(1);
    }
    rt_thread_mdelay(100);
    res = rt_mb_urgent(test_dynamic_mb, (rt_ubase_t)&mb_send_str3);
    if (res != RT_EOK)
    {
        uassert_false(1);
    }
    dynamic_mb_send_thread_finish = 1;
}

/* Receiver for the dynamic mailbox: three blocking receives, each
 * checked against the corresponding sent string. */
static void thread3_recv_dynamic_mb(void *arg)
{
    rt_err_t result = RT_EOK;

    result = rt_mb_recv(test_dynamic_mb, (rt_ubase_t *)&mb_recv_str1, RT_WAITING_FOREVER);
    if (result != RT_EOK || rt_strcmp((const char *)mb_recv_str1, (const char *)mb_send_str1) != 0)
    {
        uassert_false(1);
    }
    result = rt_mb_recv(test_dynamic_mb, (rt_ubase_t *)&mb_recv_str2, RT_WAITING_FOREVER);
    if (result != RT_EOK || rt_strcmp((const char *)mb_recv_str2, (const char *)mb_send_str2) != 0)
    {
        uassert_false(1);
    }
    result = rt_mb_recv(test_dynamic_mb, (rt_ubase_t *)&mb_recv_str3, RT_WAITING_FOREVER);
    if (result != RT_EOK || rt_strcmp((const char *)mb_recv_str3, (const char *)mb_send_str3) != 0)
    {
        uassert_false(1);
    }
    dynamic_mb_recv_thread_finish = 1;
}
/* Send/receive three mails through a dynamically created mailbox using
 * two dynamically created threads.
 *
 * Fix: reset the completion flags so the unit can run more than once. */
static void test_dynamic_mailbox_send_recv(void)
{
    dynamic_mb_recv_thread_finish = 0;
    dynamic_mb_send_thread_finish = 0;

    test_dynamic_mb = rt_mb_create("mbt", sizeof(mb_pool) / 4, RT_IPC_FLAG_FIFO);
    if (test_dynamic_mb == RT_NULL)
    {
        uassert_false(1);
    }

    mb_recv = rt_thread_create("mb_recv_thread",
                               thread3_recv_dynamic_mb,
                               RT_NULL,
                               UTEST_THR_STACK_SIZE,
                               THREAD_PRIORITY - 1,
                               THREAD_TIMESLICE);
    if (mb_recv == RT_NULL)
    {
        uassert_false(1);
    }
    rt_thread_startup(mb_recv);

    mb_send = rt_thread_create("mb_send_thread",
                               thread4_send_dynamic_mb,
                               RT_NULL,
                               UTEST_THR_STACK_SIZE,
                               THREAD_PRIORITY - 1,
                               THREAD_TIMESLICE);
    if (mb_send == RT_NULL)
    {
        uassert_false(1);
    }
    rt_thread_startup(mb_send);

    /* poll until both worker threads report completion */
    while (dynamic_mb_recv_thread_finish != 1 || dynamic_mb_send_thread_finish != 1)
    {
        rt_thread_delay(1);
    }

    if (rt_mb_delete(test_dynamic_mb) != RT_EOK)
    {
        uassert_false(1);
    }
    uassert_true(1);
}
static rt_err_t utest_tc_init(void)
{
return RT_EOK;
}
static rt_err_t utest_tc_cleanup(void)
{
    return RT_EOK;
}

/* Register the mailbox units. Note: 'test_mailbox_deatch' keeps its
 * historical misspelling; renaming would change the reported unit name. */
static void testcase(void)
{
    UTEST_UNIT_RUN(test_mailbox_init);
    UTEST_UNIT_RUN(test_mailbox_deatch);
    UTEST_UNIT_RUN(test_mailbox_create);
    UTEST_UNIT_RUN(test_mailbox_delete);
    UTEST_UNIT_RUN(test_static_mailbox_send_recv);
    UTEST_UNIT_RUN(test_dynamic_mailbox_send_recv);
}
UTEST_TC_EXPORT(testcase, "src.ipc.mailbox_tc", utest_tc_init, utest_tc_cleanup, 60);

View File

@ -0,0 +1,585 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-10-14 tyx the first version
*/
#include <rtthread.h>
#include <stdlib.h>
#include "utest.h"
/* NOTE(review): these structs appear to mirror the kernel's private
 * small-memory allocator layout (src/mem.c) so the test can inspect
 * heap internals directly — they must stay in sync with the kernel
 * definitions; confirm against the kernel source when updating. */
struct rt_small_mem_item
{
    rt_ubase_t pool_ptr;         /**< small memory object addr */
    rt_size_t next;              /**< next free item */
    rt_size_t prev;              /**< prev free item */
#ifdef RT_USING_MEMTRACE
#ifdef ARCH_CPU_64BIT
    rt_uint8_t thread[8];        /**< thread name */
#else
    rt_uint8_t thread[4];        /**< thread name */
#endif /* ARCH_CPU_64BIT */
#endif /* RT_USING_MEMTRACE */
};

struct rt_small_mem
{
    struct rt_memory parent;     /**< inherit from rt_memory */
    rt_uint8_t *heap_ptr;        /**< pointer to the heap */
    struct rt_small_mem_item *heap_end;
    struct rt_small_mem_item *lfree;
    rt_size_t mem_size_aligned;  /**< aligned memory size */
};

/* Usable payload size of an item: offset of the next item minus this
 * item's offset, minus the aligned item header. */
#define MEM_SIZE(_heap, _mem) \
    (((struct rt_small_mem_item *)(_mem))->next - ((rt_ubase_t)(_mem) - \
    (rt_ubase_t)((_heap)->heap_ptr)) - RT_ALIGN(sizeof(struct rt_small_mem_item), RT_ALIGN_SIZE))

/* Size in bytes of the buffer handed to rt_smem_init() under test. */
#define TEST_MEM_SIZE 1024
/* Walk the heap's item list and return the payload size of the largest
 * free block (0 when the heap is fully allocated).
 * NOTE(review): the low bit of pool_ptr is treated here as a
 * "block used" marker — confirm against the kernel's mem.c tagging. */
static rt_size_t max_block(struct rt_small_mem *heap)
{
    struct rt_small_mem_item *mem;
    rt_size_t max = 0, size;

    for (mem = (struct rt_small_mem_item *)heap->heap_ptr;
         mem != heap->heap_end;
         mem = (struct rt_small_mem_item *)&heap->heap_ptr[mem->next])
    {
        /* low bit clear => free block */
        if (((rt_ubase_t)mem->pool_ptr & 0x1) == 0)
        {
            size = MEM_SIZE(heap, mem);
            if (size > max)
            {
                max = size;
            }
        }
    }
    return max;
}
/* Compare 'size' bytes starting at 'ptr' against the fill byte 'v'.
 * Returns 0 when every byte equals v, otherwise the difference at the
 * first mismatching byte.
 *
 * Fix: the original never advanced the pointer, so it compared the very
 * first byte 'size' times and ignored the rest of the buffer — magic
 * fill corruption anywhere past byte 0 went undetected. */
static int _mem_cmp(void *ptr, rt_uint8_t v, rt_size_t size)
{
    rt_uint8_t *p = (rt_uint8_t *)ptr;

    while (size-- != 0)
    {
        if (*p != v)
            return *p - v;
        p++;
    }
    return 0;
}
/* One allocation under test: its pointer, requested size, and the
 * magic fill byte used to detect cross-block corruption. */
struct mem_test_context
{
    void *ptr;
    rt_size_t size;
    rt_uint8_t magic;
};
static void mem_functional_test(void)
{
rt_size_t total_size;
rt_uint8_t *buf;
struct rt_small_mem *heap;
rt_uint8_t magic = __LINE__;
/* Prepare test memory */
buf = rt_malloc(TEST_MEM_SIZE);
uassert_not_null(buf);
uassert_int_equal(RT_ALIGN((rt_ubase_t)buf, RT_ALIGN_SIZE), (rt_ubase_t)buf);
rt_memset(buf, 0xAA, TEST_MEM_SIZE);
/* small heap init */
heap = (struct rt_small_mem *)rt_smem_init("mem_tc", buf, TEST_MEM_SIZE);
/* get total size */
total_size = max_block(heap);
uassert_int_not_equal(total_size, 0);
/*
* Allocate all memory at a time and test whether
* the memory allocation release function is effective
*/
{
struct mem_test_context ctx;
ctx.magic = magic++;
ctx.size = max_block(heap);
ctx.ptr = rt_smem_alloc(&heap->parent, ctx.size);
uassert_not_null(ctx.ptr);
rt_memset(ctx.ptr, ctx.magic, ctx.size);
uassert_int_equal(_mem_cmp(ctx.ptr, ctx.magic, ctx.size), 0);
rt_smem_free(ctx.ptr);
uassert_int_equal(max_block(heap), total_size);
}
/*
* Apply for memory release sequentially and
* test whether memory block merging is effective
*/
{
rt_size_t i, max_free = 0;
struct mem_test_context ctx[3];
/* alloc mem */
for (i = 0; i < sizeof(ctx) / sizeof(ctx[0]); i++)
{
ctx[i].magic = magic++;
ctx[i].size = max_block(heap) / (sizeof(ctx) / sizeof(ctx[0]) - i);
ctx[i].ptr = rt_smem_alloc(&heap->parent, ctx[i].size);
uassert_not_null(ctx[i].ptr);
rt_memset(ctx[i].ptr, ctx[i].magic, ctx[i].size);
}
/* All memory has been applied. The remaining memory should be 0 */
uassert_int_equal(max_block(heap), 0);
/* Verify that the memory data is correct */
for (i = 0; i < sizeof(ctx) / sizeof(ctx[0]); i++)
{
uassert_int_equal(_mem_cmp(ctx[i].ptr, ctx[i].magic, ctx[i].size), 0);
}
/* Sequential memory release */
for (i = 0; i < sizeof(ctx) / sizeof(ctx[0]); i++)
{
uassert_int_equal(_mem_cmp(ctx[i].ptr, ctx[i].magic, ctx[i].size), 0);
rt_smem_free(ctx[i].ptr);
max_free += ctx[i].size;
uassert_true(max_block(heap) >= max_free);
}
/* Check whether the memory is fully merged */
uassert_int_equal(max_block(heap), total_size);
}
/*
* Apply for memory release at an interval to
* test whether memory block merging is effective
*/
{
rt_size_t i, max_free = 0;
struct mem_test_context ctx[3];
/* alloc mem */
for (i = 0; i < sizeof(ctx) / sizeof(ctx[0]); i++)
{
ctx[i].magic = magic++;
ctx[i].size = max_block(heap) / (sizeof(ctx) / sizeof(ctx[0]) - i);
ctx[i].ptr = rt_smem_alloc(&heap->parent, ctx[i].size);
uassert_not_null(ctx[i].ptr);
rt_memset(ctx[i].ptr, ctx[i].magic, ctx[i].size);
}
/* All memory has been applied. The remaining memory should be 0 */
uassert_int_equal(max_block(heap), 0);
/* Verify that the memory data is correct */
for (i = 0; i < sizeof(ctx) / sizeof(ctx[0]); i++)
{
uassert_int_equal(_mem_cmp(ctx[i].ptr, ctx[i].magic, ctx[i].size), 0);
}
/* Release even address */
for (i = 0; i < sizeof(ctx) / sizeof(ctx[0]); i++)
{
if (i % 2 == 0)
{
uassert_int_equal(_mem_cmp(ctx[i].ptr, ctx[i].magic, ctx[i].size), 0);
rt_smem_free(ctx[i].ptr);
uassert_true(max_block(heap) >= ctx[0].size);
}
}
/* Release odd addresses and merge memory blocks */
for (i = 0; i < sizeof(ctx) / sizeof(ctx[0]); i++)
{
if (i % 2 != 0)
{
uassert_int_equal(_mem_cmp(ctx[i].ptr, ctx[i].magic, ctx[i].size), 0);
rt_smem_free(ctx[i].ptr);
max_free += ctx[i - 1].size + ctx[i + 1].size;
uassert_true(max_block(heap) >= max_free);
}
}
/* Check whether the memory is fully merged */
uassert_int_equal(max_block(heap), total_size);
}
/* mem realloc test,Small - > Large */
{
/* Request a piece of memory for subsequent reallocation operations */
struct mem_test_context ctx[3];
ctx[0].magic = magic++;
ctx[0].size = max_block(heap) / 3;
ctx[0].ptr = rt_smem_alloc(&heap->parent, ctx[0].size);
uassert_not_null(ctx[0].ptr);
rt_memset(ctx[0].ptr, ctx[0].magic, ctx[0].size);
/* Apply for a small piece of memory and split the continuous memory */
ctx[1].magic = magic++;
ctx[1].size = RT_ALIGN_SIZE;
ctx[1].ptr = rt_smem_alloc(&heap->parent, ctx[1].size);
uassert_not_null(ctx[1].ptr);
rt_memset(ctx[1].ptr, ctx[1].magic, ctx[1].size);
/* Check whether the maximum memory block is larger than the first piece of memory */
uassert_true(max_block(heap) > ctx[0].size);
/* Reallocate the first piece of memory */
ctx[2].magic = magic++;
ctx[2].size = max_block(heap);
ctx[2].ptr = rt_smem_realloc(&heap->parent, ctx[0].ptr, ctx[2].size);
uassert_not_null(ctx[2].ptr);
uassert_int_not_equal(ctx[0].ptr, ctx[2].ptr);
uassert_int_equal(_mem_cmp(ctx[2].ptr, ctx[0].magic, ctx[0].size), 0);
rt_memset(ctx[2].ptr, ctx[2].magic, ctx[2].size);
/* Free the second piece of memory */
uassert_int_equal(_mem_cmp(ctx[1].ptr, ctx[1].magic, ctx[1].size), 0);
rt_smem_free(ctx[1].ptr);
/* Free reallocated memory */
uassert_int_equal(_mem_cmp(ctx[2].ptr, ctx[2].magic, ctx[2].size), 0);
rt_smem_free(ctx[2].ptr);
/* Check memory integrity */
uassert_int_equal(max_block(heap), total_size);
}
/* mem realloc test,Large - > Small */
{
rt_size_t max_free;
struct mem_test_context ctx;
/* alloc a piece of memory */
ctx.magic = magic++;
ctx.size = max_block(heap) / 2;
ctx.ptr = rt_smem_alloc(&heap->parent, ctx.size);
uassert_not_null(ctx.ptr);
rt_memset(ctx.ptr, ctx.magic, ctx.size);
uassert_int_equal(_mem_cmp(ctx.ptr, ctx.magic, ctx.size), 0);
/* Get remaining memory */
max_free = max_block(heap);
/* Change memory size */
ctx.size = ctx.size / 2;
uassert_int_equal((rt_ubase_t)rt_smem_realloc(&heap->parent, ctx.ptr, ctx.size), (rt_ubase_t)ctx.ptr);
/* Get remaining size */
uassert_true(max_block(heap) > max_free);
/* Free memory */
uassert_int_equal(_mem_cmp(ctx.ptr, ctx.magic, ctx.size), 0);
rt_smem_free(ctx.ptr);
/* Check memory integrity */
uassert_int_equal(max_block(heap), total_size);
}
/* mem realloc test,equal */
{
rt_size_t max_free;
struct mem_test_context ctx;
/* alloc a piece of memory */
ctx.magic = magic++;
ctx.size = max_block(heap) / 2;
ctx.ptr = rt_smem_alloc(&heap->parent, ctx.size);
uassert_not_null(ctx.ptr);
rt_memset(ctx.ptr, ctx.magic, ctx.size);
uassert_int_equal(_mem_cmp(ctx.ptr, ctx.magic, ctx.size), 0);
/* Get remaining memory */
max_free = max_block(heap);
/* Do not change memory size */
uassert_int_equal((rt_ubase_t)rt_smem_realloc(&heap->parent, ctx.ptr, ctx.size), (rt_ubase_t)ctx.ptr);
/* Get remaining size */
uassert_true(max_block(heap) == max_free);
/* Free memory */
uassert_int_equal(_mem_cmp(ctx.ptr, ctx.magic, ctx.size), 0);
rt_smem_free(ctx.ptr);
/* Check memory integrity */
uassert_int_equal(max_block(heap), total_size);
}
/* small heap deinit */
rt_smem_detach(&heap->parent);
/* release test resources */
rt_free(buf);
}
/* Bookkeeping header stored at the start of every block allocated by
 * mem_alloc_test(); the remainder of the block is filled with `magic`
 * so corruption can be detected when the block is freed. */
struct mem_alloc_context
{
    rt_list_t node;    /* link into mem_alloc_head.list of live allocations */
    rt_size_t size;    /* total allocation size, header included */
    rt_uint8_t magic;  /* fill byte used to validate the payload later */
};
/* Driver state for the randomized allocation stress loop. */
struct mem_alloc_head
{
    rt_list_t list;      /* all currently outstanding allocations */
    rt_size_t count;     /* number of entries in `list` */
    rt_tick_t start;     /* last progress-print tick; advanced each interval */
    rt_tick_t end;       /* tick at which the test stops */
    rt_tick_t interval;  /* progress-print period (1/20 of the run time) */
};
#define MEM_RANG_ALLOC_BLK_MIN 2   /* min block size, in units of sizeof(struct mem_alloc_context) */
#define MEM_RANG_ALLOC_BLK_MAX 5   /* rand() modulus for the size factor */
#define MEM_RANG_ALLOC_TEST_TIME 5 /* test duration in seconds */
/**
 * Randomized alloc/free stress test for the small-mem allocator.
 *
 * Runs for MEM_RANG_ALLOC_TEST_TIME seconds: each iteration either
 * allocates a random-sized block (60% of the time) or frees the oldest
 * one. Every block carries a magic fill byte that is verified before it
 * is released. On exit the heap must be fully merged back to its
 * original largest free block.
 */
static void mem_alloc_test(void)
{
    struct mem_alloc_head head;
    rt_uint8_t *buf;
    struct rt_small_mem *heap;
    rt_size_t total_size, size;
    struct mem_alloc_context *ctx;
    /* init */
    rt_list_init(&head.list);
    head.count = 0;
    head.start = rt_tick_get();
    head.end = rt_tick_get() + rt_tick_from_millisecond(MEM_RANG_ALLOC_TEST_TIME * 1000);
    head.interval = (head.end - head.start) / 20;
    buf = rt_malloc(TEST_MEM_SIZE);
    uassert_not_null(buf);
    uassert_int_equal(RT_ALIGN((rt_ubase_t)buf, RT_ALIGN_SIZE), (rt_ubase_t)buf);
    rt_memset(buf, 0xAA, TEST_MEM_SIZE);
    heap = (struct rt_small_mem *)rt_smem_init("mem_tc", buf, TEST_MEM_SIZE);
    total_size = max_block(heap);
    uassert_int_not_equal(total_size, 0);
    /* test run: head.start advances past head.end when time is up, making
     * the (unsigned, wrap-safe) difference exceed RT_TICK_MAX / 2 */
    while (head.end - head.start < RT_TICK_MAX / 2)
    {
        if (rt_tick_get() - head.start >= head.interval)
        {
            head.start = rt_tick_get();
            rt_kprintf("#");
        }
        /* 60% probability to perform an alloc operation */
        if (rand() % 10 >= 4)
        {
            size = rand() % MEM_RANG_ALLOC_BLK_MAX + MEM_RANG_ALLOC_BLK_MIN;
            size *= sizeof(struct mem_alloc_context);
            ctx = rt_smem_alloc(&heap->parent, size);
            if (ctx == RT_NULL)
            {
                if (head.count == 0)
                {
                    break;
                }
                /* heap exhausted: free the oldest half of the live blocks */
                size = head.count / 2;
                while (size != head.count)
                {
                    ctx = rt_list_first_entry(&head.list, struct mem_alloc_context, node);
                    rt_list_remove(&ctx->node);
                    if (ctx->size > sizeof(*ctx))
                    {
                        /* verify the payload was not corrupted */
                        if (_mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx)) != 0)
                        {
                            uassert_true(0);
                        }
                    }
                    rt_memset(ctx, 0xAA, ctx->size);
                    rt_smem_free(ctx);
                    head.count --;
                }
                continue;
            }
            /* returned pointer must honour the platform alignment */
            if (RT_ALIGN((rt_ubase_t)ctx, RT_ALIGN_SIZE) != (rt_ubase_t)ctx)
            {
                uassert_int_equal(RT_ALIGN((rt_ubase_t)ctx, RT_ALIGN_SIZE), (rt_ubase_t)ctx);
            }
            rt_memset(ctx, 0, size);
            rt_list_init(&ctx->node);
            ctx->size = size;
            ctx->magic = rand() & 0xff;
            if (ctx->size > sizeof(*ctx))
            {
                rt_memset(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
            }
            rt_list_insert_after(&head.list, &ctx->node);
            head.count += 1;
        }
        else
        {
            /* free path: release the oldest live allocation, if any */
            if (!rt_list_isempty(&head.list))
            {
                ctx = rt_list_first_entry(&head.list, struct mem_alloc_context, node);
                rt_list_remove(&ctx->node);
                if (ctx->size > sizeof(*ctx))
                {
                    if (_mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx)) != 0)
                    {
                        uassert_true(0);
                    }
                }
                rt_memset(ctx, 0xAA, ctx->size);
                rt_smem_free(ctx);
                head.count --;
            }
        }
    }
    /* drain any remaining allocations, checking each payload */
    while (!rt_list_isempty(&head.list))
    {
        ctx = rt_list_first_entry(&head.list, struct mem_alloc_context, node);
        rt_list_remove(&ctx->node);
        if (ctx->size > sizeof(*ctx))
        {
            if (_mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx)) != 0)
            {
                uassert_true(0);
            }
        }
        rt_memset(ctx, 0xAA, ctx->size);
        rt_smem_free(ctx);
        head.count --;
    }
    uassert_int_equal(head.count, 0);
    /* all blocks freed: the heap must have merged back to its full size */
    uassert_int_equal(max_block(heap), total_size);
    /* small heap deinit */
    rt_smem_detach(&heap->parent);
    /* release test resources */
    rt_free(buf);
}
#define MEM_RANG_REALLOC_BLK_MIN 0   /* min size factor; 0 enables the realloc-to-zero (free) path */
#define MEM_RANG_REALLOC_BLK_MAX 5   /* rand() modulus for the size factor */
#define MEM_RANG_REALLOC_TEST_TIME 5 /* test duration in seconds */
/* Header written at the start of every block managed by mem_realloc_test();
 * the remaining bytes of the block are filled with `magic`. */
struct mem_realloc_context
{
    rt_size_t size;    /* size the block was last (re)allocated with */
    rt_uint8_t magic;  /* fill byte used to validate the payload */
};
/* Driver state for the randomized realloc stress loop. */
struct mem_realloc_head
{
    struct mem_realloc_context **ctx_tab; /* slot table, itself allocated from the heap under test */
    rt_size_t count;     /* number of slots in ctx_tab */
    rt_tick_t start;     /* last progress-print tick */
    rt_tick_t end;       /* tick at which the test stops */
    rt_tick_t interval;  /* progress-print period */
};
static void mem_realloc_test(void)
{
struct mem_realloc_head head;
rt_uint8_t *buf;
struct rt_small_mem *heap;
rt_size_t total_size, size, idx;
struct mem_realloc_context *ctx;
int res;
size = RT_ALIGN(sizeof(struct mem_realloc_context), RT_ALIGN_SIZE) + RT_ALIGN_SIZE;
size = TEST_MEM_SIZE / size;
/* init */
head.ctx_tab = RT_NULL;
head.count = size;
head.start = rt_tick_get();
head.end = rt_tick_get() + rt_tick_from_millisecond(MEM_RANG_ALLOC_TEST_TIME * 1000);
head.interval = (head.end - head.start) / 20;
buf = rt_malloc(TEST_MEM_SIZE);
uassert_not_null(buf);
uassert_int_equal(RT_ALIGN((rt_ubase_t)buf, RT_ALIGN_SIZE), (rt_ubase_t)buf);
rt_memset(buf, 0xAA, TEST_MEM_SIZE);
heap = (struct rt_small_mem *)rt_smem_init("mem_tc", buf, TEST_MEM_SIZE);
total_size = max_block(heap);
uassert_int_not_equal(total_size, 0);
/* init ctx tab */
size = head.count * sizeof(struct mem_realloc_context *);
head.ctx_tab = rt_smem_alloc(&heap->parent, size);
uassert_not_null(head.ctx_tab);
rt_memset(head.ctx_tab, 0, size);
/* test run */
while (head.end - head.start < RT_TICK_MAX / 2)
{
if (rt_tick_get() - head.start >= head.interval)
{
head.start = rt_tick_get();
rt_kprintf("#");
}
size = rand() % MEM_RANG_ALLOC_BLK_MAX + MEM_RANG_ALLOC_BLK_MIN;
size *= sizeof(struct mem_realloc_context);
idx = rand() % head.count;
ctx = rt_smem_realloc(&heap->parent, head.ctx_tab[idx], size);
if (ctx == RT_NULL)
{
if (size == 0)
{
if (head.ctx_tab[idx])
{
head.ctx_tab[idx] = RT_NULL;
}
}
else
{
for (idx = 0; idx < head.count; idx++)
{
ctx = head.ctx_tab[idx];
if (rand() % 2 && ctx)
{
if (ctx->size > sizeof(*ctx))
{
res = _mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
if (res != 0)
{
uassert_int_equal(res, 0);
}
}
rt_memset(ctx, 0xAA, ctx->size);
rt_smem_realloc(&heap->parent, ctx, 0);
head.ctx_tab[idx] = RT_NULL;
}
}
}
continue;
}
/* check mem */
if (head.ctx_tab[idx] != RT_NULL)
{
res = 0;
if (ctx->size < size)
{
if (ctx->size > sizeof(*ctx))
{
res = _mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
}
}
else
{
if (size > sizeof(*ctx))
{
res = _mem_cmp(&ctx[1], ctx->magic, size - sizeof(*ctx));
}
}
if (res != 0)
{
uassert_int_equal(res, 0);
}
}
/* init mem */
ctx->magic = rand() & 0xff;
ctx->size = size;
if (ctx->size > sizeof(*ctx))
{
rt_memset(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
}
head.ctx_tab[idx] = ctx;
}
/* free all mem */
for (idx = 0; idx < head.count; idx++)
{
ctx = head.ctx_tab[idx];
if (ctx == RT_NULL)
{
continue;
}
if (ctx->size > sizeof(*ctx))
{
res = _mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
if (res != 0)
{
uassert_int_equal(res, 0);
}
}
rt_memset(ctx, 0xAA, ctx->size);
rt_smem_realloc(&heap->parent, ctx, 0);
head.ctx_tab[idx] = RT_NULL;
}
uassert_int_not_equal(max_block(heap), total_size);
/* small heap deinit */
rt_smem_detach(&heap->parent);
/* release test resources */
rt_free(buf);
}
/* Per-testcase setup: nothing to prepare for the mem tests. */
static rt_err_t utest_tc_init(void)
{
    return RT_EOK;
}
/* Per-testcase teardown: nothing to release. */
static rt_err_t utest_tc_cleanup(void)
{
    return RT_EOK;
}
/* Register the individual mem test units with the utest framework. */
static void testcase(void)
{
    UTEST_UNIT_RUN(mem_functional_test);
    UTEST_UNIT_RUN(mem_alloc_test);
    UTEST_UNIT_RUN(mem_realloc_test);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.mem_tc", utest_tc_init, utest_tc_cleanup, 20);

View File

@ -0,0 +1,97 @@
/*
* Copyright (c) 2006-2019, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-01-16 flybreak the first version
*/
#include <rtthread.h>
#include <stdlib.h>
#include "utest.h"
#define HEAP_SIZE (64 * 1024)               /* size of the memheap under test */
#define HEAP_ALIGN (4)                      /* alignment for the backing buffer */
#define SLICE_NUM (40)                      /* number of slots exercised concurrently */
#define TEST_TIMES (100000)                 /* iterations of the random free/alloc/realloc loop */
#define HEAP_NAME "heap1"                   /* registered name of the test heap */
#define SLICE_SIZE_MAX (HEAP_SIZE/SLICE_NUM) /* upper bound for a random slice size */
/**
 * Stability test for rt_memheap: allocate SLICE_NUM random slices, then run
 * TEST_TIMES iterations that randomly either free+re-alloc or realloc a
 * random slice. NULL slots (from failed allocations) are skipped; the heap
 * is detached at the end, so per-slice leaks inside the test heap are
 * harmless.
 *
 * Fix: corrected the misspelled/garbled failure message ("totle size too
 * big,can not malloc memory!") and terminated it with a newline.
 */
static void memheap_test(void)
{
    struct rt_memheap heap1;
    void * ptr_start;
    void *ptr[SLICE_NUM];
    int i, cnt = 0;
    /* init heap */
    ptr_start = rt_malloc_align(HEAP_SIZE, HEAP_ALIGN);
    if (ptr_start == RT_NULL)
    {
        rt_kprintf("total size too big, can not malloc memory!\n");
        return;
    }
    rt_memheap_init(&heap1, HEAP_NAME, ptr_start, HEAP_SIZE);
    /* test start */
    for (i = 0; i < SLICE_NUM; i++)
    {
        ptr[i] = 0;
    }
    /* test alloc */
    for (i = 0; i < SLICE_NUM; i++)
    {
        rt_uint32_t slice_size = rand() % SLICE_SIZE_MAX;
        ptr[i] = rt_memheap_alloc(&heap1, slice_size);
    }
    /* test realloc */
    while (cnt < TEST_TIMES)
    {
        rt_uint32_t slice_size = rand() % SLICE_SIZE_MAX;
        rt_uint32_t ptr_index = rand() % SLICE_NUM;
        rt_uint32_t operation = rand() % 2;
        if (ptr[ptr_index])
        {
            if (operation == 0) /* free and malloc */
            {
                rt_memheap_free(ptr[ptr_index]);
                ptr[ptr_index] = rt_memheap_alloc(&heap1, slice_size);
            }
            else /* realloc */
            {
                ptr[ptr_index] = rt_memheap_realloc(&heap1, ptr[ptr_index], slice_size);
            }
        }
        cnt ++;
        /* progress indicator: one '>' per 10% of the run */
        if (cnt % (TEST_TIMES / 10) == 0)
        {
            rt_kprintf(">");
        }
    }
    rt_kprintf("test OK!\n");
    /* test end */
    rt_memheap_detach(&heap1);
    rt_free_align((void *)ptr_start);
}
/* Per-testcase setup: nothing to prepare. */
static rt_err_t utest_tc_init(void)
{
    return RT_EOK;
}
/* Per-testcase teardown: nothing to release. */
static rt_err_t utest_tc_cleanup(void)
{
    return RT_EOK;
}
/* Register the memheap stress unit with the utest framework. */
static void testcase(void)
{
    UTEST_UNIT_RUN(memheap_test);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.memheap_tc", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,234 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-08-28 Sherman the first version
* 2023-09-15 xqyjlj change stack size in cpu64
* fix in smp
*/
#include <rtthread.h>
#include "utest.h"
#define THREAD_STACKSIZE UTEST_THR_STACK_SIZE /* stack size for both test threads */
#define MSG_SIZE 4                             /* payload size of one message (one rt_uint32_t) */
#define MAX_MSGS 5                             /* queue capacity in messages */
static struct rt_messagequeue static_mq;       /* statically initialised queue under test */
static rt_uint8_t mq_buf[RT_MQ_BUF_SIZE(MSG_SIZE, MAX_MSGS)]; /* backing storage for static_mq */
static struct rt_thread mq_send_thread;        /* producer thread (priority 22) */
static struct rt_thread mq_recv_thread;        /* consumer thread (priority 23) */
static rt_uint8_t mq_send_stack[UTEST_THR_STACK_SIZE];
static rt_uint8_t mq_recv_stack[UTEST_THR_STACK_SIZE];
static struct rt_event finish_e;               /* signals completion of both threads */
#define MQSEND_FINISH 0x01                     /* event bit set by the sender */
#define MQRECV_FINIHS 0x02                     /* event bit set by the receiver (NOTE: "FINIHS" typo kept; used file-wide) */
#ifdef RT_USING_HEAP
static rt_mq_t dynamic_mq;                     /* heap-allocated queue, exercised when available */
#endif /* RT_USING_HEAP */
/* Initialise the statically allocated message queue and assert success. */
static void test_mq_init(void)
{
    rt_err_t err = rt_mq_init(&static_mq, "testmq1", mq_buf,
                              MSG_SIZE, sizeof(mq_buf), RT_IPC_FLAG_FIFO);
    uassert_true(err == RT_EOK);
}
/* Create the heap-backed message queue (only when the heap is enabled). */
static void test_mq_create(void)
{
#ifdef RT_USING_HEAP
    dynamic_mq = rt_mq_create("testmq2", MSG_SIZE, MAX_MSGS,
                              RT_IPC_FLAG_FIFO);
    uassert_true(RT_NULL != dynamic_mq);
#endif /* RT_USING_HEAP */
}
/**
 * Producer side of the queue test: fills the queue, checks the -RT_EFULL
 * path, exercises urgent and (optionally) priority sends, and finally
 * resets the queue. The receiver thread drains the queue between phases;
 * the `entry != 0` polls wait for that to happen.
 */
static void mq_send_case(rt_mq_t testmq)
{
    rt_uint32_t send_buf[MAX_MSGS+1] = {0};
    rt_err_t ret = RT_EOK;
    /* phase 1: fill the queue to capacity with values 1..MAX_MSGS */
    for (int var = 0; var < MAX_MSGS; ++var)
    {
        send_buf[var] = var + 1;
        ret = rt_mq_send_wait(testmq, &send_buf[var], sizeof(send_buf[0]), RT_WAITING_FOREVER);
        uassert_true(ret == RT_EOK);
    }
    send_buf[MAX_MSGS] = MAX_MSGS + 1;
    /* non-blocking send on a full queue must fail with -RT_EFULL */
    ret = rt_mq_send(testmq, &send_buf[MAX_MSGS], sizeof(send_buf[0]));
    uassert_true(ret == -RT_EFULL);
    /* blocking send succeeds once the receiver frees a slot */
    ret = rt_mq_send_wait(testmq, &send_buf[MAX_MSGS], sizeof(send_buf[0]), RT_WAITING_FOREVER);
    uassert_true(ret == RT_EOK);
    while (testmq->entry != 0)
    {
        rt_thread_delay(100);
    }
    /* phase 2: rt_mq_urgent must jump the queue (receiver expects 1,2,3) */
    ret = rt_mq_send(testmq, &send_buf[1], sizeof(send_buf[0]));
    uassert_true(ret == RT_EOK);
    ret = rt_mq_send(testmq, &send_buf[2], sizeof(send_buf[0]));
    uassert_true(ret == RT_EOK);
    ret = rt_mq_urgent(testmq, &send_buf[0], sizeof(send_buf[0]));
    uassert_true(ret == RT_EOK);
    while (testmq->entry != 0)
    {
        rt_thread_delay(100);
    }
#ifdef RT_USING_MESSAGEQUEUE_PRIORITY
    /* phase 3: priority sends; receiver expects delivery ordered by prio */
    ret = rt_mq_send_wait_prio(testmq, &send_buf[3], sizeof(send_buf[0]), 3, 0, RT_UNINTERRUPTIBLE);
    uassert_true(ret == RT_EOK);
    ret = rt_mq_send_wait_prio(testmq, &send_buf[0], sizeof(send_buf[0]), 0, 0, RT_UNINTERRUPTIBLE);
    uassert_true(ret == RT_EOK);
    ret = rt_mq_send_wait_prio(testmq, &send_buf[2], sizeof(send_buf[0]), 1, 0, RT_UNINTERRUPTIBLE);
    uassert_true(ret == RT_EOK);
    ret = rt_mq_send_wait_prio(testmq, &send_buf[4], sizeof(send_buf[0]), 4, 0, RT_UNINTERRUPTIBLE);
    uassert_true(ret == RT_EOK);
    ret = rt_mq_send_wait_prio(testmq, &send_buf[1], sizeof(send_buf[0]), 1, 0, RT_UNINTERRUPTIBLE);
    uassert_true(ret == RT_EOK);
    while (testmq->entry != 0)
    {
        rt_thread_delay(100);
    }
#endif
    /* phase 4: a reset must discard any pending message */
    ret = rt_mq_send(testmq, &send_buf[1], sizeof(send_buf[0]));
    uassert_true(ret == RT_EOK);
    ret = rt_mq_control(testmq, RT_IPC_CMD_RESET, RT_NULL);
    uassert_true(ret == RT_EOK);
    uassert_true(testmq->entry == 0);
}
/* Sender thread entry: run the send scenario against the static queue,
 * then against the dynamic queue if it was created, and finally signal
 * completion to the coordinator via finish_e. `param` is unused. */
static void mq_send_entry(void *param)
{
    mq_send_case(&static_mq);
#ifdef RT_USING_HEAP
    if(dynamic_mq != RT_NULL)
    {
        mq_send_case(dynamic_mq);
    }
#endif /* RT_USING_HEAP */
    rt_event_send(&finish_e, MQSEND_FINISH);
}
/**
 * Consumer side of the queue test; each phase mirrors one in
 * mq_send_case(): sequential values 1..MAX_MSGS+1, then the urgent-send
 * order (1,2,3), then (optionally) priority order, which must arrive as
 * MAX_MSGS..1 given the priorities used by the sender.
 */
static void mq_recv_case(rt_mq_t testmq)
{
    rt_uint32_t recv_buf[MAX_MSGS+1] = {0};
    rt_ssize_t ret = RT_EOK;
    /* phase 1: drain the full queue plus the extra blocking send */
    for (int var = 0; var < MAX_MSGS + 1; ++var)
    {
        ret = rt_mq_recv(testmq, &recv_buf[var], sizeof(recv_buf[0]), RT_WAITING_FOREVER);
        uassert_true(ret >= 0);
        uassert_true(recv_buf[var] == (var + 1));
    }
    /* phase 2: the urgent message (value 1) must arrive first */
    for (int var = 0; var < 3; ++var)
    {
        ret = rt_mq_recv(testmq, &recv_buf[var], sizeof(recv_buf[0]), RT_WAITING_FOREVER);
        uassert_true(ret >= 0);
        uassert_true(recv_buf[var] == (var + 1));
    }
#ifdef RT_USING_MESSAGEQUEUE_PRIORITY
    rt_int32_t msg_prio;
    /* wait until the sender has queued all MAX_MSGS priority messages */
    while (testmq->entry == MAX_MSGS)
    {
        rt_thread_delay(100);
    }
    /* phase 3: messages arrive highest-priority first: MAX_MSGS..1 */
    for (int var = 0; var < MAX_MSGS; ++var)
    {
        ret = rt_mq_recv_prio(testmq, &recv_buf[var], sizeof(recv_buf[0]), &msg_prio, RT_WAITING_FOREVER, RT_UNINTERRUPTIBLE);
        rt_kprintf("msg_prio = %d\r\n", msg_prio);
        uassert_true(ret >= 0);
        uassert_true(recv_buf[var] == (MAX_MSGS - var));
    }
#endif
}
/* Receiver thread entry: mirror of mq_send_entry() on the consumer side;
 * signals MQRECV_FINIHS when both queues have been drained. */
static void mq_recv_entry(void *param)
{
    mq_recv_case(&static_mq);
#ifdef RT_USING_HEAP
    if(dynamic_mq != RT_NULL)
    {
        mq_recv_case(dynamic_mq);
    }
#endif /* RT_USING_HEAP */
    rt_event_send(&finish_e, MQRECV_FINIHS);
}
/* Start both worker threads and block until BOTH have signalled
 * completion (RT_EVENT_FLAG_AND on the two finish bits). */
static void test_mq_testcase(void)
{
    rt_thread_startup(&mq_send_thread);
    rt_thread_startup(&mq_recv_thread);
    rt_event_recv(&finish_e, MQSEND_FINISH | MQRECV_FINIHS, RT_EVENT_FLAG_AND, RT_WAITING_FOREVER, RT_NULL);
}
/* Detach the static message queue and verify the result code. */
static void test_mq_detach(void)
{
    rt_err_t err;

    err = rt_mq_detach(&static_mq);
    uassert_true(RT_EOK == err);
}
/* Delete the dynamically created message queue (heap builds only). */
static void test_mq_delete(void)
{
#ifdef RT_USING_HEAP
    rt_err_t err;

    err = rt_mq_delete(dynamic_mq);
    uassert_true(RT_EOK == err);
#endif /* RT_USING_HEAP */
}
/* Testcase setup: initialise the two static worker threads (sender at
 * priority 22, receiver at 23) and the finish event. On SMP both threads
 * are pinned to CPU 0 so the assertions on queue state stay deterministic. */
static rt_err_t utest_tc_init(void)
{
    rt_err_t ret ;
    ret = rt_thread_init(&mq_send_thread, "mq_send", mq_send_entry, RT_NULL, mq_send_stack, sizeof(mq_send_stack), 22, 20);
    if(ret != RT_EOK)
        return -RT_ERROR;
    ret = rt_thread_init(&mq_recv_thread, "mq_recv", mq_recv_entry, RT_NULL, mq_recv_stack, sizeof(mq_recv_stack), 23, 20);
    if(ret != RT_EOK)
        return -RT_ERROR;
#ifdef RT_USING_SMP
    /* bind both threads to one core to keep the handshake ordering stable */
    rt_thread_control(&mq_send_thread, RT_THREAD_CTRL_BIND_CPU, (void *)0);
    rt_thread_control(&mq_recv_thread, RT_THREAD_CTRL_BIND_CPU, (void *)0);
#endif
    ret = rt_event_init(&finish_e, "finish", RT_IPC_FLAG_FIFO);
    if(ret != RT_EOK)
        return -RT_ERROR;
    return RT_EOK;
}
/* Testcase teardown: nothing to release (queues detached/deleted in units). */
static rt_err_t utest_tc_cleanup(void)
{
    return RT_EOK;
}
/* Register all message-queue units; order matters (init/create first,
 * detach/delete last). */
static void testcase(void)
{
    UTEST_UNIT_RUN(test_mq_init);
    UTEST_UNIT_RUN(test_mq_create);
    UTEST_UNIT_RUN(test_mq_testcase);
    UTEST_UNIT_RUN(test_mq_detach);
    UTEST_UNIT_RUN(test_mq_delete);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.messagequeue_tc", utest_tc_init, utest_tc_cleanup, 1000);

View File

@ -0,0 +1,68 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-12-25 Shell the first version
*/
#include <rtthread.h>
#include "utest.h"
#define TEST_LOOP_TIMES 20                 /* kprintf iterations per worker thread */
static struct rt_semaphore _thr_exit_sem;  /* released once by each worker on exit */
/* Worker: print its own thread pointer TEST_LOOP_TIMES times (to contend
 * on the console lock), then signal the exit semaphore. `param` unused. */
static void _thread_entry(void *param)
{
    size_t loop;

    (void)param;
    for (loop = 0; loop < TEST_LOOP_TIMES; loop++)
    {
        rt_kprintf("This is thread %p\n", rt_thread_self());
        rt_thread_mdelay(1);
    }
    rt_sem_release(&_thr_exit_sem);
}
#define TEST_THREAD_COUNT 16
static void mtsafe_kprint_tc(void)
{
for (size_t i = 0; i < TEST_THREAD_COUNT; i++)
{
rt_thread_t new_thread =
rt_thread_create(
"test",
_thread_entry,
NULL,
UTEST_THR_STACK_SIZE,
UTEST_THR_PRIORITY,
100);
rt_thread_startup(new_thread);
}
for (size_t i = 0; i < TEST_THREAD_COUNT; i++)
{
rt_sem_take(&_thr_exit_sem, RT_WAITING_FOREVER);
}
}
/* Testcase setup: create the exit-counting semaphore (initial count 0). */
static rt_err_t utest_tc_init(void)
{
    rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);
    return RT_EOK;
}
/* Testcase teardown: release the semaphore's kernel object. */
static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_thr_exit_sem);
    return RT_EOK;
}
/* Register the mt-safe kprintf unit with the utest framework. */
static void testcase(void)
{
    UTEST_UNIT_RUN(mtsafe_kprint_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.mtsafe_kprint", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,353 @@
/*
* Copyright (c) 2006-2019, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#define __RT_IPC_SOURCE__
#include <rtthread.h>
#include <stdlib.h>
#include "utest.h"
/* larger stacks on 64-bit targets */
#ifdef ARCH_CPU_64BIT
#define THREAD_STACKSIZE 8192
#else
#define THREAD_STACKSIZE 4096
#endif
#define MUTEX_NUM 3   /* mutexes forming the priority-inheritance chain */
#define THREAD_NUM 5  /* competing threads in test_mutex_pi */
static struct rt_mutex _mutex[MUTEX_NUM]; /* shared PI test mutexes */
static volatile int _sync_flag;           /* start gate + completion counter */
/* Competing thread for test_mutex_pi. `para` is its numeric id; the id
 * staggers its start delay and selects which mutex it contends on.
 * It verifies its own priority is only ever boosted while it holds a
 * contended mutex, and returns to init_priority afterwards. */
static void test_thread_entry(void *para)
{
    /* wait for the start gate set by test_mutex_pi() */
    while (!_sync_flag)
    {
        rt_thread_delay(1);
    }
    rt_ubase_t thread_id = (rt_ubase_t)para;
    rt_err_t ret;
    /* stagger arrivals so contention happens in a known order */
    rt_thread_mdelay(50 + thread_id * 100);
    ret = rt_mutex_take(&_mutex[thread_id % MUTEX_NUM], RT_WAITING_FOREVER);
    uassert_true(ret == RT_EOK);
    uassert_true(RT_SCHED_PRIV(rt_thread_self()).current_priority == RT_SCHED_PRIV(rt_thread_self()).init_priority);
    if (thread_id == 1)
    {
        rt_thread_mdelay(100); // wait for main thread re-get _mutex[1]
        /* main (boosted chain) waits on us, so we inherit priority 8 */
        uassert_true(RT_SCHED_PRIV(rt_thread_self()).current_priority == 8);
    }
    ret = rt_mutex_release(&_mutex[thread_id % MUTEX_NUM]);
    uassert_true(ret == RT_EOK);
    uassert_true(RT_SCHED_PRIV(rt_thread_self()).current_priority == RT_SCHED_PRIV(rt_thread_self()).init_priority);
    _sync_flag ++;
}
/* Main actor of test_mutex_pi: takes all three mutexes and checks that its
 * effective priority tracks the highest-priority waiter at each step
 * (priority inheritance), then de-boosts as each mutex is released.
 * The expected numbers follow the waiter priorities {13, 9, 8, 7, 11}. */
static void test_main_thread_entry(void *para)
{
    /* wait for the start gate set by test_mutex_pi() */
    while (!_sync_flag)
    {
        rt_thread_delay(1);
    }
    rt_err_t ret;
    ret = rt_mutex_take(&_mutex[0], RT_WAITING_FOREVER);
    uassert_true(ret == RT_EOK);
    uassert_true(RT_SCHED_PRIV(rt_thread_self()).current_priority == 12);
    rt_thread_mdelay(100); // wait for t0 take mutex0
    /* t0 has prio 13 (lower than ours): no boost expected */
    uassert_true(RT_SCHED_PRIV(rt_thread_self()).current_priority == 12);
    ret = rt_mutex_take(&_mutex[1], RT_WAITING_FOREVER);
    uassert_true(ret == RT_EOK);
    uassert_true(RT_SCHED_PRIV(rt_thread_self()).current_priority == 12);
    rt_thread_mdelay(100); // wait for t1 take mutex1
    /* t1 (prio 9) now waits on mutex1 we hold: boosted to 9 */
    uassert_true(RT_SCHED_PRIV(rt_thread_self()).current_priority == 9);
    ret = rt_mutex_take(&_mutex[2], RT_WAITING_FOREVER);
    uassert_true(ret == RT_EOK);
    uassert_true(RT_SCHED_PRIV(rt_thread_self()).current_priority == 9);
    rt_thread_mdelay(100); // wait for t2 take mutex2
    /* t2 (prio 8) waits on mutex2: boosted to 8 */
    uassert_true(RT_SCHED_PRIV(rt_thread_self()).current_priority == 8);
    rt_thread_mdelay(100); // wait for t3 take mutex0
    /* t3 (prio 7) waits on mutex0: boosted to 7 */
    uassert_true(RT_SCHED_PRIV(rt_thread_self()).current_priority == 7);
    rt_thread_mdelay(100); // wait for t4 take mutex1
    uassert_true(RT_SCHED_PRIV(rt_thread_self()).current_priority == 7);
    rt_thread_mdelay(100);
    rt_mutex_release(&_mutex[0]); // give _mutex0 to t3
    /* highest remaining waiter is t2 (prio 8) */
    uassert_true(RT_SCHED_PRIV(rt_thread_self()).current_priority == 8);
    rt_thread_mdelay(100);
    rt_mutex_release(&_mutex[1]); // give _mutex1 to t1
    uassert_true(RT_SCHED_PRIV(rt_thread_self()).current_priority == 8);
    rt_thread_mdelay(50);
    rt_mutex_take(&_mutex[1], RT_WAITING_FOREVER); // re-get _mutex1, which is hold by t1
    uassert_true(RT_SCHED_PRIV(rt_thread_self()).current_priority == 8);
    rt_mutex_release(&_mutex[1]); // give _mutex1 to thread t1
    uassert_true(RT_SCHED_PRIV(rt_thread_self()).current_priority == 8);
    rt_thread_mdelay(100);
    rt_mutex_release(&_mutex[2]);
    /* all mutexes released: back to the initial priority 12 */
    uassert_true(RT_SCHED_PRIV(rt_thread_self()).current_priority == 12);
    _sync_flag ++;
}
/* Orchestrates the priority-inheritance scenario: creates the main actor
 * (prio 12) and THREAD_NUM competitors with priorities {13, 9, 8, 7, 11},
 * opens the start gate, and waits for all of them to finish before
 * detaching the mutexes. */
static void test_mutex_pi(void)
{
    rt_thread_t t_main;
    rt_thread_t t[THREAD_NUM];
    rt_uint8_t prio[THREAD_NUM] = {13, 9, 8, 7, 11}; // prio of threads
    for (int i = 0; i < MUTEX_NUM; i++)
    {
        rt_mutex_init(&_mutex[i], "test1", 0);
    }
    _sync_flag = 0;
    t_main = rt_thread_create("t_main", test_main_thread_entry, RT_NULL, THREAD_STACKSIZE, 12, 10000);
    uassert_true(t_main != RT_NULL);
    rt_thread_startup(t_main);
    for (rt_ubase_t i = 0; i < THREAD_NUM; i++)
    {
        t[i] = rt_thread_create("t", test_thread_entry, (void *)i, THREAD_STACKSIZE, prio[i], 10000);
        uassert_true(t[i] != RT_NULL);
        rt_thread_startup(t[i]);
    }
    /* open the start gate for all worker threads */
    _sync_flag = 1;
    /* wait until every worker plus the main actor incremented the flag */
    while (_sync_flag != THREAD_NUM + 1 + 1)
    {
        rt_thread_mdelay(100);
    }
    for (int i = 0; i < MUTEX_NUM; i++)
    {
        rt_mutex_detach(&_mutex[i]);
    }
}
static struct rt_mutex _timeout_mutex; /* mutex contended in the timeout scenario */
/* Holder thread for the timeout test: takes the mutex, observes the boost
 * to the waiter's priority (10) while the waiter blocks, then observes the
 * de-boost back to 12 after the waiter's take times out.
 * Fix: the success check previously compared against `-RT_EOK`; RT_EOK is 0
 * so the negation was a no-op — use RT_EOK for clarity and consistency with
 * every other check in this file. */
static void test_main_timeout_entry(void *para)
{
    rt_err_t ret;
    ret = rt_mutex_take(&_timeout_mutex, RT_WAITING_FOREVER);
    uassert_true(ret == RT_EOK);
    rt_thread_mdelay(100);
    /* t2 (prio 10) is blocked on us: priority inherited */
    uassert_true(RT_SCHED_PRIV(rt_thread_self()).current_priority == 10);
    rt_thread_mdelay(100);
    /* t2's take timed out: boost must have been removed */
    uassert_true(RT_SCHED_PRIV(rt_thread_self()).current_priority == 12);
    rt_mutex_release(&_timeout_mutex);
    _sync_flag ++;
}
/* Waiter thread: after a short delay, attempt to take the mutex held by
 * the main thread with a 100 ms timeout and expect -RT_ETIMEOUT. */
static void test_timeout_entry(void *para)
{
    rt_err_t take_ret;

    (void)para;
    rt_thread_mdelay(50);
    take_ret = rt_mutex_take(&_timeout_mutex, rt_tick_from_millisecond(100));
    uassert_true(take_ret == -RT_ETIMEOUT);
    _sync_flag++;
}
/* Verifies that a priority boost caused by a timed mutex wait is undone
 * when the wait times out: holder t1 (prio 12) vs waiter t2 (prio 10). */
static void test_mutex_pi_timeout(void)
{
    _sync_flag = 0;
    rt_mutex_init(&_timeout_mutex, "_timeout_mutex", 0);
    t1 = rt_thread_create("t1", test_main_timeout_entry, RT_NULL, THREAD_STACKSIZE, 12, 10000);
    uassert_true(t1 != RT_NULL);
    rt_thread_startup(t1);
    rt_thread_t t2 = rt_thread_create("t2", test_timeout_entry, (void *)t1, THREAD_STACKSIZE, 10, 10000);
    uassert_true(t2 != RT_NULL);
    rt_thread_startup(t2);
    /* wait until both threads incremented the completion counter */
    while (_sync_flag != 2)
    {
        rt_thread_mdelay(100);
    }
    rt_mutex_detach(&_timeout_mutex);
}
#define TC_THREAD_NUM 4              /* threads forming the dependency chain */
#define TC_MUTEX_NUM TC_THREAD_NUM   /* one mutex per chain member */
static rt_thread_t t[TC_THREAD_NUM], t_hi_prio; /* chain threads + high-prio waiter */
static struct rt_mutex m[TC_MUTEX_NUM];         /* chained mutexes m[0]..m[N-1] */
/* Chain member `id`: takes its own mutex m[id], then blocks on m[id-1]
 * (held by the previous member), forming a dependency chain so a boost on
 * the last mutex must propagate recursively down to thread 0. Thread 0
 * anchors the chain by sleeping while holding m[0]. */
static void test_recursive_mutex_depend_entry(void *para)
{
    rt_ubase_t id = (rt_ubase_t)para;
    rt_mutex_take(&m[id], RT_WAITING_FOREVER);
    /* give the next chain member time to take its own mutex */
    rt_thread_mdelay(50);
    if (id != 0)
    {
        rt_mutex_take(&m[id - 1], RT_WAITING_FOREVER);
    }
    if (id == 0)
    {
        /* hold the chain anchor long enough for the boost checks */
        rt_thread_mdelay(250);
        rt_mutex_release(&m[id]);
    }
    else
    {
        rt_mutex_release(&m[id - 1]);
        rt_mutex_release(&m[id]);
    }
    _sync_flag ++;
}
/* High-priority waiter: block on the last mutex of the chain with a
 * 100 ms timeout; the chain holds it longer, so the take must time out. */
static void test_recursive_mutex_depend_hi_pri_entry(void *para)
{
    rt_err_t take_ret;

    (void)para;
    rt_thread_mdelay(100);
    take_ret = rt_mutex_take(&m[TC_MUTEX_NUM - 1], rt_tick_from_millisecond(100));
    uassert_true(take_ret == -RT_ETIMEOUT);
    _sync_flag++;
}
/* Verifies recursive priority propagation: when the prio-3 waiter blocks
 * on the last mutex of the chain, every chain member (init prio 10) must
 * be boosted to 3; after the waiter's timeout, all must return to 10. */
static void test_mutex_pi_recursive_prio_update(void)
{
    _sync_flag = 0;
    for (int i = 0; i < TC_MUTEX_NUM; i++)
    {
        rt_mutex_init(&m[i], "test", 0);
    }
    for (rt_ubase_t i = 0; i < TC_THREAD_NUM; i++)
    {
        t[i] = rt_thread_create("t", test_recursive_mutex_depend_entry, (void *)i, THREAD_STACKSIZE, 10, 10000);
        rt_thread_startup(t[i]);
    }
    t_hi_prio = rt_thread_create("t", test_recursive_mutex_depend_hi_pri_entry, (void *)RT_NULL, THREAD_STACKSIZE, 3, 10000);
    rt_thread_startup(t_hi_prio);
    rt_thread_mdelay(150);
    /* waiter is blocked: the boost must have propagated down the chain */
    for (int i = 0; i < TC_THREAD_NUM; i++)
    {
        uassert_true(RT_SCHED_PRIV(t[i]).current_priority == 3);
    }
    rt_thread_mdelay(100);
    /* waiter timed out: every chain member is back at its init priority */
    for (int i = 0; i < TC_THREAD_NUM; i++)
    {
        uassert_true(RT_SCHED_PRIV(t[i]).current_priority == 10);
    }
    while (_sync_flag != TC_THREAD_NUM + 1)
    {
        rt_thread_mdelay(100);
    }
    for (int i = 0; i < TC_MUTEX_NUM; i++)
    {
        rt_mutex_detach(&m[i]);
    }
    _sync_flag ++;
}
/* High-priority waiter that blocks forever on the last chain mutex and is
 * woken externally by the timer below; the interrupted take must return
 * -RT_EINTR. */
static void test_mutex_waiter_to_wakeup_entry(void *para)
{
    rt_thread_mdelay(100);
    rt_err_t err = rt_mutex_take(&m[TC_MUTEX_NUM - 1], RT_WAITING_FOREVER);
    uassert_true(err == -RT_EINTR);
    _sync_flag ++;
}
/* One-shot timer callback: forcibly resume the blocked high-prio waiter. */
static void wakeup_func(void *para)
{
    rt_thread_resume(t_hi_prio);
}
/* Same chain setup as the recursive test, but the prio-3 waiter is woken
 * by a one-shot timer instead of timing out; the de-boost of the whole
 * chain must still happen when the waiter is removed from the wait queue. */
static void test_mutex_pi_wakeup_mutex_waiter(void)
{
    struct rt_timer wakeup_timer;
    _sync_flag = 0;
    for (int i = 0; i < TC_MUTEX_NUM; i++)
    {
        rt_mutex_init(&m[i], "test", 0);
    }
    for (rt_ubase_t i = 0; i < TC_THREAD_NUM; i++)
    {
        t[i] = rt_thread_create("t", test_recursive_mutex_depend_entry, (void *)i, THREAD_STACKSIZE, 10, 10000);
        rt_thread_startup(t[i]);
    }
    t_hi_prio = rt_thread_create("t", test_mutex_waiter_to_wakeup_entry, (void *)RT_NULL, THREAD_STACKSIZE, 3, 10000);
    rt_thread_startup(t_hi_prio);
    /* fire once after 200 ms to resume the blocked waiter */
    rt_timer_init(&wakeup_timer, "wakeup_timer", wakeup_func, RT_NULL, rt_tick_from_millisecond(200), RT_TIMER_FLAG_ONE_SHOT);
    rt_timer_start(&wakeup_timer);
    rt_thread_mdelay(150);
    /* waiter blocked: chain boosted to priority 3 */
    for (int i = 0; i < TC_THREAD_NUM; i++)
    {
        uassert_true(RT_SCHED_PRIV(t[i]).current_priority == 3);
    }
    rt_thread_mdelay(100);
    /* waiter woken up: chain de-boosted back to 10 */
    for (int i = 0; i < TC_THREAD_NUM; i++)
    {
        uassert_true(RT_SCHED_PRIV(t[i]).current_priority == 10);
    }
    while (_sync_flag != TC_THREAD_NUM + 1)
    {
        rt_thread_mdelay(100);
    }
    for (int i = 0; i < TC_MUTEX_NUM; i++)
    {
        rt_mutex_detach(&m[i]);
    }
    rt_timer_detach(&wakeup_timer);
}
/* Per-testcase setup: nothing to prepare. */
static rt_err_t utest_tc_init(void)
{
    return RT_EOK;
}
/* Per-testcase teardown: nothing to release. */
static rt_err_t utest_tc_cleanup(void)
{
    return RT_EOK;
}
/* Register the priority-inheritance units with the utest framework. */
static void testcase(void)
{
    UTEST_UNIT_RUN(test_mutex_pi);
    UTEST_UNIT_RUN(test_mutex_pi_recursive_prio_update);
    UTEST_UNIT_RUN(test_mutex_pi_timeout);
    UTEST_UNIT_RUN(test_mutex_pi_wakeup_mutex_waiter);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.mutex_pi_tc", utest_tc_init, utest_tc_cleanup, 1000);
/********************* end of file ************************/

View File

@ -0,0 +1,798 @@
/*
* Copyright (c) 2006-2019, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-09.01 luckyzjq the first version
* 2023-09-15 xqyjlj change stack size in cpu64
*/
#define __RT_IPC_SOURCE__
#include <rtthread.h>
#include <stdlib.h>
#include "utest.h"
/* larger stacks on 64-bit targets */
#ifdef ARCH_CPU_64BIT
#define THREAD_STACKSIZE 8192
#else
#define THREAD_STACKSIZE 4096
#endif
static struct rt_mutex static_mutex;  /* statically initialised mutex under test */
#ifdef RT_USING_HEAP
static rt_mutex_t dynamic_mutex;      /* heap-created mutex, exercised when available */
#endif /* RT_USING_HEAP */
static volatile int _sync_flag;       /* counts worker-thread completions per unit */
/* init test */
/* Verify that a static mutex can be initialised and detached repeatedly
 * (two rounds, identical to the original back-to-back sequence). */
static void test_static_mutex_init(void)
{
    for (int round = 0; round < 2; round++)
    {
        if (rt_mutex_init(&static_mutex, "static_mutex", RT_IPC_FLAG_PRIO) != RT_EOK)
        {
            uassert_true(RT_FALSE);
        }
        if (rt_mutex_detach(&static_mutex) != RT_EOK)
        {
            uassert_true(RT_FALSE);
        }
    }
    uassert_true(RT_TRUE);
}
/* static take test */
static void static_mutex_take_entry(void *param)
{
rt_err_t result;
rt_mutex_t mutex;
int rand_num = rand() % 0x1000;
mutex = (rt_mutex_t)param;
result = rt_mutex_take(mutex, rand_num);
if (RT_EOK == result)
{
uassert_true(RT_FALSE);
}
_sync_flag++;
}
/* Take path: this thread takes the mutex and deliberately never releases
 * it, then starts a second thread whose timed take must fail. */
static void test_static_mutex_take(void)
{
    rt_err_t result;
    _sync_flag = 0;
    result = rt_mutex_init(&static_mutex, "static_mutex", RT_IPC_FLAG_PRIO);
    if (RT_EOK != result)
    {
        uassert_true(RT_FALSE);
        return;
    }
    /* take mutex and not release */
    result = rt_mutex_take(&static_mutex, RT_WAITING_FOREVER);
    if (RT_EOK != result)
        uassert_true(RT_FALSE);
    rt_thread_t tid = rt_thread_create("mutex_th",
                                       static_mutex_take_entry,
                                       &static_mutex,
                                       THREAD_STACKSIZE,
                                       10,
                                       10);
    if (RT_NULL == tid)
    {
        uassert_true(RT_FALSE);
        return;
    }
    /* startup thread take second */
    rt_thread_startup(tid);
    /* wait for the second thread's (failing) take to complete */
    while (_sync_flag != 1)
    {
        rt_thread_mdelay(10);
    }
    result = rt_mutex_detach(&static_mutex);
    if (RT_EOK != result)
        uassert_true(RT_FALSE);
    uassert_true(RT_TRUE);
}
/* static release test */
static void static_mutex_release_entry(void *param)
{
rt_err_t result;
rt_mutex_t mutex;
int rand_num = rand() % 0x1000;
mutex = (rt_mutex_t)param;
result = rt_mutex_take(mutex, rand_num);
if (RT_EOK != result)
{
uassert_true(RT_FALSE);
}
_sync_flag++;
}
/* Release path: releasing an un-owned mutex must fail; after a proper
 * take/release pair, a second thread must be able to take the mutex. */
static void test_static_mutex_release(void)
{
    rt_err_t result;
    _sync_flag = 0;
    result = rt_mutex_init(&static_mutex, "static_mutex", RT_IPC_FLAG_PRIO);
    if (RT_EOK != result)
    {
        uassert_true(RT_FALSE);
        return;
    }
    /* releasing a mutex we do not own must return an error */
    result = rt_mutex_release(&static_mutex);
    uassert_true(result < 0);
    /* take mutex */
    result = rt_mutex_take(&static_mutex, RT_WAITING_FOREVER);
    if (RT_EOK != result)
        uassert_true(RT_FALSE);
    /* release mutex */
    result = rt_mutex_release(&static_mutex);
    if (RT_EOK != result)
        uassert_true(RT_FALSE);
    rt_thread_t tid = rt_thread_create("mutex_th",
                                       static_mutex_release_entry,
                                       &static_mutex,
                                       THREAD_STACKSIZE,
                                       10,
                                       10);
    if (RT_NULL == tid)
    {
        uassert_true(RT_FALSE);
        return;
    }
    /* startup thread and take mutex second */
    rt_thread_startup(tid);
    /* wait for the second thread's (succeeding) take to complete */
    while (_sync_flag != 1)
    {
        rt_thread_mdelay(10);
    }
    result = rt_mutex_detach(&static_mutex);
    if (RT_EOK != result)
        uassert_true(RT_FALSE);
    uassert_true(RT_TRUE);
}
/* static trytake test */
static void static_mutex_trytake_entry(void *param)
{
rt_err_t result;
rt_mutex_t mutex;
mutex = (rt_mutex_t)param;
result = rt_mutex_trytake(mutex);
if (RT_EOK == result)
{
uassert_true(RT_FALSE);
}
_sync_flag++;
}
/* Trytake path: this thread takes the mutex and never releases it; a
 * second thread's rt_mutex_trytake() must fail immediately. */
static void test_static_mutex_trytake(void)
{
    rt_err_t result;
    _sync_flag = 0;
    result = rt_mutex_init(&static_mutex, "static_mutex", RT_IPC_FLAG_PRIO);
    if (RT_EOK != result)
    {
        uassert_true(RT_FALSE);
        return;
    }
    /* take mutex and not release */
    result = rt_mutex_take(&static_mutex, RT_WAITING_FOREVER);
    if (RT_EOK != result)
        uassert_true(RT_FALSE);
    rt_thread_t tid = rt_thread_create("mutex_th",
                                       static_mutex_trytake_entry,
                                       &static_mutex,
                                       THREAD_STACKSIZE,
                                       10,
                                       10);
    if (RT_NULL == tid)
    {
        uassert_true(RT_FALSE);
        return;
    }
    /* startup thread and trytake mutex second */
    rt_thread_startup(tid);
    /* wait for the second thread's trytake attempt to complete */
    while (_sync_flag != 1)
    {
        rt_thread_mdelay(10);
    }
    result = rt_mutex_detach(&static_mutex);
    if (RT_EOK != result)
        uassert_true(RT_FALSE);
    uassert_true(RT_TRUE);
}
static rt_thread_t tid1 = RT_NULL; /* observer thread (highest priority, 9) */
static rt_thread_t tid2 = RT_NULL; /* waiter thread (priority 10) */
static rt_thread_t tid3 = RT_NULL; /* holder thread (lowest priority, 11) */
/* static mutex priority reverse test */
/* Observer: after the scenario has started, thread3 holds the mutex and
 * thread2 is blocked on it, so priority inheritance must have raised
 * thread3's current priority to equal thread2's. */
static void static_thread1_entry(void *param)
{
    /* let system schedule */
    rt_thread_mdelay(100);
    /* thread3 holds the mutex while thread2 waits on it */
    /* check thread2 and thread3 priority */
    if (RT_SCHED_PRIV(tid2).current_priority != RT_SCHED_PRIV(tid3).current_priority)
    {
        uassert_true(RT_FALSE);
    }
    else
    {
        uassert_true(RT_TRUE);
    }
    _sync_flag++;
}
/* Mid-priority thread: block on the mutex held by thread3 and release it
 * once acquired. Its wait is what triggers thread3's priority boost. */
static void static_thread2_entry(void *param)
{
    rt_mutex_t mtx = (rt_mutex_t)param;

    /* yield first so thread3 can grab the mutex */
    rt_thread_mdelay(50);
    if (rt_mutex_take(mtx, RT_WAITING_FOREVER) == RT_EOK)
    {
        rt_mutex_release(mtx);
    }
    _sync_flag++;
}
/* Lowest-priority holder: takes the mutex immediately and busy-waits
 * (without sleeping, so it stays runnable) for half a second while
 * holding it, giving thread2 time to block on the mutex. */
static void static_thread3_entry(void *param)
{
    rt_tick_t tick;
    rt_err_t result;
    rt_mutex_t mutex = (rt_mutex_t)param;
    result = rt_mutex_take(mutex, RT_WAITING_FOREVER);
    if (result != RT_EOK)
    {
        uassert_true(RT_FALSE);
    }
    /* spin for RT_TICK_PER_SECOND / 2 ticks while holding the mutex */
    tick = rt_tick_get();
    while (rt_tick_get() - tick < (RT_TICK_PER_SECOND / 2));
    rt_mutex_release(mutex);
    _sync_flag++;
}
/* Priority-inversion driver: starts thread1 (prio 10-1, checker),
 * thread2 (prio 10, waiter) and thread3 (prio 10+1, holder) on the
 * PRIO-mode static mutex, then waits for all three to bump _sync_flag. */
static void test_static_pri_reverse(void)
{
    rt_err_t result;
    tid1 = RT_NULL;
    tid2 = RT_NULL;
    tid3 = RT_NULL;
    _sync_flag = 0;
    result = rt_mutex_init(&static_mutex, "static_mutex", RT_IPC_FLAG_PRIO);
    if (RT_EOK != result)
    {
        uassert_true(RT_FALSE);
        return;
    }
    /* thread1: highest priority of the three, performs the check */
    tid1 = rt_thread_create("thread1",
                            static_thread1_entry,
                            &static_mutex,
                            UTEST_THR_STACK_SIZE,
                            10 - 1,
                            10);
    if (tid1 != RT_NULL)
        rt_thread_startup(tid1);
    /* thread2: middle priority, will block on the held mutex */
    tid2 = rt_thread_create("thread2",
                            static_thread2_entry,
                            &static_mutex,
                            UTEST_THR_STACK_SIZE,
                            10,
                            10);
    if (tid2 != RT_NULL)
        rt_thread_startup(tid2);
    /* thread3: lowest priority, grabs the mutex first */
    tid3 = rt_thread_create("thread3",
                            static_thread3_entry,
                            &static_mutex,
                            UTEST_THR_STACK_SIZE,
                            10 + 1,
                            10);
    if (tid3 != RT_NULL)
        rt_thread_startup(tid3);
    /* wait until all three threads have reported completion */
    while (_sync_flag != 3)
    {
        rt_thread_mdelay(10);
    }
    result = rt_mutex_detach(&static_mutex);
    if (RT_EOK != result)
        uassert_true(RT_FALSE);
    uassert_true(RT_TRUE);
}
/* create test */
/* Create and delete a dynamic mutex twice.
 * NOTE(review): both iterations pass RT_IPC_FLAG_PRIO although the
 * second was labeled "FIFO mode" — presumably because RT-Thread
 * mutexes only support priority-ordered waiting; confirm against the
 * rt_mutex_create() documentation. */
static void test_dynamic_mutex_create(void)
{
    rt_err_t result = -RT_ERROR;
    /* PRIO mode */
    dynamic_mutex = rt_mutex_create("dynamic_mutex", RT_IPC_FLAG_PRIO);
    if (RT_NULL == dynamic_mutex)
    {
        uassert_true(RT_FALSE);
    }
    result = rt_mutex_delete(dynamic_mutex);
    if (RT_EOK != result)
    {
        uassert_true(RT_FALSE);
    }
    /* second create/delete cycle (still RT_IPC_FLAG_PRIO, see note) */
    dynamic_mutex = rt_mutex_create("dynamic_mutex", RT_IPC_FLAG_PRIO);
    if (RT_NULL == dynamic_mutex)
    {
        uassert_true(RT_FALSE);
    }
    result = rt_mutex_delete(dynamic_mutex);
    if (RT_EOK != result)
    {
        uassert_true(RT_FALSE);
    }
    uassert_true(RT_TRUE);
}
/* dynamic take test */
static void dynamic_mutex_take_entry(void *param)
{
rt_err_t result;
rt_mutex_t mutex;
int rand_num = rand() % 0x1000;
mutex = (rt_mutex_t)param;
result = rt_mutex_take(mutex, rand_num);
if (RT_EOK == result)
{
uassert_true(RT_FALSE);
}
_sync_flag++;
}
/* Hold a dynamic mutex on the main thread and verify that a worker's
 * timed take fails while the mutex stays owned. */
static void test_dynamic_mutex_take(void)
{
    rt_thread_t worker;
    rt_err_t err;

    _sync_flag = 0;
    dynamic_mutex = rt_mutex_create("dynamic_mutex", RT_IPC_FLAG_PRIO);
    if (dynamic_mutex == RT_NULL)
    {
        uassert_true(RT_FALSE);
        return;
    }
    /* own the mutex and keep it for the whole test */
    err = rt_mutex_take(dynamic_mutex, RT_WAITING_FOREVER);
    if (err != RT_EOK)
        uassert_true(RT_FALSE);
    worker = rt_thread_create("mutex_th",
                              dynamic_mutex_take_entry,
                              dynamic_mutex,
                              THREAD_STACKSIZE,
                              10,
                              10);
    if (worker == RT_NULL)
    {
        uassert_true(RT_FALSE);
        return;
    }
    /* run the worker and wait for its take attempt to finish */
    rt_thread_startup(worker);
    while (_sync_flag != 1)
        rt_thread_mdelay(10);
    err = rt_mutex_delete(dynamic_mutex);
    if (err != RT_EOK)
        uassert_true(RT_FALSE);
    uassert_true(RT_TRUE);
}
/* dynamic release test */
static void dynamic_mutex_release_entry(void *param)
{
rt_err_t result;
rt_mutex_t mutex;
int rand_num = rand() % 0x1000;
mutex = (rt_mutex_t)param;
result = rt_mutex_take(mutex, rand_num);
if (RT_EOK != result)
{
uassert_true(RT_FALSE);
}
_sync_flag++;
}
/* Verify release semantics of a dynamic mutex: releasing an un-owned
 * mutex fails, and a take/release pair leaves it free for a worker. */
static void test_dynamic_mutex_release(void)
{
    rt_thread_t worker;
    rt_err_t err;

    _sync_flag = 0;
    dynamic_mutex = rt_mutex_create("dynamic_mutex", RT_IPC_FLAG_PRIO);
    if (dynamic_mutex == RT_NULL)
    {
        uassert_true(RT_FALSE);
        return;
    }
    /* releasing a mutex we do not own must fail */
    err = rt_mutex_release(dynamic_mutex);
    uassert_true(err < 0);
    /* normal take ... */
    err = rt_mutex_take(dynamic_mutex, RT_WAITING_FOREVER);
    if (err != RT_EOK)
        uassert_true(RT_FALSE);
    /* ... then release, leaving the mutex free for the worker */
    err = rt_mutex_release(dynamic_mutex);
    if (err != RT_EOK)
        uassert_true(RT_FALSE);
    worker = rt_thread_create("mutex_th",
                              dynamic_mutex_release_entry,
                              dynamic_mutex,
                              THREAD_STACKSIZE,
                              10,
                              10);
    if (worker == RT_NULL)
    {
        uassert_true(RT_FALSE);
        return;
    }
    /* the worker's take must now succeed */
    rt_thread_startup(worker);
    while (_sync_flag != 1)
        rt_thread_mdelay(10);
    err = rt_mutex_delete(dynamic_mutex);
    if (err != RT_EOK)
        uassert_true(RT_FALSE);
    uassert_true(RT_TRUE);
}
/* dynamic trytake test */
static void dynamic_mutex_trytake_entry(void *param)
{
rt_err_t result;
rt_mutex_t mutex;
mutex = (rt_mutex_t)param;
result = rt_mutex_trytake(mutex);
if (RT_EOK == result)
{
uassert_true(RT_FALSE);
}
_sync_flag++;
}
/* Hold a dynamic mutex on the main thread and verify that a worker's
 * rt_mutex_trytake() fails while the mutex is held. */
static void test_dynamic_mutex_trytake(void)
{
    rt_thread_t worker;
    rt_err_t err;

    _sync_flag = 0;
    dynamic_mutex = rt_mutex_create("dynamic_mutex", RT_IPC_FLAG_PRIO);
    if (dynamic_mutex == RT_NULL)
    {
        uassert_true(RT_FALSE);
        return;
    }
    /* own the mutex and do not release it */
    err = rt_mutex_take(dynamic_mutex, RT_WAITING_FOREVER);
    if (err != RT_EOK)
        uassert_true(RT_FALSE);
    worker = rt_thread_create("mutex_th",
                              dynamic_mutex_trytake_entry,
                              dynamic_mutex,
                              THREAD_STACKSIZE,
                              10,
                              10);
    if (worker == RT_NULL)
    {
        uassert_true(RT_FALSE);
        return;
    }
    /* run the worker and wait for its trytake attempt */
    rt_thread_startup(worker);
    while (_sync_flag != 1)
        rt_thread_mdelay(10);
    err = rt_mutex_delete(dynamic_mutex);
    if (err != RT_EOK)
        uassert_true(RT_FALSE);
    uassert_true(RT_TRUE);
}
/* dynamic mutex priority reverse test */
/* Highest-priority checker: after its delay, thread3 should hold the
 * dynamic mutex with thread2 blocked on it, so priority inheritance
 * should have boosted thread3 to thread2's priority. */
static void dynamic_thread1_entry(void *param)
{
    /* let system schedule */
    rt_thread_mdelay(100);
    /* thread3 holds the mutex while thread2 waits on it;
     * verify both now report the same current priority */
    if (RT_SCHED_PRIV(tid2).current_priority != RT_SCHED_PRIV(tid3).current_priority)
    {
        uassert_true(RT_FALSE);
    }
    else
    {
        uassert_true(RT_TRUE);
    }
    /* report completion to the driver */
    _sync_flag++;
}
/* Middle-priority waiter: sleeps briefly so thread3 can acquire the
 * mutex first, then blocks on it, triggering priority inheritance. */
static void dynamic_thread2_entry(void *param)
{
    rt_err_t result;
    rt_mutex_t mutex = (rt_mutex_t)param;
    /* let system schedule so thread3 acquires the mutex first */
    rt_thread_mdelay(50);
    result = rt_mutex_take(mutex, RT_WAITING_FOREVER);
    if (result == RT_EOK)
    {
        rt_mutex_release(mutex);
    }
    _sync_flag++;
}
/* Lowest-priority holder: takes the mutex immediately and busy-waits
 * (staying runnable) for half a second while holding it, so that the
 * higher-priority thread2 blocks on the mutex. */
static void dynamic_thread3_entry(void *param)
{
    rt_tick_t tick;
    rt_err_t result;
    rt_mutex_t mutex = (rt_mutex_t)param;
    result = rt_mutex_take(mutex, RT_WAITING_FOREVER);
    if (result != RT_EOK)
    {
        uassert_true(RT_FALSE);
    }
    /* spin for RT_TICK_PER_SECOND / 2 ticks while holding the mutex */
    tick = rt_tick_get();
    while (rt_tick_get() - tick < (RT_TICK_PER_SECOND / 2));
    rt_mutex_release(mutex);
    _sync_flag++;
}
/* Priority-inversion driver for the dynamic mutex: same structure as
 * test_static_pri_reverse(), but on a heap-allocated mutex. */
static void test_dynamic_pri_reverse(void)
{
    rt_err_t result;
    tid1 = RT_NULL;
    tid2 = RT_NULL;
    tid3 = RT_NULL;
    _sync_flag = 0;
    dynamic_mutex = rt_mutex_create("dynamic_mutex", RT_IPC_FLAG_PRIO);
    if (RT_NULL == dynamic_mutex)
    {
        uassert_true(RT_FALSE);
        return;
    }
    /* thread1: highest priority of the three, performs the check */
    tid1 = rt_thread_create("thread1",
                            dynamic_thread1_entry,
                            dynamic_mutex,
                            UTEST_THR_STACK_SIZE,
                            10 - 1,
                            10);
    if (tid1 != RT_NULL)
        rt_thread_startup(tid1);
    /* thread2: middle priority, will block on the held mutex */
    tid2 = rt_thread_create("thread2",
                            dynamic_thread2_entry,
                            dynamic_mutex,
                            UTEST_THR_STACK_SIZE,
                            10,
                            10);
    if (tid2 != RT_NULL)
        rt_thread_startup(tid2);
    /* thread3: lowest priority, grabs the mutex first */
    tid3 = rt_thread_create("thread3",
                            dynamic_thread3_entry,
                            dynamic_mutex,
                            UTEST_THR_STACK_SIZE,
                            10 + 1,
                            10);
    if (tid3 != RT_NULL)
        rt_thread_startup(tid3);
    /* wait until all three threads have reported completion */
    while (_sync_flag != 3)
    {
        rt_thread_mdelay(10);
    }
    result = rt_mutex_delete(dynamic_mutex);
    if (RT_EOK != result)
        uassert_true(RT_FALSE);
    uassert_true(RT_TRUE);
}
/* Recursive-lock worker: only unblocks once the owner has fully
 * released the mutex (hold count back to zero), then takes it twice
 * recursively. The _sync_flag == 0 check confirms the owner's first
 * release (which precedes the flag reset) already happened.
 * NOTE(review): the worker exits still holding the mutex; the driver's
 * later take suggests the kernel releases mutexes owned by a
 * terminated thread — confirm. */
static void recursive_lock_test_entry(void *param)
{
    rt_err_t result;
    rt_mutex_t mutex = (rt_mutex_t)param;
    result = rt_mutex_take(mutex, RT_WAITING_FOREVER);
    uassert_true(result == RT_EOK);
    uassert_true(_sync_flag == 0);
    /* a second take by the same owner must also succeed (recursive) */
    result = rt_mutex_take(mutex, RT_WAITING_FOREVER);
    uassert_true(result == RT_EOK);
    _sync_flag++;
}
/* Recursive-lock test: the owner takes the static mutex twice and must
 * release it twice before the worker can acquire it. The _sync_flag
 * transitions (-1 -> 0 -> 1) encode that ordering. */
static void test_recurse_lock(void)
{
    rt_err_t result;
    _sync_flag = 0;
    result = rt_mutex_init(&static_mutex, "static_mutex", RT_IPC_FLAG_PRIO);
    uassert_true(result == RT_EOK);
    /* take mutex and not release */
    result = rt_mutex_take(&static_mutex, RT_WAITING_FOREVER);
    uassert_true(result == RT_EOK);
    /* take mutex twice: a recursive take by the owner must succeed */
    result = rt_mutex_take(&static_mutex, RT_WAITING_FOREVER);
    uassert_true(result == RT_EOK);
    rt_thread_t tid = rt_thread_create("mutex_th",
                                       recursive_lock_test_entry,
                                       &static_mutex,
                                       THREAD_STACKSIZE,
                                       10,
                                       10);
    /* -1 marks "owner still holds the lock twice" */
    _sync_flag = -1;
    if (tid != RT_NULL)
        rt_thread_startup(tid);
    /* first release only drops the hold count from 2 to 1 */
    result = rt_mutex_release(&static_mutex);
    uassert_true(result == RT_EOK);
    _sync_flag = 0;
    /* second release frees the mutex, letting the worker run */
    result = rt_mutex_release(&static_mutex);
    uassert_true(result == RT_EOK);
    while (_sync_flag != 1)
    {
        rt_thread_mdelay(10);
    }
    /* NOTE(review): the worker exited without releasing; this take
     * presumably relies on terminated-thread mutex cleanup — confirm */
    result = rt_mutex_take(&static_mutex, RT_WAITING_FOREVER);
    uassert_true(result == RT_EOK);
    result = rt_mutex_detach(&static_mutex);
    uassert_true(result == RT_EOK);
}
/* Testcase setup: start each run with no stale dynamic mutex handle. */
static rt_err_t utest_tc_init(void)
{
#ifdef RT_USING_HEAP
    dynamic_mutex = RT_NULL;
#endif /* RT_USING_HEAP */
    return RT_EOK;
}
/* Testcase teardown: clear the dynamic mutex handle (each unit deletes
 * the mutex it created itself). */
static rt_err_t utest_tc_cleanup(void)
{
#ifdef RT_USING_HEAP
    dynamic_mutex = RT_NULL;
#endif /* RT_USING_HEAP */
    return RT_EOK;
}
/* Register all mutex test units; the dynamic-mutex units need a heap. */
static void testcase(void)
{
    UTEST_UNIT_RUN(test_static_mutex_init);
    UTEST_UNIT_RUN(test_static_mutex_take);
    UTEST_UNIT_RUN(test_static_mutex_release);
    UTEST_UNIT_RUN(test_static_mutex_trytake);
    UTEST_UNIT_RUN(test_static_pri_reverse);
#ifdef RT_USING_HEAP
    UTEST_UNIT_RUN(test_dynamic_mutex_create);
    UTEST_UNIT_RUN(test_dynamic_mutex_take);
    UTEST_UNIT_RUN(test_dynamic_mutex_release);
    UTEST_UNIT_RUN(test_dynamic_mutex_trytake);
    UTEST_UNIT_RUN(test_dynamic_pri_reverse);
#endif
    UTEST_UNIT_RUN(test_recurse_lock);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.mutex_tc", utest_tc_init, utest_tc_cleanup, 1000);
/********************* end of file ************************/

View File

@ -0,0 +1,107 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-01-17 Shell the first version
*/
#include <rtthread.h>
#include <stdlib.h>
#include "utest.h"
/**
* Stressful Test for Mutex
*/
#define TEST_SECONDS 30
#define TEST_LOOP_TICKS (TEST_SECONDS * RT_TICK_PER_SECOND)
#define TEST_THREAD_COUNTS (RT_CPUS_NR)
#define TEST_PROGRESS_COUNTS (36)
#define TEST_PROGRESS_ON (TEST_LOOP_TICKS/TEST_PROGRESS_COUNTS)
#define TEST_PRIORITY_HIGHEST (UTEST_THR_PRIORITY+1)
#define TEST_RANDOM_LATENCY_MAX (1000 * 1000)
static struct rt_semaphore _thr_exit_sem;
static rt_atomic_t _progress_counter;
static rt_atomic_t _exit_flag;
static struct rt_mutex _racing_lock;
/* Racing worker: hammer the shared mutex with take/release pairs until
 * the main test thread requests shutdown via _exit_flag. */
static void test_thread_entry(void *param)
{
    do
    {
        rt_mutex_take(&_racing_lock, RT_WAITING_FOREVER);
        rt_mutex_release(&_racing_lock);
    } while (!rt_atomic_load(&_exit_flag));

    /* tell the main test thread we are done */
    rt_sem_release(&_thr_exit_sem);
}
static void mutex_stress_tc(void)
{
rt_err_t error;
rt_thread_t tester;
const rt_base_t priority_base = TEST_PRIORITY_HIGHEST;
for (size_t i = 0; i < TEST_THREAD_COUNTS; i++)
{
tester = rt_thread_create(
"tester",
test_thread_entry,
(void *)0,
UTEST_THR_STACK_SIZE,
priority_base + (i % (RT_THREAD_PRIORITY_MAX - TEST_PRIORITY_HIGHEST)),
1);
rt_thread_startup(tester);
}
for (size_t i = 0; i < TEST_LOOP_TICKS; i++)
{
rt_thread_delay(1);
if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
uassert_true(1);
}
/* trigger exit request for all sub-threads */
rt_atomic_store(&_exit_flag, 1);
/* waiting for sub-threads to exit */
for (size_t i = 0; i < TEST_THREAD_COUNTS; i++)
{
error = rt_sem_take(&_thr_exit_sem, RT_WAITING_FOREVER);
uassert_int_equal(error, RT_EOK);
}
}
/* Testcase setup: seed rand() and initialize the IPC objects.
 * Fix: the original seeded srand() by dereferencing a freshly
 * malloc'd, uninitialized int, which is undefined behavior; use the
 * current tick counter as the seed instead. */
static rt_err_t utest_tc_init(void)
{
    srand((unsigned int)rt_tick_get());
    rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);
    rt_mutex_init(&_racing_lock, "ipc", RT_IPC_FLAG_PRIO);
    return RT_EOK;
}
/* Testcase teardown: release the IPC objects created in init. */
static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_thr_exit_sem);
    rt_mutex_detach(&_racing_lock);
    return RT_EOK;
}
/* Register the single stress unit. */
static void testcase(void)
{
    UTEST_UNIT_RUN(mutex_stress_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.mutex", utest_tc_init, utest_tc_cleanup, TEST_SECONDS);

View File

@ -0,0 +1,198 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-01-17 Shell the first version
*/
#define __RT_IPC_SOURCE__
#include <rtthread.h>
#include "rthw.h"
#include "utest.h"
#define KERN_TEST_CONFIG_LOOP_TIMES 160
#define KERN_TEST_CONCURRENT_THREADS (RT_CPUS_NR * 2)
#define KERN_TEST_CONFIG_HIGHEST_PRIO 3
#define KERN_TEST_CONFIG_LOWEST_PRIO (RT_THREAD_PRIORITY_MAX - 2)
#define TEST_LEVEL_COUNTS (KERN_TEST_CONFIG_LOWEST_PRIO - KERN_TEST_CONFIG_HIGHEST_PRIO + 1)
#if TEST_LEVEL_COUNTS <= RT_CPUS_NR
#warning for the best of this test, TEST_LEVEL_COUNTS should greater than RT_CPUS_NR
#endif
#if KERN_TEST_CONCURRENT_THREADS < RT_CPUS_NR
#warning for the best of this test, KERN_TEST_CONCURRENT_THREADS should greater than RT_CPUS_NR
#endif
#if KERN_TEST_CONFIG_LOWEST_PRIO >= RT_THREAD_PRIORITY_MAX - 1
#error the thread priority should at least be greater than idle
#endif
static rt_atomic_t _star_counter;
static struct rt_semaphore _thr_exit_sem;
static struct rt_semaphore _level_waiting[TEST_LEVEL_COUNTS];
static rt_thread_t _thread_matrix[TEST_LEVEL_COUNTS][KERN_TEST_CONCURRENT_THREADS];
static rt_atomic_t _load_average[RT_CPUS_NR];
/* Print a progress marker: the given character followed by the CPU the
 * calling thread is currently on (always 0 without SMP), with a
 * newline after every 30 markers to keep the log readable. */
static void _print_char(rt_thread_t thr_self, int character)
{
    rt_base_t current_counter;
#ifdef RT_USING_SMP
    rt_kprintf("%c%d", character, RT_SCHED_CTX(thr_self).oncpu);
#else
    rt_kprintf("%c0", character);
#endif /* RT_USING_SMP */
    current_counter = rt_atomic_add(&_star_counter, 1);
    if (current_counter % 30 == 0)
    {
        rt_kprintf("\n");
    }
}
/* Attribute one unit of work to the CPU the caller is running on. */
static void _stats_load_avg_inc(void)
{
    rt_atomic_add(&_load_average[rt_hw_cpu_id()], 1);
}
/* Dump the per-CPU work counters and assert that their sum matches the
 * total amount of work the test was supposed to perform. */
static void _stats_load_avg_print(void)
{
    const rt_base_t expected = KERN_TEST_CONFIG_LOOP_TIMES * TEST_LEVEL_COUNTS * KERN_TEST_CONCURRENT_THREADS;
    rt_base_t total = 0;

    for (size_t cpu = 0; cpu < RT_CPUS_NR; cpu++)
    {
        rt_kprintf("%ld ", _load_average[cpu]);
        total += _load_average[cpu];
    }
    rt_kprintf("\n");
    uassert_int_equal(total, expected);
}
/* Pipeline worker; `level` selects its role:
 *   level 0                 — produces for level 1, then waits on its own
 *                             semaphore (fed back by the last level);
 *   level TEST_LEVEL_COUNTS-1 — consumes, prints a progress star, delays
 *                             one tick, and feeds level 0;
 *   middle levels           — forward one token from level to level + 1.
 * Every stage bumps the per-CPU load counter once per loop iteration. */
static void _thread_entry(void *param)
{
    int level = (rt_ubase_t)param;
    rt_thread_t thr_self = rt_thread_self();
    if (level == 0)
    {
        /* always the first to execute among other working threads */
        for (size_t i = 0; i < KERN_TEST_CONFIG_LOOP_TIMES; i++)
        {
            /* notify our consumer */
            rt_sem_release(&_level_waiting[level + 1]);
            _stats_load_avg_inc();
            /* waiting for resource of ours */
            rt_sem_take(&_level_waiting[level], RT_WAITING_FOREVER);
        }
    }
    else if (level == TEST_LEVEL_COUNTS - 1)
    {
        for (size_t i = 0; i < KERN_TEST_CONFIG_LOOP_TIMES; i++)
        {
            /* waiting for our resource first */
            rt_sem_take(&_level_waiting[level], RT_WAITING_FOREVER);
            _stats_load_avg_inc();
            _print_char(thr_self, '*');
            rt_thread_delay(1);
            /* produce for level 0 worker, closing the pipeline loop */
            rt_sem_release(&_level_waiting[0]);
        }
    }
    else
    {
        for (size_t i = 0; i < KERN_TEST_CONFIG_LOOP_TIMES; i++)
        {
            /* waiting for resource of ours */
            rt_sem_take(&_level_waiting[level], RT_WAITING_FOREVER);
            _stats_load_avg_inc();
            /* notify our consumer */
            rt_sem_release(&_level_waiting[level + 1]);
        }
    }
    uassert_true(1);
    /* report exit to the driver */
    rt_sem_release(&_thr_exit_sem);
    return;
}
/* Start the full worker matrix created in utest_tc_init(), wait for
 * every worker to signal exit, then print the load statistics. */
static void scheduler_tc(void)
{
    const size_t total = TEST_LEVEL_COUNTS * KERN_TEST_CONCURRENT_THREADS;

    LOG_I("Test starts...");
    for (size_t level = 0; level < TEST_LEVEL_COUNTS; level++)
    {
        for (size_t n = 0; n < KERN_TEST_CONCURRENT_THREADS; n++)
        {
            rt_thread_startup(_thread_matrix[level][n]);
        }
    }
    LOG_I("%d threads startup...", TEST_LEVEL_COUNTS * KERN_TEST_CONCURRENT_THREADS);
    /* block until every worker has signalled its exit */
    for (size_t n = 0; n < total; n++)
    {
        rt_sem_take(&_thr_exit_sem, RT_WAITING_FOREVER);
    }
    /* print load average */
    _stats_load_avg_print();
}
static rt_err_t utest_tc_init(void)
{
LOG_I("Setup environment...");
_star_counter = 1;
rt_memset(_load_average, 0, sizeof(_load_average));
rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);
for (size_t i = 0; i < TEST_LEVEL_COUNTS; i++)
{
rt_sem_init(&_level_waiting[i], "test", 0, RT_IPC_FLAG_PRIO);
for (size_t j = 0; j < KERN_TEST_CONCURRENT_THREADS; j++)
{
_thread_matrix[i][j] =
rt_thread_create("test",
_thread_entry,
(void *)i,
UTEST_THR_STACK_SIZE,
KERN_TEST_CONFIG_HIGHEST_PRIO+i,
5);
if (!_thread_matrix[i][j])
uassert_not_null(_thread_matrix[i][j]);
}
}
return RT_EOK;
}
/* Testcase teardown: detach the exit semaphore and every per-level
 * semaphore created during setup. */
static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_thr_exit_sem);
    for (size_t level = 0; level < TEST_LEVEL_COUNTS; level++)
    {
        rt_sem_detach(&_level_waiting[level]);
    }
    return RT_EOK;
}
/* Register the single scheduler/semaphore pipeline unit. */
static void testcase(void)
{
    UTEST_UNIT_RUN(scheduler_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.sem", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,121 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-01-25 Shell init ver.
*/
#define __RT_KERNEL_SOURCE__
#include <rtthread.h>
#include "utest.h"
#define TEST_LOOP_TIMES (100 * 1000)
#define TEST_PROGRESS_COUNTS (36)
#define TEST_THREAD_COUNT (RT_CPUS_NR * 1)
#define TEST_PROGRESS_ON (TEST_LOOP_TIMES*TEST_THREAD_COUNT/TEST_PROGRESS_COUNTS)
static struct rt_semaphore _thr_exit_sem;
static rt_atomic_t _progress_counter;
static volatile rt_thread_t threads_group[TEST_THREAD_COUNT][2];
/* Ping-pong partner A: inside a critical section, suspends itself and
 * resumes partner B, then exits the critical section.
 * NOTE(review): this relies on the suspend not rescheduling until
 * rt_exit_critical_safe() — confirm against scheduler semantics. */
static void _thread_entry1(void *param)
{
    rt_base_t critical_level;
    size_t idx = (size_t)param;
    for (size_t i = 0; i < TEST_LOOP_TIMES; i++)
    {
        /* pair suspend(self) + resume(partner) atomically w.r.t. the
         * scheduler via the critical section */
        critical_level = rt_enter_critical();
        rt_thread_suspend(rt_thread_self());
        rt_thread_resume(threads_group[idx][1]);
        rt_exit_critical_safe(critical_level);
        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
            uassert_true(1);
    }
    /* report exit to the driver */
    rt_sem_release(&_thr_exit_sem);
    return;
}
/* Ping-pong partner B: mirror of _thread_entry1(), resuming partner A
 * each iteration so the two threads alternate indefinitely. */
static void _thread_entry2(void *param)
{
    rt_base_t critical_level;
    size_t idx = (size_t)param;
    for (size_t i = 0; i < TEST_LOOP_TIMES; i++)
    {
        /* pair suspend(self) + resume(partner) inside a critical section */
        critical_level = rt_enter_critical();
        rt_thread_suspend(rt_thread_self());
        rt_thread_resume(threads_group[idx][0]);
        rt_exit_critical_safe(critical_level);
        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
            uassert_true(1);
    }
    /* report exit to the driver */
    rt_sem_release(&_thr_exit_sem);
    return;
}
static void scheduler_tc(void)
{
for (size_t i = 0; i < TEST_THREAD_COUNT; i++)
{
rt_thread_t t1 =
rt_thread_create(
"t1",
_thread_entry1,
(void *)i,
UTEST_THR_STACK_SIZE,
UTEST_THR_PRIORITY + 1,
100);
rt_thread_t t2 =
rt_thread_create(
"t2",
_thread_entry2,
(void *)i,
UTEST_THR_STACK_SIZE,
UTEST_THR_PRIORITY + 1,
100);
threads_group[i][0] = t1;
threads_group[i][1] = t2;
}
for (size_t i = 0; i < TEST_THREAD_COUNT; i++)
{
rt_thread_startup(threads_group[i][0]);
rt_thread_startup(threads_group[i][1]);
}
for (size_t i = 0; i < TEST_THREAD_COUNT; i++)
{
rt_sem_take(&_thr_exit_sem, RT_WAITING_FOREVER);
}
}
/* Testcase setup: create the exit-notification semaphore. */
static rt_err_t utest_tc_init(void)
{
    rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);
    return RT_EOK;
}
/* Testcase teardown: detach the exit-notification semaphore. */
static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_thr_exit_sem);
    return RT_EOK;
}
/* Register the single suspend/resume ping-pong unit. */
static void testcase(void)
{
    UTEST_UNIT_RUN(scheduler_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.thread", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,232 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-01-25 Shell init ver.
*/
#define __RT_KERNEL_SOURCE__
#include <rtthread.h>
#include <stdlib.h>
#include "utest.h"
#define TEST_SECONDS 10
#define TEST_LOOP_TICKS (TEST_SECONDS * RT_TICK_PER_SECOND)
#define TEST_PROGRESS_COUNTS (36)
#define TEST_PROGRESS_ON (TEST_LOOP_TICKS*2/TEST_PROGRESS_COUNTS)
static struct rt_semaphore _thr_exit_sem;
static struct rt_mutex _ipc_primitive;
static struct rt_semaphore _cons_can_take_mtx;
static struct rt_semaphore _prod_can_take_mtx;
static rt_atomic_t _progress_counter;
#define CONSUMER_MAGIC 0x11223344
#define PRODUCER_MAGIC 0x44332211
static rt_atomic_t _last_holder_flag = CONSUMER_MAGIC;
static rt_base_t _timedout_failed_times = 0;
/**
* Test on timedout IPC with racing condition where timedout routine and producer
* thread may race to wakeup sleeper.
*
* This test will fork 2 thread, one producer and one consumer. The producer will
* looping and trigger the IPC on the edge of new tick arrives. The consumer will
* wait on IPC with a timedout of 1 tick.
*/
/* Busy-wait until a fresh tick boundary, then keep polling for a
 * random number of iterations (bailing out early if another tick
 * arrives), so the caller acts at a pseudo-random offset just after a
 * tick edge — maximizing the race with the consumer's 1-tick timeout. */
static void _wait_until_edge(void)
{
    rt_tick_t entry_level, current;
    rt_base_t random_latency;
    entry_level = rt_tick_get();
    /* spin until the tick counter advances */
    do
    {
        current = rt_tick_get();
    }
    while (current == entry_level);
    /* give a random latency for test */
    random_latency = rand() % 1000 * 1000;
    entry_level = current;
    for (size_t i = 0; i < random_latency; i++)
    {
        current = rt_tick_get();
        if (current != entry_level)
            break;
    }
}
/* Producer: alternates mutex ownership with the consumer in strict
 * round-robin, releasing the mutex right on a tick edge so the release
 * races the consumer's 1-tick timeout path. */
static void _producer_entry(void *param)
{
    rt_err_t error;
    for (size_t i = 0; i < TEST_LOOP_TICKS; i++)
    {
        /**
         * only try to take mutex after consumer have taken it after last
         * release from us.
         */
        error = rt_sem_take(&_prod_can_take_mtx, RT_WAITING_FOREVER);
        if (error)
        {
            uassert_true(0);
            break;
        }
        error = rt_mutex_take(&_ipc_primitive, RT_WAITING_FOREVER);
        if (error)
        {
            uassert_true(0);
            break;
        }
        /* ensure that mutex should be held in round-robin method */
        if (rt_atomic_load(&_last_holder_flag) != CONSUMER_MAGIC)
        {
            uassert_true(0);
            break;
        }
        else
        {
            /* mark ourselves as holder and open the consumer's gate */
            rt_atomic_store(&_last_holder_flag, PRODUCER_MAGIC);
            rt_sem_release(&_cons_can_take_mtx);
        }
        /* release on a tick edge to race the consumer's timeout */
        _wait_until_edge();
        rt_mutex_release(&_ipc_primitive);
        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
            uassert_true(1);
    }
    /* report exit to the driver */
    rt_sem_release(&_thr_exit_sem);
    return;
}
/* Consumer: repeatedly attempts a 1-tick timed take of the mutex,
 * retrying on timeout, and verifies after both the timeout and the
 * release paths that it has not been left as a phantom owner. */
static void _consumer_entry(void *param)
{
    rt_err_t error;
    for (size_t i = 0; i < TEST_LOOP_TICKS; i++)
    {
        /**
         * only try to take mutex after producer have taken it after last
         * release from us.
         */
        error = rt_sem_take(&_cons_can_take_mtx, RT_WAITING_FOREVER);
        if (error)
        {
            uassert_true(0);
            break;
        }
        while (1)
        {
            /* 1-tick timeout deliberately races the producer's release */
            error = rt_mutex_take_interruptible(&_ipc_primitive, 1);
            if (error == -RT_ETIMEOUT)
            {
                _timedout_failed_times++;
                /* a timed-out waiter must never end up as the owner */
                if (rt_mutex_get_owner(&_ipc_primitive) == rt_thread_self())
                {
                    uassert_true(0);
                    break;
                }
            }
            else
            {
                break;
            }
        }
        if (error != RT_EOK)
        {
            uassert_true(0);
            break;
        }
        /* ensure that mutex should be held in round-robin method */
        if (rt_atomic_load(&_last_holder_flag) != PRODUCER_MAGIC)
        {
            uassert_true(0);
            break;
        }
        else
        {
            /* mark ourselves as holder and open the producer's gate */
            rt_atomic_store(&_last_holder_flag, CONSUMER_MAGIC);
            rt_sem_release(&_prod_can_take_mtx);
        }
        rt_mutex_release(&_ipc_primitive);
        /* after releasing, we must no longer be the owner */
        if (rt_mutex_get_owner(&_ipc_primitive) == rt_thread_self())
        {
            uassert_true(0);
            break;
        }
        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
            uassert_true(1);
    }
    /* report exit to the driver */
    rt_sem_release(&_thr_exit_sem);
    return;
}
static void timed_mtx_tc(void)
{
rt_thread_t prod = rt_thread_create(
"prod",
_producer_entry,
(void *)0,
UTEST_THR_STACK_SIZE,
UTEST_THR_PRIORITY + 1,
4);
rt_thread_t cons = rt_thread_create(
"cons",
_consumer_entry,
(void *)0,
UTEST_THR_STACK_SIZE,
UTEST_THR_PRIORITY + 1,
100);
rt_thread_startup(prod);
rt_thread_startup(cons);
for (size_t i = 0; i < 2; i++)
{
uassert_int_equal(
rt_sem_take(&_thr_exit_sem, 4 * TEST_LOOP_TICKS),
RT_EOK);
}
/* Summary */
LOG_I("Total failed times: %ld(in %d)\n", _timedout_failed_times, TEST_LOOP_TICKS);
}
/* Testcase setup: reset the timeout counter and create the IPC set.
 * The producer's gate starts at 1 so it may take the mutex first. */
static rt_err_t utest_tc_init(void)
{
    _timedout_failed_times = 0;
    rt_sem_init(&_prod_can_take_mtx, "test", 1, RT_IPC_FLAG_PRIO);
    rt_sem_init(&_cons_can_take_mtx, "test", 0, RT_IPC_FLAG_PRIO);
    rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);
    rt_mutex_init(&_ipc_primitive, "ipc", RT_IPC_FLAG_PRIO);
    return RT_EOK;
}
/* Testcase teardown: release every IPC object created in init. */
static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_thr_exit_sem);
    rt_sem_detach(&_prod_can_take_mtx);
    rt_sem_detach(&_cons_can_take_mtx);
    rt_mutex_detach(&_ipc_primitive);
    return RT_EOK;
}
/* Register the single timed-mutex race unit. */
static void testcase(void)
{
    UTEST_UNIT_RUN(timed_mtx_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.timed_mtx", utest_tc_init, utest_tc_cleanup, TEST_SECONDS * 2);

View File

@ -0,0 +1,149 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-01-25 Shell init ver.
*/
#define __RT_KERNEL_SOURCE__
#include <rtthread.h>
#include <stdlib.h>
#include "utest.h"
#define TEST_SECONDS 10
#define TEST_LOOP_TICKS (TEST_SECONDS * RT_TICK_PER_SECOND)
#define TEST_PROGRESS_COUNTS (36)
#define TEST_PROGRESS_ON (TEST_LOOP_TICKS*2/TEST_PROGRESS_COUNTS)
static struct rt_semaphore _thr_exit_sem;
static struct rt_semaphore _ipc_sem;
static rt_atomic_t _progress_counter;
static rt_base_t _timedout_failed_times = 0;
/**
* Test on timedout IPC with racing condition where timedout routine and producer
* thread may race to wakeup sleeper.
*
* This test will fork 2 thread, one producer and one consumer. The producer will
* looping and trigger the IPC on the edge of new tick arrives. The consumer will
* wait on IPC with a timedout of 1 tick.
*/
/* Busy-wait until a fresh tick boundary, then keep polling for a
 * random number of iterations (bailing out when the next tick lands),
 * so the caller releases at a pseudo-random point just after an edge. */
static void _wait_until_edge(void)
{
    rt_tick_t entry_level, current;
    rt_base_t random_latency;
    entry_level = rt_tick_get();
    /* spin until the tick counter advances */
    do
    {
        current = rt_tick_get();
    }
    while (current == entry_level);
    /* give a random latency for test */
    random_latency = rand();
    entry_level = current;
    for (size_t i = 0; i < random_latency; i++)
    {
        current = rt_tick_get();
        if (current != entry_level)
            break;
    }
}
/* Producer: release the semaphore right on a tick edge each loop so
 * the release races the consumer's 1-tick timed take. */
static void _producer_entry(void *param)
{
    for (size_t loop = 0; loop < TEST_LOOP_TICKS; loop++)
    {
        _wait_until_edge();
        rt_sem_release(&_ipc_sem);

        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
            uassert_true(1);
    }
    /* report exit to the driver */
    rt_sem_release(&_thr_exit_sem);
    return;
}
/* Consumer: 1-tick timed takes against the producer's edge-timed
 * releases; timeouts are a legal race outcome and merely counted. */
static void _consumer_entry(void *param)
{
    int rc;
    for (size_t loop = 0; loop < TEST_LOOP_TICKS; loop++)
    {
        rc = rt_sem_take_interruptible(&_ipc_sem, 1);
        if (rc == -RT_ETIMEOUT)
        {
            _timedout_failed_times++;
        }
        else if (rc != RT_EOK)
        {
            /* any other failure is a real error */
            uassert_true(0);
        }

        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
            uassert_true(1);
    }
    /* report exit to the driver */
    rt_sem_release(&_thr_exit_sem);
    return;
}
static void timed_sem_tc(void)
{
rt_thread_t prod = rt_thread_create(
"prod",
_producer_entry,
(void *)0,
UTEST_THR_STACK_SIZE,
UTEST_THR_PRIORITY + 1,
4);
rt_thread_t cons = rt_thread_create(
"cons",
_consumer_entry,
(void *)0,
UTEST_THR_STACK_SIZE,
UTEST_THR_PRIORITY + 1,
100);
rt_thread_startup(prod);
rt_thread_startup(cons);
for (size_t i = 0; i < 2; i++)
{
rt_sem_take(&_thr_exit_sem, RT_WAITING_FOREVER);
}
/* Summary */
LOG_I("Total failed times: %ld(in %d)\n", _timedout_failed_times, TEST_LOOP_TICKS);
}
/* Testcase setup: seed rand() and create the two semaphores.
 * Fix: the original seeded srand() by dereferencing a freshly
 * malloc'd, uninitialized int, which is undefined behavior; use the
 * current tick counter as the seed instead. */
static rt_err_t utest_tc_init(void)
{
    srand((unsigned int)rt_tick_get());
    rt_sem_init(&_ipc_sem, "ipc", 0, RT_IPC_FLAG_PRIO);
    rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);
    return RT_EOK;
}
/* Testcase teardown: drop both semaphores created during init. */
static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_thr_exit_sem);
    rt_sem_detach(&_ipc_sem);
    return RT_EOK;
}
/* Register the single timed-semaphore race unit. */
static void testcase(void)
{
    UTEST_UNIT_RUN(timed_sem_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.timed_sem", utest_tc_init, utest_tc_cleanup, TEST_SECONDS * 2);

View File

@ -0,0 +1,558 @@
/*
* Copyright (c) 2006-2019, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-08-12 luckyzjq the first version
*/
#include <rtthread.h>
#include <stdlib.h>
#include "utest.h"
static struct rt_semaphore static_semaphore;
#ifdef RT_USING_HEAP
static rt_sem_t dynamic_semaphore;
#endif /* RT_USING_HEAP */
/* Repeatedly init/detach the static semaphore with both wait-queue
 * flavours (PRIO and FIFO) and a varying initial count. */
static void test_static_semaphore_init(void)
{
    rt_err_t err;
    int loops = rand() % 0x10000;

    for (int i = 0; i < loops; i++)
    {
        err = rt_sem_init(&static_semaphore, "static_sem", i, RT_IPC_FLAG_PRIO);
        if (err != RT_EOK)
        {
            uassert_true(RT_FALSE);
            break;
        }
        rt_sem_detach(&static_semaphore);

        err = rt_sem_init(&static_semaphore, "static_sem", i, RT_IPC_FLAG_FIFO);
        if (err != RT_EOK)
        {
            uassert_true(RT_FALSE);
            break;
        }
        rt_sem_detach(&static_semaphore);
    }
    uassert_true(RT_TRUE);
}
/* Repeatedly detach freshly-initialized static semaphores of both
 * wait-queue flavours; only the detach result is asserted. */
static void test_static_semaphore_detach(void)
{
    rt_err_t err;
    int loops = rand() % 0x10000;

    for (int i = 0; i < loops; i++)
    {
        /* PRIO flavour */
        if (rt_sem_init(&static_semaphore, "static_sem", i, RT_IPC_FLAG_PRIO) != RT_EOK)
            break;
        err = rt_sem_detach(&static_semaphore);
        if (err != RT_EOK)
        {
            uassert_true(RT_FALSE);
            break;
        }
        /* FIFO flavour */
        if (rt_sem_init(&static_semaphore, "static_sem", i, RT_IPC_FLAG_FIFO) != RT_EOK)
            break;
        err = rt_sem_detach(&static_semaphore);
        if (err != RT_EOK)
        {
            uassert_true(RT_FALSE);
            break;
        }
    }
    uassert_true(RT_TRUE);
}
/* With an initial count of 1: the first take succeeds and the second,
 * bounded take must time out. */
static void test_static_semaphore_take(void)
{
    if (rt_sem_init(&static_semaphore, "static_sem", 1, RT_IPC_FLAG_PRIO) != RT_EOK)
        return;

    /* first take consumes the single count */
    if (rt_sem_take(&static_semaphore, RT_WAITING_FOREVER) != RT_EOK)
        uassert_true(RT_FALSE);
    /* second take must time out: the count is exhausted */
    if (rt_sem_take(&static_semaphore, 100) != -RT_ETIMEOUT)
        uassert_true(RT_FALSE);

    rt_sem_detach(&static_semaphore);
    uassert_true(RT_TRUE);
    return;
}
/* With an initial count of 1: the first trytake succeeds and the
 * second must fail immediately with -RT_ETIMEOUT. */
static void test_static_semaphore_trytake(void)
{
    if (rt_sem_init(&static_semaphore, "static_sem", 1, RT_IPC_FLAG_PRIO) != RT_EOK)
        return;

    /* first non-blocking take consumes the single count */
    if (rt_sem_trytake(&static_semaphore) != RT_EOK)
        uassert_true(RT_FALSE);
    /* second non-blocking take must fail: the count is exhausted */
    if (rt_sem_trytake(&static_semaphore) != -RT_ETIMEOUT)
        uassert_true(RT_FALSE);

    rt_sem_detach(&static_semaphore);
    uassert_true(RT_TRUE);
    return;
}
/* With an initial count of 0: a bounded take times out, a release
 * raises the count to 1, and the next take then succeeds. */
static void test_static_semaphore_release(void)
{
    if (rt_sem_init(&static_semaphore, "static_sem", 0, RT_IPC_FLAG_PRIO) != RT_EOK)
        return;

    /* count is 0, so a bounded take must time out */
    if (rt_sem_take(&static_semaphore, 100) != -RT_ETIMEOUT)
        uassert_true(RT_FALSE);
    /* release bumps the count to 1 ... */
    if (rt_sem_release(&static_semaphore) != RT_EOK)
        uassert_true(RT_FALSE);
    /* ... so the next take succeeds immediately */
    if (rt_sem_take(&static_semaphore, RT_WAITING_FOREVER) != RT_EOK)
        uassert_true(RT_FALSE);

    rt_sem_detach(&static_semaphore);
    uassert_true(RT_TRUE);
    return;
}
/* Reset the semaphore count via rt_sem_control(RT_IPC_CMD_RESET) and
 * verify it is then takeable exactly that many times. */
static void test_static_semaphore_control(void)
{
    rt_err_t err;
    int reset_value = rand() % 100;

    err = rt_sem_init(&static_semaphore, "static_sem", 1, RT_IPC_FLAG_PRIO);
    if (err != RT_EOK)
        return;

    /* RT_IPC_CMD_RESET forces the count to reset_value */
    err = rt_sem_control(&static_semaphore, RT_IPC_CMD_RESET, &reset_value);
    if (err != RT_EOK)
    {
        uassert_true(RT_FALSE);
    }
    /* consume the count one take at a time; each must succeed */
    for (int i = 0; i < reset_value; i++)
    {
        if (rt_sem_take(&static_semaphore, 10) != RT_EOK)
        {
            uassert_true(RT_FALSE);
        }
    }
    rt_sem_detach(&static_semaphore);
    uassert_true(RT_TRUE);
}
/* Hardware-timer callback: posts the static semaphore once from
 * interrupt context. */
static void static_release_isr_hardware_callback(void *param)
{
    if (rt_sem_release(&static_semaphore) != RT_EOK)
    {
        uassert_true(RT_FALSE);
    }
}
/* Software-timer callback: posts the static semaphore once from the
 * timer thread context. */
static void static_release_isr_software_callback(void *param)
{
    if (rt_sem_release(&static_semaphore) != RT_EOK)
    {
        uassert_true(RT_FALSE);
    }
}
/* Verify that the semaphore can be released from both a hardware-timer
 * (interrupt) context and a software-timer thread context.
 * Fixes over the original:
 *  - timer creation results are checked before use (the original could
 *    pass RT_NULL to rt_timer_start()/rt_timer_delete());
 *  - the early-return path no longer leaks the two timers. */
static void test_static_semaphore_release_isr(void)
{
    rt_err_t result;
    rt_timer_t hardware_timer;
    rt_timer_t software_timer;
    /* create one one-shot timer of each kind */
    hardware_timer = rt_timer_create("release_isr",
                                     static_release_isr_hardware_callback,
                                     RT_NULL,
                                     100,
                                     RT_TIMER_FLAG_HARD_TIMER | RT_TIMER_FLAG_ONE_SHOT);
    software_timer = rt_timer_create("release_isr",
                                     static_release_isr_software_callback,
                                     RT_NULL,
                                     100,
                                     RT_TIMER_FLAG_SOFT_TIMER | RT_TIMER_FLAG_ONE_SHOT);
    if (hardware_timer == RT_NULL || software_timer == RT_NULL)
    {
        if (hardware_timer)
            rt_timer_delete(hardware_timer);
        if (software_timer)
            rt_timer_delete(software_timer);
        uassert_true(RT_FALSE);
        return;
    }
    /* start timers: each callback releases the semaphore exactly once */
    rt_timer_start(hardware_timer);
    rt_timer_start(software_timer);
    result = rt_sem_init(&static_semaphore, "static_sem", 0, RT_IPC_FLAG_PRIO);
    if (RT_EOK == result)
    {
        /* expect exactly two releases, one per timer */
        for (int i = 0; i < 2; i++)
        {
            result = rt_sem_take(&static_semaphore, 1000);
            if (RT_EOK != result)
            {
                uassert_true(RT_FALSE);
            }
        }
        rt_sem_detach(&static_semaphore);
    }
    else
    {
        uassert_true(RT_FALSE);
    }
    /* always reclaim the timers, even on the failure paths above */
    rt_timer_delete(hardware_timer);
    rt_timer_delete(software_timer);
    uassert_true(RT_TRUE);
}
#ifdef RT_USING_HEAP
/* Repeatedly create/delete a dynamic semaphore with both wait-queue
 * flavours and a varying initial count. */
static void test_dynamic_semaphore_create(void)
{
    int loops = rand() % 0x10000;

    for (int i = 0; i < loops; i++)
    {
        dynamic_semaphore = rt_sem_create("static_sem", i, RT_IPC_FLAG_PRIO);
        if (dynamic_semaphore == RT_NULL)
        {
            uassert_true(RT_FALSE);
            break;
        }
        rt_sem_delete(dynamic_semaphore);

        dynamic_semaphore = rt_sem_create("static_sem", i, RT_IPC_FLAG_FIFO);
        if (dynamic_semaphore == RT_NULL)
        {
            uassert_true(RT_FALSE);
            break;
        }
        rt_sem_delete(dynamic_semaphore);
    }
    uassert_true(RT_TRUE);
}
/* Repeatedly delete freshly-created dynamic semaphores of both
 * wait-queue flavours; only the delete result is asserted. */
static void test_dynamic_semaphore_delete(void)
{
    rt_err_t err;
    int loops = rand() % 0x10000;

    for (int i = 0; i < loops; i++)
    {
        /* PRIO flavour */
        dynamic_semaphore = rt_sem_create("static_sem", i, RT_IPC_FLAG_PRIO);
        if (dynamic_semaphore == RT_NULL)
            break;
        err = rt_sem_delete(dynamic_semaphore);
        if (err != RT_EOK)
        {
            uassert_true(RT_FALSE);
            break;
        }
        /* FIFO flavour */
        dynamic_semaphore = rt_sem_create("static_sem", i, RT_IPC_FLAG_FIFO);
        if (dynamic_semaphore == RT_NULL)
            break;
        err = rt_sem_delete(dynamic_semaphore);
        if (err != RT_EOK)
        {
            uassert_true(RT_FALSE);
            break;
        }
    }
    uassert_true(RT_TRUE);
}
/* Take semantics: with an initial count of 1, the first take must succeed
 * immediately and the second take must time out. */
static void test_dynamic_semaphore_take(void)
{
    dynamic_semaphore = rt_sem_create("static_sem", 1, RT_IPC_FLAG_PRIO);
    if (RT_NULL == dynamic_semaphore)
    {
        return;
    }
    /* first take: one token is available */
    if (rt_sem_take(dynamic_semaphore, RT_WAITING_FOREVER) != RT_EOK)
    {
        uassert_true(RT_FALSE);
    }
    /* second take: counter is now zero, expect a timeout */
    if (rt_sem_take(dynamic_semaphore, 100) != -RT_ETIMEOUT)
    {
        uassert_true(RT_FALSE);
    }
    rt_sem_delete(dynamic_semaphore);
    uassert_true(RT_TRUE);
    return;
}
/* Non-blocking take semantics: with an initial count of 1, the first
 * trytake must succeed and the second must fail with -RT_ETIMEOUT without
 * blocking. */
static void test_dynamic_semaphore_trytake(void)
{
    rt_err_t result;
    dynamic_semaphore = rt_sem_create("static_sem", 1, RT_IPC_FLAG_PRIO);
    if (RT_NULL != dynamic_semaphore)
    {
        /* first take: one token is available */
        result = rt_sem_trytake(dynamic_semaphore);
        if (RT_EOK != result)
            uassert_true(RT_FALSE);
        /* second take: counter is zero, must fail immediately */
        result = rt_sem_trytake(dynamic_semaphore);
        if (-RT_ETIMEOUT != result)
            uassert_true(RT_FALSE);
    }
    else
    {
        return;
    }
    rt_sem_delete(dynamic_semaphore);
    uassert_true(RT_TRUE);
    return;
}
/* Release semantics: starting from count 0, a take must time out, and after
 * one release the next take must succeed. */
static void test_dynamic_semaphore_release(void)
{
    rt_err_t result;
    dynamic_semaphore = rt_sem_create("static_sem", 0, RT_IPC_FLAG_PRIO);
    if (RT_NULL != dynamic_semaphore)
    {
        /* first take: no token yet, expect a timeout */
        result = rt_sem_take(dynamic_semaphore, 100);
        if (-RT_ETIMEOUT != result)
            uassert_true(RT_FALSE);
        /* release one token */
        result = rt_sem_release(dynamic_semaphore);
        if (RT_EOK != result)
            uassert_true(RT_FALSE);
        /* second take: must consume the released token */
        result = rt_sem_take(dynamic_semaphore, RT_WAITING_FOREVER);
        if (RT_EOK != result)
            uassert_true(RT_FALSE);
    }
    else
    {
        return;
    }
    rt_sem_delete(dynamic_semaphore);
    uassert_true(RT_TRUE);
    return;
}
/* RT_IPC_CMD_RESET semantics: reset the counter to a random value and
 * verify the semaphore can then be taken exactly that many times. */
static void test_dynamic_semaphore_control(void)
{
    int reset_count = rand() % 100;
    dynamic_semaphore = rt_sem_create("static_sem", 1, RT_IPC_FLAG_PRIO);
    if (RT_NULL == dynamic_semaphore)
    {
        return;
    }
    /* force the counter from 1 to reset_count */
    if (rt_sem_control(dynamic_semaphore, RT_IPC_CMD_RESET, &reset_count) != RT_EOK)
    {
        uassert_true(RT_FALSE);
    }
    /* each of the reset_count tokens must be takeable */
    for (int taken = 0; taken < reset_count; taken++)
    {
        if (rt_sem_take(dynamic_semaphore, 10) != RT_EOK)
        {
            uassert_true(RT_FALSE);
        }
    }
    rt_sem_delete(dynamic_semaphore);
    uassert_true(RT_TRUE);
}
/* Hardware (ISR-context) timer callback: post the dynamic semaphore once;
 * any failure to release is recorded as a test failure. */
static void dynamic_release_isr_hardware_callback(void *param)
{
    if (rt_sem_release(dynamic_semaphore) != RT_EOK)
    {
        uassert_true(RT_FALSE);
    }
}
/* Software-timer (timer-thread context) callback: post the dynamic
 * semaphore once; any failure to release is recorded as a test failure. */
static void dynamic_release_isr_software_callback(void *param)
{
    if (rt_sem_release(dynamic_semaphore) != RT_EOK)
    {
        uassert_true(RT_FALSE);
    }
}
/**
 * Verify that a dynamically created semaphore can be released from timer
 * context: a one-shot hardware timer and a one-shot software timer each
 * release the semaphore once, and this thread must be able to take it twice.
 */
static void test_dynamic_semaphore_release_isr(void)
{
    rt_err_t result;
    rt_timer_t hardware_timer;
    rt_timer_t software_timer;
    /* create one-shot timers whose callbacks release dynamic_semaphore */
    hardware_timer = rt_timer_create("release_isr",
                                     dynamic_release_isr_hardware_callback,
                                     RT_NULL,
                                     100,
                                     RT_TIMER_FLAG_HARD_TIMER | RT_TIMER_FLAG_ONE_SHOT);
    software_timer = rt_timer_create("release_isr",
                                     dynamic_release_isr_software_callback,
                                     RT_NULL,
                                     100,
                                     RT_TIMER_FLAG_SOFT_TIMER | RT_TIMER_FLAG_ONE_SHOT);
    /* start timer */
    if (hardware_timer)
        rt_timer_start(hardware_timer);
    if (software_timer)
        rt_timer_start(software_timer);
    dynamic_semaphore = rt_sem_create("static_sem", 0, RT_IPC_FLAG_PRIO);
    if (RT_NULL != dynamic_semaphore)
    {
        /* expect exactly one release from each timer callback */
        for (int i = 0; i < 2; i++)
        {
            result = rt_sem_take(dynamic_semaphore, 1000);
            if (RT_EOK != result)
            {
                uassert_true(RT_FALSE);
            }
        }
        rt_sem_delete(dynamic_semaphore);
    }
    /* fix: delete the timers on every path -- the original returned early
     * when rt_sem_create() failed and leaked both timers; also guard against
     * RT_NULL handles in case rt_timer_create() failed */
    if (hardware_timer)
        rt_timer_delete(hardware_timer);
    if (software_timer)
        rt_timer_delete(software_timer);
    if (RT_NULL == dynamic_semaphore)
    {
        return;
    }
    uassert_true(RT_TRUE);
}
#endif /* RT_USING_HEAP */
/* Reset the shared dynamic-semaphore handle before the suite runs. */
static rt_err_t utest_tc_init(void)
{
#ifdef RT_USING_HEAP
    dynamic_semaphore = RT_NULL;
#endif /* RT_USING_HEAP */
    return RT_EOK;
}
/* Clear the handle again so a failed case cannot leak a stale pointer into
 * later suites. */
static rt_err_t utest_tc_cleanup(void)
{
#ifdef RT_USING_HEAP
    dynamic_semaphore = RT_NULL;
#endif /* RT_USING_HEAP */
    return RT_EOK;
}
/* Register all semaphore units; the dynamic-semaphore cases require the
 * kernel heap. */
static void testcase(void)
{
    UTEST_UNIT_RUN(test_static_semaphore_init);
    UTEST_UNIT_RUN(test_static_semaphore_take);
    UTEST_UNIT_RUN(test_static_semaphore_release);
    UTEST_UNIT_RUN(test_static_semaphore_detach);
    UTEST_UNIT_RUN(test_static_semaphore_trytake);
    UTEST_UNIT_RUN(test_static_semaphore_control);
    UTEST_UNIT_RUN(test_static_semaphore_release_isr);
#ifdef RT_USING_HEAP
    UTEST_UNIT_RUN(test_dynamic_semaphore_create);
    UTEST_UNIT_RUN(test_dynamic_semaphore_take);
    UTEST_UNIT_RUN(test_dynamic_semaphore_release);
    UTEST_UNIT_RUN(test_dynamic_semaphore_delete);
    UTEST_UNIT_RUN(test_dynamic_semaphore_trytake);
    UTEST_UNIT_RUN(test_dynamic_semaphore_control);
    UTEST_UNIT_RUN(test_dynamic_semaphore_release_isr);
#endif /* RT_USING_HEAP */
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.semaphore_tc", utest_tc_init, utest_tc_cleanup, 1000);

View File

@ -0,0 +1,207 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-08-12 flybreak the first version
*
* case 1:rt_signal_install, install all available signal
* case 2:rt_signal_install, install illegal signal
* case 3:rt_signal_mask/unmask, one thread self, install and unmask, then kill, should received.
* case 4:rt_signal_mask/unmask, one thread self, install and unmask and mask, then kill, should can't received.
* case 5:rt_signal_wait, two thread, thread1: install and unmask, then wait 1s; thread2: kill, should received.
* case 6:rt_signal_wait, two thread, thread1: install and unmask, then wait 1s; thread2: sleep 2s then kill, should can't received.
* case 7:rt_signal_kill, kill legal thread, return 0;
* case 8:rt_signal_kill, kill illegal thread, return failed (unused);
* case 9:rt_signal_kill, kill illegal signo, return -RT_EINVAL;
*
*/
#include <rtthread.h>
#include "utest.h"
/* Last signal number seen by the default handler; -1 means "none yet". */
static volatile int recive_sig = 0;
/* Released by the waiter thread once a signal has been received. */
static struct rt_semaphore _received_signal;
/* Default handler installed by every case: record the delivered signo. */
void sig_handle_default(int signo)
{
    recive_sig = signo;
}
/* case 1: installing a handler for every legal signal number must succeed;
 * case 2: the first out-of-range number (signo == RT_SIG_MAX after the
 * loop) must be rejected with SIG_ERR. */
static void rt_signal_install_test(void)
{
    int signo;
    rt_sighandler_t prev;
    for (signo = 0; signo < RT_SIG_MAX; signo++)
    {
        prev = rt_signal_install(signo, sig_handle_default);
        uassert_true(prev != SIG_ERR);
    }
    prev = rt_signal_install(signo, sig_handle_default);
    uassert_true(prev == SIG_ERR);
}
/* case 3: for every signal, install a handler and unmask it, then signal
 * this thread; the handler must record the signal number. */
static void rt_signal_mask_test(void)
{
    int signo;
    rt_sighandler_t result;
    /* case 3:rt_signal_mask/unmask, one thread self, install and unmask, then kill, should received. */
    for (signo = 0; signo < RT_SIG_MAX; signo++)
    {
        recive_sig = -1;
        result = rt_signal_install(signo, sig_handle_default);
        uassert_true(result != SIG_ERR);
        rt_signal_unmask(signo);
        uassert_int_equal(rt_thread_kill(rt_thread_self(), signo), RT_EOK);
        /* allow one tick for delivery before checking the handler ran */
        rt_thread_mdelay(1);
        uassert_int_equal(recive_sig, signo);
    }
    return;
}
/* case 4: for every signal, install, unmask, then mask again before
 * signalling; the handler must NOT run while the signal is masked. */
static void rt_signal_unmask_test(void)
{
    int signo;
    rt_sighandler_t result;
    /* case 4:rt_signal_mask/unmask, one thread self, install and unmask and mask, then kill, should can't received. */
    for (signo = 0; signo < RT_SIG_MAX; signo++)
    {
        recive_sig = -1;
        result = rt_signal_install(signo, sig_handle_default);
        uassert_true(result != SIG_ERR);
        rt_signal_unmask(signo);
        rt_signal_mask(signo);
        uassert_int_equal(rt_thread_kill(rt_thread_self(), signo), RT_EOK);
        /* allow one tick -- the masked signal must stay undelivered */
        rt_thread_mdelay(1);
        uassert_int_not_equal(recive_sig, signo);
    }
    return;
}
/* case 7: killing a valid thread with every legal signo returns RT_EOK and
 * the handler runs; case 9: an illegal signo is rejected with -RT_EINVAL.
 * (case 8 -- killing an illegal thread -- is intentionally disabled below.) */
static void rt_signal_kill_test(void)
{
    int signo;
    rt_sighandler_t result;
    /* case 7:rt_signal_kill, kill legal thread, return 0; */
    for (signo = 0; signo < RT_SIG_MAX; signo++)
    {
        recive_sig = -1;
        result = rt_signal_install(signo, sig_handle_default);
        uassert_true(result != SIG_ERR);
        rt_signal_unmask(signo);
        uassert_int_equal(rt_thread_kill(rt_thread_self(), signo), RT_EOK);
        rt_thread_mdelay(1);
        uassert_int_equal(recive_sig, signo);
    }
    /* case 8:rt_signal_kill, kill illegal thread, return failed; */
    // uassert_true(rt_thread_kill((rt_thread_t)-1, signo) == -RT_ERROR);
    /* case 9:rt_signal_kill, kill illegal signo, return -RT_EINVAL; */
    uassert_true(rt_thread_kill(rt_thread_self(), -1) == -RT_EINVAL);
    return;
}
/* Waiter-thread entry used by cases 5 and 6: install and unmask SIGUSR1,
 * then block in rt_signal_wait() for up to one second. On success, record
 * the received signo and release _received_signal; on timeout, return
 * without releasing it (case 6 relies on this to detect non-delivery). */
void rt_signal_wait_thread(void *parm)
{
    sigset_t selectset;
    siginfo_t recive_si;
    rt_signal_install(SIGUSR1, sig_handle_default);
    rt_signal_unmask(SIGUSR1);
    (void)sigemptyset(&selectset);
    (void)sigaddset(&selectset, SIGUSR1);
    /* case 5:rt_signal_wait, two thread, thread1: install and unmask, then wait 1s; thread2: kill, should received. */
    if (rt_signal_wait((void *)&selectset, &recive_si, RT_TICK_PER_SECOND) != RT_EOK)
    {
        return;
    }
    recive_sig = recive_si.si_signo;
    LOG_I("received signal %d", recive_sig);
    rt_sem_release(&_received_signal);
}
static void rt_signal_wait_test(void)
{
rt_thread_t t1;
recive_sig = -1;
t1 = rt_thread_create("sig_t1", rt_signal_wait_thread, 0, 4096, 14, 10);
if (t1)
{
rt_thread_startup(t1);
}
rt_thread_mdelay(1);
/* case 5:rt_signal_wait, two thread, thread1: install and unmask, then wait 1s; thread2: kill, should received. */
uassert_int_equal(rt_thread_kill(t1, SIGUSR1), RT_EOK);
rt_sem_take(&_received_signal, RT_WAITING_FOREVER);
uassert_int_equal(recive_sig, SIGUSR1);
return;
}
static void rt_signal_wait_test2(void)
{
rt_thread_t t1;
recive_sig = -1;
t1 = rt_thread_create("sig_t1", rt_signal_wait_thread, 0, 4096, 14, 10);
if (t1)
{
rt_thread_startup(t1);
}
/* case 6:rt_signal_wait, two thread, thread1: install and unmask, then wait 1s; thread2: sleep 2s then kill, should can't received. */
rt_thread_mdelay(2000);
uassert_int_equal(rt_thread_kill(t1, SIGUSR1), RT_EOK);
uassert_int_not_equal(
rt_sem_take(&_received_signal, 1),
RT_EOK);
uassert_int_not_equal(recive_sig, SIGUSR1);
return;
}
/* Initialize the handshake semaphore used by the wait cases. */
static rt_err_t utest_tc_init(void)
{
    rt_sem_init(&_received_signal, "utest", 0, RT_IPC_FLAG_PRIO);
    return RT_EOK;
}
/* Detach the handshake semaphore after the suite. */
static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_received_signal);
    return RT_EOK;
}
/* Register the signal units; the wait cases create threads dynamically and
 * therefore require the kernel heap. */
static void testcase(void)
{
#ifdef RT_USING_HEAP
    UTEST_UNIT_RUN(rt_signal_install_test);
    UTEST_UNIT_RUN(rt_signal_mask_test);
    UTEST_UNIT_RUN(rt_signal_unmask_test);
    UTEST_UNIT_RUN(rt_signal_kill_test);
    UTEST_UNIT_RUN(rt_signal_wait_test);
    UTEST_UNIT_RUN(rt_signal_wait_test2);
#endif /* RT_USING_HEAP */
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.signal_tc", utest_tc_init, utest_tc_cleanup, 1000);
/*********************** end of file ****************************/

View File

@ -0,0 +1,323 @@
/*
* Copyright (c) 2006-2019, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-10-14 tyx the first version
*/
#include <rtthread.h>
#include <stdlib.h>
#include "utest.h"
/* Size of the backing buffer for the test slab heap (1 MiB).
 * fix: parenthesize the expression macro so it cannot change meaning when
 * expanded next to other operators. */
#define TEST_SLAB_SIZE (1024 * 1024)
/**
 * Compare every byte of a memory block against a fill value.
 *
 * @param ptr  start of the block to check
 * @param v    expected fill byte
 * @param size number of bytes to check
 * @return 0 if all bytes equal v, otherwise the (first mismatching byte - v)
 *
 * fix: the original never advanced the pointer, so it compared only the
 * first byte 'size' times -- corruption past offset 0 went undetected.
 */
static int _mem_cmp(void *ptr, rt_uint8_t v, rt_size_t size)
{
    const rt_uint8_t *p = ptr;
    while (size-- != 0)
    {
        if (*p != v)
            return *p - v;
        p++;
    }
    return 0;
}
/* Bookkeeping record stored at the start of each allocated test block. */
struct slab_alloc_context
{
    rt_list_t node;    /* membership in slab_alloc_head.list */
    rt_size_t size;    /* total size of this allocation, header included */
    rt_uint8_t magic;  /* fill byte used to detect corruption of the tail */
};
/* State of one slab_alloc_test run. */
struct slab_alloc_head
{
    rt_list_t list;      /* all live allocations */
    rt_size_t count;     /* number of live allocations */
    rt_tick_t start;     /* last progress-print tick (moves forward) */
    rt_tick_t end;       /* tick at which the test stops */
    rt_tick_t interval;  /* ticks between '#' progress prints */
};
/* random block count range (in units of sizeof(struct slab_alloc_context))
 * and test duration in seconds */
#define SLAB_RANG_ALLOC_BLK_MIN 2
#define SLAB_RANG_ALLOC_BLK_MAX 5
#define SLAB_RANG_ALLOC_TEST_TIME 5
/* Stress rt_slab_alloc/rt_slab_free for SLAB_RANG_ALLOC_TEST_TIME seconds:
 * randomly allocate magic-filled blocks and free them, verifying each
 * block's fill pattern on free. When the heap is exhausted, free half of
 * the live blocks and continue. */
static void slab_alloc_test(void)
{
    struct slab_alloc_head head;
    rt_uint8_t *buf;
    rt_slab_t heap;
    rt_size_t size;
    struct slab_alloc_context *ctx;
    /* init */
    rt_list_init(&head.list);
    head.count = 0;
    head.start = rt_tick_get();
    head.end = rt_tick_get() + rt_tick_from_millisecond(SLAB_RANG_ALLOC_TEST_TIME * 1000);
    head.interval = (head.end - head.start) / 20;
    buf = rt_malloc(TEST_SLAB_SIZE);
    uassert_not_null(buf);
    uassert_int_equal(RT_ALIGN((rt_ubase_t)buf, RT_ALIGN_SIZE), (rt_ubase_t)buf);
    rt_memset(buf, 0xAA, TEST_SLAB_SIZE);
    heap = rt_slab_init("slab_tc", buf, TEST_SLAB_SIZE);
    /* test run: head.start advances at each progress print, so the loop
     * ends once it passes head.end (wrap-safe unsigned comparison) */
    while (head.end - head.start < RT_TICK_MAX / 2)
    {
        if (rt_tick_get() - head.start >= head.interval)
        {
            head.start = rt_tick_get();
            rt_kprintf("#");
        }
        // 60% probability to perform alloc operation
        if (rand() % 10 >= 4)
        {
            size = rand() % SLAB_RANG_ALLOC_BLK_MAX + SLAB_RANG_ALLOC_BLK_MIN;
            size *= sizeof(struct slab_alloc_context);
            ctx = rt_slab_alloc(heap, size);
            if (ctx == RT_NULL)
            {
                /* heap exhausted: free half of the live blocks, checking
                 * each block's magic fill before releasing it */
                if (head.count == 0)
                {
                    break;
                }
                size = head.count / 2;
                while (size != head.count)
                {
                    ctx = rt_list_first_entry(&head.list, struct slab_alloc_context, node);
                    rt_list_remove(&ctx->node);
                    if (ctx->size > sizeof(*ctx))
                    {
                        if (_mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx)) != 0)
                        {
                            uassert_true(0);
                        }
                    }
                    rt_memset(ctx, 0xAA, ctx->size);
                    rt_slab_free(heap, ctx);
                    head.count --;
                }
                continue;
            }
            //if (RT_ALIGN((rt_ubase_t)ctx, RT_ALIGN_SIZE) != (rt_ubase_t)ctx)
            //{
            //    uassert_int_equal(RT_ALIGN((rt_ubase_t)ctx, RT_ALIGN_SIZE), (rt_ubase_t)ctx);
            //}
            /* fill the tail with a fresh magic byte and track the block */
            rt_memset(ctx, 0, size);
            rt_list_init(&ctx->node);
            ctx->size = size;
            ctx->magic = rand() & 0xff;
            if (ctx->size > sizeof(*ctx))
            {
                rt_memset(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
            }
            rt_list_insert_after(&head.list, &ctx->node);
            head.count += 1;
        }
        else
        {
            /* free one block, verifying its magic fill first */
            if (!rt_list_isempty(&head.list))
            {
                ctx = rt_list_first_entry(&head.list, struct slab_alloc_context, node);
                rt_list_remove(&ctx->node);
                if (ctx->size > sizeof(*ctx))
                {
                    if (_mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx)) != 0)
                    {
                        uassert_true(0);
                    }
                }
                rt_memset(ctx, 0xAA, ctx->size);
                rt_slab_free(heap, ctx);
                head.count --;
            }
        }
    }
    /* drain all remaining blocks, verifying each one */
    while (!rt_list_isempty(&head.list))
    {
        ctx = rt_list_first_entry(&head.list, struct slab_alloc_context, node);
        rt_list_remove(&ctx->node);
        if (ctx->size > sizeof(*ctx))
        {
            if (_mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx)) != 0)
            {
                uassert_true(0);
            }
        }
        rt_memset(ctx, 0xAA, ctx->size);
        rt_slab_free(heap, ctx);
        head.count --;
    }
    uassert_int_equal(head.count, 0);
    // slab heap deinit
    rt_slab_detach(heap);
    /* release test resources */
    rt_free(buf);
}
/* random block count range for the realloc test (MIN of 0 makes the
 * realloc-to-zero path reachable) and test duration in seconds */
#define SLAB_RANG_REALLOC_BLK_MIN 0
#define SLAB_RANG_REALLOC_BLK_MAX 5
#define SLAB_RANG_REALLOC_TEST_TIME 5
/* Bookkeeping record stored at the start of each reallocated test block. */
struct slab_realloc_context
{
    rt_size_t size;    /* total size of this allocation, header included */
    rt_uint8_t magic;  /* fill byte used to detect corruption of the tail */
};
/* State of one slab_realloc_test run. */
struct slab_realloc_head
{
    struct slab_realloc_context **ctx_tab;  /* slot table of live blocks */
    rt_size_t count;                        /* number of slots */
    rt_tick_t start;                        /* last progress-print tick */
    rt_tick_t end;                          /* tick at which the test stops */
    rt_tick_t interval;                     /* ticks between progress prints */
};
static void slab_realloc_test(void)
{
struct slab_realloc_head head;
rt_uint8_t *buf;
rt_slab_t heap;
rt_size_t size, idx;
struct slab_realloc_context *ctx;
int res;
size = RT_ALIGN(sizeof(struct slab_realloc_context), RT_ALIGN_SIZE) + RT_ALIGN_SIZE;
size = TEST_SLAB_SIZE / size;
/* init */
head.ctx_tab = RT_NULL;
head.count = size;
head.start = rt_tick_get();
head.end = rt_tick_get() + rt_tick_from_millisecond(SLAB_RANG_ALLOC_TEST_TIME * 1000);
head.interval = (head.end - head.start) / 20;
buf = rt_malloc(TEST_SLAB_SIZE);
uassert_not_null(buf);
uassert_int_equal(RT_ALIGN((rt_ubase_t)buf, RT_ALIGN_SIZE), (rt_ubase_t)buf);
rt_memset(buf, 0xAA, TEST_SLAB_SIZE);
heap = rt_slab_init("slab_tc", buf, TEST_SLAB_SIZE);
/* init ctx tab */
size = head.count * sizeof(struct slab_realloc_context *);
head.ctx_tab = rt_slab_alloc(heap, size);
uassert_not_null(head.ctx_tab);
rt_memset(head.ctx_tab, 0, size);
// test run
while (head.end - head.start < RT_TICK_MAX / 2)
{
if (rt_tick_get() - head.start >= head.interval)
{
head.start = rt_tick_get();
rt_kprintf("#");
}
size = rand() % SLAB_RANG_ALLOC_BLK_MAX + SLAB_RANG_ALLOC_BLK_MIN;
size *= sizeof(struct slab_realloc_context);
idx = rand() % head.count;
ctx = rt_slab_realloc(heap, head.ctx_tab[idx], size);
if (ctx == RT_NULL)
{
if (size == 0)
{
if (head.ctx_tab[idx])
{
head.ctx_tab[idx] = RT_NULL;
}
}
else
{
for (idx = 0; idx < head.count; idx++)
{
ctx = head.ctx_tab[idx];
if (rand() % 2 && ctx)
{
if (ctx->size > sizeof(*ctx))
{
res = _mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
if (res != 0)
{
uassert_int_equal(res, 0);
}
}
rt_memset(ctx, 0xAA, ctx->size);
rt_slab_realloc(heap, ctx, 0);
head.ctx_tab[idx] = RT_NULL;
}
}
}
continue;
}
/* check slab */
if (head.ctx_tab[idx] != RT_NULL)
{
res = 0;
if (ctx->size < size)
{
if (ctx->size > sizeof(*ctx))
{
res = _mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
}
}
else
{
if (size > sizeof(*ctx))
{
res = _mem_cmp(&ctx[1], ctx->magic, size - sizeof(*ctx));
}
}
if (res != 0)
{
uassert_int_equal(res, 0);
}
}
/* init slab */
ctx->magic = rand() & 0xff;
ctx->size = size;
if (ctx->size > sizeof(*ctx))
{
rt_memset(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
}
head.ctx_tab[idx] = ctx;
}
// free all slab
for (idx = 0; idx < head.count; idx++)
{
ctx = head.ctx_tab[idx];
if (ctx == RT_NULL)
{
continue;
}
if (ctx->size > sizeof(*ctx))
{
res = _mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
if (res != 0)
{
uassert_int_equal(res, 0);
}
}
rt_memset(ctx, 0xAA, ctx->size);
rt_slab_realloc(heap, ctx, 0);
head.ctx_tab[idx] = RT_NULL;
}
// slab heap deinit
rt_slab_detach(heap);
/* release test resources */
rt_free(buf);
}
/* No per-suite setup required. */
static rt_err_t utest_tc_init(void)
{
    return RT_EOK;
}
/* No per-suite teardown required; each case frees its own resources. */
static rt_err_t utest_tc_cleanup(void)
{
    return RT_EOK;
}
/* Register the slab allocator stress units. */
static void testcase(void)
{
    UTEST_UNIT_RUN(slab_alloc_test);
    UTEST_UNIT_RUN(slab_realloc_test);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.slab_tc", utest_tc_init, utest_tc_cleanup, 20);

View File

@ -0,0 +1,23 @@
menu "Kernel SMP Testcase"
config UTEST_SMP_AFFFINITY_TC
bool "smp affinity and thread priority test1"
default n
config UTEST_SMP_ASSIGNED_IDLE_CORE_TC
bool "smp threads auto assign to idle cores for test"
default n
config UTEST_SMP_INTERRUPT_PRI_TC
bool "smp interrupt priority test"
default n
config UTEST_SMP_SPINLOCK_TC
bool "smp spinlock test"
default n
config UTEST_SMP_THREAD_PREEMPTION_TC
bool "smp threads preemption test"
default n
endmenu

View File

@ -0,0 +1,27 @@
# SCons build script for the SMP utest cases: each source file is compiled
# only when its corresponding Kconfig option is enabled.
Import('rtconfig')
from building import *
cwd = GetCurrentDir()
src = []
CPPPATH = [cwd]
if GetDepend(['UTEST_SMP_SPINLOCK_TC']):
    src += ['smp_spinlock_tc.c']
if GetDepend(['UTEST_SMP_ASSIGNED_IDLE_CORE_TC']):
    src += ['smp_assigned_idle_cores_tc.c']
if GetDepend(['UTEST_SMP_INTERRUPT_PRI_TC']):
    src += ['smp_interrupt_pri_tc.c']
if GetDepend(['UTEST_SMP_THREAD_PREEMPTION_TC']):
    src += ['smp_thread_preemption_tc.c']
# the affinity option pulls in all three affinity/priority cases
if GetDepend(['UTEST_SMP_AFFFINITY_TC']):
    src += ['smp_bind_affinity_tc.c']
    src += ['smp_affinity_pri1_tc.c']
    src += ['smp_affinity_pri2_tc.c']
group = DefineGroup('utestcases', src, depend = ['RT_USING_UTESTCASES'], CPPPATH = CPPPATH)
Return('group')

View File

@ -0,0 +1,135 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-08-10 RV the first version
*/
#include <rtthread.h>
#include "utest.h"
/**
* @brief Test that threads with low-priority bound cores do not preempt high-priority threads.
*
* @note Create RT_CPUS_NR threads, thread 0 is bound to core 0, lower priority, the priority of the other threads
* for the system's highest and the thread entry function does not let out the CPU control, run the specified
* number of times in thread 0 to create a high-priority not bound to the core of the thread, the thread will
* be preempted by the core 0 is running on the thread!
*/
/* Number of thread runs */
static int run_num = 10;
#define THREAD_STACK_SIZE UTEST_THR_STACK_SIZE
#define THREAD_PRIORITY 2
#define LOW_PRIORITY 50
#define THIGH_PRIORITY 10
static rt_thread_t threads[RT_CPUS_NR];
static rt_thread_t temp_thread;
static struct rt_spinlock lock;
static int thread_inc = 0;
static int num = 0;
/* Entry of the high-priority helper thread created by thread 0: it is
 * expected to be scheduled on core 0, preempting the low-priority thread
 * bound there. */
static void thread_temp_entry(void *parameter)
{
    int id = 0;
    while (1)
    {
        id = rt_hw_cpu_id();
        uassert_int_equal(id, 0);
        extern long list_thread(void);
        list_thread();
        /* NOTE(review): the thread deletes itself through the global
         * handle; code after this call is not expected to run again */
        rt_thread_delete(temp_thread);
    }
}
/* Worker entry. parameter is the thread's index:
 *  - index 0 (low priority, bound to core 0) counts iterations and, after
 *    run_num passes, spawns the high-priority helper thread;
 *  - all other indices (highest priority, bound to cores 1..N-1) must never
 *    run on core 0 and then spin without yielding. */
static void thread_entry(void *parameter)
{
    int id = 0;
    int para = *(int *)parameter;
    while (1)
    {
        /* NOTE(review): thread_inc is incremented by every worker without
         * synchronization -- assumed tolerable for this progress check */
        thread_inc++;
        id = rt_hw_cpu_id();
        if (para == 0)
        {
            if (thread_inc == run_num)
            {
                uassert_int_equal(id, 0);
                extern long list_thread(void);
                list_thread();
                /* Creating high-priority untied core threads */
                temp_thread = rt_thread_create("Tn", thread_temp_entry, RT_NULL, THREAD_STACK_SIZE, THIGH_PRIORITY, 20);
                if (temp_thread != RT_NULL)
                {
                    rt_thread_startup(temp_thread);
                }
            }
            rt_thread_delay(5);
        }
        else
        {
            uassert_int_not_equal(id, 0);
            /* busy-spin so this core stays occupied by a high-priority thread */
            while (1);
        }
    }
}
/* Build the scenario: one low-priority thread bound to core 0 plus one
 * highest-priority busy-spinning thread bound to each remaining core, then
 * let them run for 100 ticks. */
static void smp_affinity_pri1_tc(void)
{
    static int params[RT_CPUS_NR] = {0};
    char thread_name[8];
    int i;
    for (i = 0; i < RT_CPUS_NR; i++)
    {
        params[i] = i;
    }
    /* Creating threads with low priority bindings to core 0 */
    threads[0] = rt_thread_create("T0", thread_entry, (int *)&params[0], THREAD_STACK_SIZE, LOW_PRIORITY, 20);
    if (threads[0] != RT_NULL)
    {
        rt_thread_control(threads[0], RT_THREAD_CTRL_BIND_CPU, (void *)0);
        rt_thread_startup(threads[0]);
    }
    /* Create high-priority bound threads with thread functions that don't let out CPU control */
    for (i = 1; i < RT_CPUS_NR; i++)
    {
        rt_snprintf(thread_name, sizeof(thread_name), "T%d", i);
        threads[i] = rt_thread_create(thread_name, thread_entry, (int *)&params[i], THREAD_STACK_SIZE, THREAD_PRIORITY, 20);
        if (threads[i] != RT_NULL)
        {
            rt_thread_control(threads[i], RT_THREAD_CTRL_BIND_CPU, (void *)i);
            rt_thread_startup(threads[i]);
        }
    }
    /* let the scenario play out before the suite cleanup deletes the threads */
    rt_thread_delay(100);
}
static rt_err_t utest_tc_init(void)
{
rt_spin_lock_init(&lock);
return RT_EOK;
}
static rt_err_t utest_tc_cleanup(void)
{
for (num = 0; num < RT_CPUS_NR; num++)
{
rt_thread_delete(threads[num]);
}
return RT_EOK;
}
static void testcase(void)
{
UTEST_UNIT_RUN(smp_affinity_pri1_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.smp.affinity_pri1_tc", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,128 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-08-10 RV the first version
*/
#include <rtthread.h>
#include "utest.h"
/**
* @brief Threads are automatically balanced across cores.
*
* @note Create RT_CPUS_NR threads, thread 0 is not bound to core 0, higher priority, the priority of the other
* threads for the system's highest and the thread entry function does not let out the CPU control, run the
* specified number of times after the creation of thread 0 in thread 0, a low-priority bound to the core 0,
* the thread will not preempt the core 0 is running on threads
*/
/* Number of thread runs */
static int run_num = 10;
#define THREAD_STACK_SIZE UTEST_THR_STACK_SIZE
#define THREAD_PRIORITY 2
#define LOW_PRIORITY 50
#define THIGH_PRIORITY 10
static rt_thread_t threads[RT_CPUS_NR];
static rt_thread_t temp_thread;
static struct rt_spinlock lock;
static int thread_inc = 0;
static int run_flag = 0;
static int num = 0;
static void thread_temp_entry(void *parameter)
{
run_flag = 1;
rt_thread_delete(temp_thread);
}
static void thread_entry(void *parameter)
{
int id = 0;
int para = *(int *)parameter;
while (1)
{
thread_inc++;
id = rt_hw_cpu_id();
if (para == 0)
{
if (thread_inc == run_num)
{
uassert_int_equal(id, 0);
temp_thread = rt_thread_create("Tn", thread_temp_entry, RT_NULL, THREAD_STACK_SIZE, LOW_PRIORITY, 20);
if (temp_thread != RT_NULL)
{
rt_thread_control(temp_thread, RT_THREAD_CTRL_BIND_CPU, (void *)0);
rt_thread_startup(temp_thread);
uassert_int_not_equal(run_flag, 1);
}
}
rt_thread_delay(5);
}
else
{
uassert_int_not_equal(id, 0);
while (1);
}
}
}
/* Build the scenario: one unbound higher-priority thread (index 0) plus one
 * highest-priority busy-spinning thread bound to each remaining core, then
 * let them run for 50 ticks. */
static void smp_affinity_pri2_tc(void)
{
    static int params[RT_CPUS_NR] = {0};
    char thread_name[8];
    int i;
    for (i = 0; i < RT_CPUS_NR; i++)
    {
        params[i] = i;
    }
    /* thread 0 is intentionally NOT bound to a core */
    threads[0] = rt_thread_create("T0", thread_entry, (int *)&params[0], THREAD_STACK_SIZE, THIGH_PRIORITY, 20);
    if (threads[0] != RT_NULL)
    {
        uassert_true(1);
        rt_thread_startup(threads[0]);
    }
    /* Create high-priority bound threads with thread functions that don't let out CPU control */
    for (i = 1; i < RT_CPUS_NR; i++)
    {
        rt_snprintf(thread_name, sizeof(thread_name), "T%d", i);
        threads[i] = rt_thread_create(thread_name, thread_entry, (int *)&params[i], THREAD_STACK_SIZE, THREAD_PRIORITY, 20);
        if (threads[i] != RT_NULL)
        {
            uassert_true(1);
            rt_thread_control(threads[i], RT_THREAD_CTRL_BIND_CPU, (void *)i);
            rt_thread_startup(threads[i]);
        }
    }
    /* let the scenario play out before the suite cleanup deletes the threads */
    rt_thread_delay(50);
}
static rt_err_t utest_tc_init(void)
{
rt_spin_lock_init(&lock);
return RT_EOK;
}
static rt_err_t utest_tc_cleanup(void)
{
for (num = 0; num < RT_CPUS_NR; num++)
{
rt_thread_delete(threads[num]);
}
return RT_EOK;
}
static void testcase(void)
{
UTEST_UNIT_RUN(smp_affinity_pri2_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.smp.affinity_pri2_tc", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,91 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-08-10 RV the first version
*/
#include <rtthread.h>
#include "utest.h"
/**
* @brief Threads are automatically balanced across cores.
*
* @note Create multiple threads untied core threads, run them for a while on each core to see
* if the threads are automatically distributed evenly, run for a while to output the threads
* running on each core.
*/
#define THREAD_STACK_SIZE UTEST_THR_STACK_SIZE
#define THREAD_PRIORITY 20
static rt_thread_t threads[RT_CPUS_NR];
/* fix: 'tick' and 'finsh_flag' are written by the worker threads and
 * busy-polled by the test body without any locking; 'volatile' stops the
 * compiler from caching them in the polling loop (tick++ remains non-atomic
 * across cores -- it is only a progress counter). */
static volatile int tick = 0, finsh_flag = 0;
static int num = 0;
/* thread entry function */
/* Worker entry: every worker bumps the shared tick counter; once it reaches
 * the threshold, the thread list is dumped and the finish flag is raised so
 * the test body can stop waiting.
 *
 * fix: 'tick' is incremented by several threads without synchronization, so
 * the exact value 100 can be skipped (two increments racing), which would
 * leave the test body busy-waiting forever. Test for >= 100 instead, with
 * the flag itself acting as a one-shot guard. */
static void thread_entry(void *parameter)
{
    while (1)
    {
        tick++;
        if (tick >= 100 && finsh_flag != 0xA55A)
        {
            /* Output the current core running threads */
            extern long list_thread(void);
            list_thread();
            finsh_flag = 0xA55A;
            uassert_true(1);
        }
        rt_thread_delay(5);
    }
}
/* Create RT_CPUS_NR unbound threads and busy-wait until one of them has
 * reached the iteration threshold and raised the finish flag; the scheduler
 * is expected to spread the threads over the idle cores. */
static void thread_on_idle_core_tc(void)
{
    static int params[RT_CPUS_NR] = {0};
    char thread_name[8];
    int i;
    /* Initialise the thread entry parameters */
    for (i = 0; i < RT_CPUS_NR; i++)
    {
        params[i] = i;
    }
    /* Create RT_CPUS_NR threads and pass the entry parameters for each thread */
    for (i = 0; i < RT_CPUS_NR; i++)
    {
        rt_snprintf(thread_name, sizeof(thread_name), "T%d", i);
        threads[i] = rt_thread_create(thread_name, thread_entry, &params[i], THREAD_STACK_SIZE, THREAD_PRIORITY, 20);
        if (threads[i] != RT_NULL)
        {
            uassert_true(1);
            rt_thread_startup(threads[i]);
        }
    }
    /* Waiting for test cases to finish */
    while (finsh_flag != 0xA55A);
}
static rt_err_t utest_tc_init(void)
{
rt_kprintf("[Test case]: created threads are automatically assigned to run on idle cores\r\n");
return RT_EOK;
}
static rt_err_t utest_tc_cleanup(void)
{
for (num = 0; num < RT_CPUS_NR; num++)
{
rt_thread_delete(threads[num]);
}
return RT_EOK;
}
static void testcase(void)
{
UTEST_UNIT_RUN(thread_on_idle_core_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.smp.assigned_idle_cores_tc", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,114 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-08-10 RV the first version
*/
#include <rtthread.h>
#include "utest.h"
/**
* @brief Binding core affinity testcase.
*
* @note Create RT_CPUS_NR threads, thread 0 is bound to core 0, other threads are not bound to specific cores,
* after running for a set number of times, count the number of times each core is run on the corresponding core,
* thread 0 should always be run on core 0, other threads will be run on different cores.
*/
/* Number of thread runs */
static int run_num = 100;
#define THREAD_STACK_SIZE UTEST_THR_STACK_SIZE
#define THREAD_PRIORITY 20
static rt_thread_t threads[RT_CPUS_NR];
static struct rt_spinlock lock;
static int thread_inc[RT_CPUS_NR] = {0};  /* runs observed on the matching core */
static int thread_tic[RT_CPUS_NR] = {0};  /* total runs per thread */
/* fix: finsh_flag is incremented by the worker threads and busy-polled by
 * the test body; 'volatile' stops the compiler from caching it in the
 * polling loop */
static volatile int finsh_flag = 0;
static int num = 0;
/* Worker entry. parameter is the thread's index: each pass records whether
 * the thread ran on the core matching its index; after run_num passes it
 * checks the expectation (thread 0 is bound to core 0 and must always match,
 * the unbound threads are expected to have migrated at least once) and bumps
 * the finish counter. */
static void thread_entry(void *parameter)
{
    int id = 0;
    int para = *(int *)parameter;
    while (1)
    {
        thread_tic[para]++;
        id = rt_hw_cpu_id();
        if (para == id)
        {
            thread_inc[para]++;
        }
        if (thread_tic[para] == run_num)
        {
            /* NOTE(review): the unbound case assumes the scheduler migrated
             * the thread at least once in run_num passes -- confirm this
             * holds on the target's load balancer */
            if (para == 0)
                uassert_int_equal(thread_inc[para], thread_tic[para]);
            else
                uassert_int_not_equal(thread_inc[para], thread_tic[para]);
            finsh_flag ++;
        }
        rt_thread_delay(5);
    }
}
/* Create RT_CPUS_NR threads (thread 0 bound to core 0, the rest unbound),
 * wait until every thread has completed run_num passes, then print how often
 * each thread ran on the core matching its index. */
static void thread_bind_affinity_tc(void)
{
    static int params[RT_CPUS_NR] = {0};
    char thread_name[8];
    int i, j;
    for (i = 0; i < RT_CPUS_NR; i++)
    {
        params[i] = i;
    }
    /* Create RT_CPUS_NR threads Thread 0 is bound to core 0 Other threads are not bound */
    for (i = 0; i < RT_CPUS_NR; i++)
    {
        rt_snprintf(thread_name, sizeof(thread_name), "thread%d", i);
        threads[i] = rt_thread_create(thread_name, thread_entry, (int *)&params[i], THREAD_STACK_SIZE, THREAD_PRIORITY, 20);
        if (i == 0)
        {
            rt_thread_control(threads[0], RT_THREAD_CTRL_BIND_CPU, (void *)0);
        }
        if (threads[i] != RT_NULL)
        {
            rt_thread_startup(threads[i]);
        }
    }
    /* busy-wait until every worker has finished its run_num passes */
    while (finsh_flag != RT_CPUS_NR);
    /* Displays the number of times a thread was executed on the relevant core */
    for (j = 0; j < RT_CPUS_NR; j++)
    {
        /* the spinlock only serializes the console output */
        rt_spin_lock(&lock);
        rt_kprintf("Total runs[%d], Number of times thread[%d] run on [core%d]: [%4d], always run at core%d ? %s \r\n", run_num, j, j, thread_inc[j], j, (thread_inc[j] == run_num) ? "yes" : "no");
        rt_spin_unlock(&lock);
    }
}
static rt_err_t utest_tc_init(void)
{
rt_spin_lock_init(&lock);
return RT_EOK;
}
static rt_err_t utest_tc_cleanup(void)
{
for (num = 0; num < RT_CPUS_NR; num++)
{
rt_thread_delete(threads[num]);
}
return RT_EOK;
}
static void testcase(void)
{
UTEST_UNIT_RUN(thread_bind_affinity_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.smp.bind_affinity_tc", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,120 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-08-10 RV the first version
*/
#include <rtthread.h>
#include "utest.h"
#include <interrupt.h>
/**
* @brief Setting the Interrupt Priority Test.
*
* @note Without turning off interrupts, interrupts respond in the order in which they are triggered.
* With interrupts turned off, low and high priority interrupts are triggered sequentially,
* and when interrupts are turned on, high priority interrupts respond first.
*/
#define RES_VAL 0X0
#define SET_VAL 0XA
#define RT_SPI_1 1
#define RT_SPI_2 2
#define RT_SPI_PRI_HIGH 120
#define RT_SPI_PRI_LOW 140
static int mode = 0;
static int ipi_val[2] = {0, 0};
/* Software Interrupt 1 Service Functions */
static void rt_scheduler_ipi1_handler(int vector, void *param)
{
ipi_val[0] = SET_VAL;
if (mode == 0)
{
uassert_true(ipi_val[0] > ipi_val[1]);
}
else
{
ipi_val[0] = RES_VAL;
ipi_val[1] = RES_VAL;
}
}
/* Software Interrupt 2 Service Functions */
static void rt_scheduler_ipi2_handler(int vector, void *param)
{
ipi_val[1] = SET_VAL;
if (mode == 0)
{
ipi_val[0] = RES_VAL;
ipi_val[1] = RES_VAL;
}
else
{
uassert_true(ipi_val[0] < ipi_val[1]);
}
}
/* Interrupt priority testcases 1 */
/* Interrupt priority testcase 1: with interrupts enabled, both IPIs are
 * triggered back to back and should be serviced in trigger order (checked
 * by the handlers via mode == 0). */
static void int_pri1_tc(void)
{
    mode = 0;
    unsigned int pri1, pri2;
    pri1 = rt_hw_interrupt_get_priority(RT_SPI_1);
    pri2 = rt_hw_interrupt_get_priority(RT_SPI_2);
    /* NOTE(review): utest_tc_init gives RT_SPI_1 the numerically larger
     * priority value, so pri1 < pri2 is expected to be false and this
     * assert is skipped -- confirm the guarded assert is intentional */
    if (pri1 < pri2)
        uassert_true(pri1 < pri2);
    /* Trigger interrupt */
    rt_hw_ipi_send(RT_SPI_1, 0x1);
    rt_hw_ipi_send(RT_SPI_2, 0x1);
    rt_thread_delay(5);
}
/* Interrupt priority testcases 2 */
/* Interrupt priority testcase 2: both IPIs are raised while local
 * interrupts are disabled; when interrupts are re-enabled the
 * higher-priority one (RT_SPI_2) must be serviced first (checked by the
 * handlers via mode == 1). */
static void int_pri2_tc(void)
{
    mode = 1;
    unsigned int pri1, pri2;
    pri1 = rt_hw_interrupt_get_priority(RT_SPI_1);
    pri2 = rt_hw_interrupt_get_priority(RT_SPI_2);
    /* NOTE(review): same guarded assert as int_pri1_tc -- expected to be
     * skipped given the priorities set in utest_tc_init; confirm intent */
    if (pri1 < pri2)
        uassert_true(pri1 < pri2);
    rt_base_t level = rt_hw_local_irq_disable();
    /* Trigger interrupt while interrupts are masked */
    rt_hw_ipi_send(RT_SPI_1, 0x1);
    rt_hw_ipi_send(RT_SPI_2, 0x1);
    rt_hw_local_irq_enable(level);
    rt_thread_delay(5);
}
static rt_err_t utest_tc_init(void)
{
/* Setting the priority of software interrupts */
rt_hw_interrupt_set_priority(RT_SPI_1, RT_SPI_PRI_LOW);
rt_hw_interrupt_set_priority(RT_SPI_2, RT_SPI_PRI_HIGH);
/* Register software interrupt service functions */
rt_hw_ipi_handler_install(RT_SPI_1, rt_scheduler_ipi1_handler);
rt_hw_ipi_handler_install(RT_SPI_2, rt_scheduler_ipi2_handler);
return RT_EOK;
}
static rt_err_t utest_tc_cleanup(void)
{
return RT_EOK;
}
static void testcase(void)
{
UTEST_UNIT_RUN(int_pri1_tc);
UTEST_UNIT_RUN(int_pri2_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.smp.interrupt_pri_tc", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,93 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-08-13 RV the first version
*/
#include <rtthread.h>
#include "utest.h"
#include <interrupt.h>
/**
* @brief Spinlock testcase.
*
* @note Create multiple threads and use spinlocks to protect shared memory
*/
#define THREAD_PRIORITY 20
#define THREAD_TIMESLICE 20
#define THREAD_STACK_SIZE UTEST_THR_STACK_SIZE
static rt_thread_t thread1;
static rt_thread_t thread2;
/* Completion flag: busy-polled by the test thread while thread2 writes it.
 * Must be volatile, otherwise the compiler may hoist the read out of the
 * `while (finsh_flag == 0);` loop and spin forever. */
static volatile rt_uint8_t finsh_flag = 0;
static struct rt_spinlock lock;
/* Shared counters protected by `lock` (file-scope statics start at zero). */
static rt_uint8_t number1, number2 = 0;
/* Worker 1: bumps both counters inside the spinlock. The rt_thread_yield()
 * between the two increments deliberately widens the window in which the
 * counters are out of step, so another core would observe the mismatch if
 * the spinlock failed to exclude it. */
static void rt_thread_entry1(void *parameter)
{
    while (1)
    {
        rt_spin_lock(&lock);
        number1++;
        rt_thread_yield();
        number2++;
        rt_spin_unlock(&lock);
        rt_thread_delay(5);
    }
}
/* Worker 2: under the lock the two counters must always be equal; signals
 * completion once number1 reaches 10. */
static void rt_thread_entry2(void *parameter)
{
    while (1)
    {
        rt_spin_lock(&lock);
        uassert_int_equal(number1, number2);
        number1++;
        number2++;
        rt_spin_unlock(&lock);
        /* NOTE(review): number1 is read outside the lock here; harmless for
         * this monotonically increasing counter, but racy in principle. */
        if (number1 >= 10)
        {
            finsh_flag = 1;
        }
        rt_thread_delay(5);
    }
}
/* Spinlock testcase entry: spawn the two workers and busy-wait until
 * worker 2 reports completion. */
static void smp_spinlock_tc(void)
{
    /* Reset the completion flag so the unit is safe to re-run. */
    finsh_flag = 0;
    thread1 = rt_thread_create("T1", rt_thread_entry1, RT_NULL, THREAD_STACK_SIZE, THREAD_PRIORITY, 20);
    /* Fail loudly on creation error; previously a failure silently left the
     * busy-wait below spinning forever. */
    uassert_true(thread1 != RT_NULL);
    if (thread1 != RT_NULL)
    {
        rt_thread_startup(thread1);
    }
    thread2 = rt_thread_create("T2", rt_thread_entry2, RT_NULL, THREAD_STACK_SIZE, THREAD_PRIORITY - 1, 20);
    uassert_true(thread2 != RT_NULL);
    if (thread2 != RT_NULL)
    {
        rt_thread_startup(thread2);
    }
    /* Only wait when both workers actually started. */
    if (thread1 != RT_NULL && thread2 != RT_NULL)
    {
        while (finsh_flag == 0);
    }
}
/* Testcase setup: the spinlock must be initialized before any worker uses it. */
static rt_err_t utest_tc_init(void)
{
    rt_spin_lock_init(&lock);
    return RT_EOK;
}
static rt_err_t utest_tc_cleanup(void)
{
rt_thread_delete(thread1);
rt_thread_delete(thread2);
return RT_EOK;
}
/* Run the SMP spinlock unit. */
static void testcase(void)
{
    UTEST_UNIT_RUN(smp_spinlock_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.smp.spinlock_tc", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,87 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-08-10 RV the first version
*/
#include <rtthread.h>
#include "utest.h"
/**
* @brief Thread Preemption Test with Different Priorities.
*
* @note Create multiple threads, low-priority threads run first,
* high-priority threads preempt low-priority threads, and
* print the current status of each core in the thread's entry function.
*/
#define THREAD_PRIORITY_HIGH 21
#define THREAD_PRIORITY_LOW 30
#define THREAD_STACK_SIZE UTEST_THR_STACK_SIZE
static rt_thread_t threads[2];
static struct rt_spinlock lock;
/* High-priority thread: prints the thread list under the spinlock so the
 * output is not interleaved with the low-priority thread's. */
static void thread_high_entry(void *parameter)
{
    uassert_true(1); /* marks that the thread actually ran */
    rt_spin_lock(&lock);
    rt_kprintf("High priority thread is running\n");
    extern long list_thread(void);
    list_thread();
    rt_spin_unlock(&lock);
}
/* Low-priority thread: runs first and prints the thread list; later it is
 * preempted by the high-priority thread. */
static void thread_low_entry(void *parameter)
{
    uassert_true(1); /* marks that the thread actually ran */
    rt_spin_lock(&lock);
    rt_kprintf("Low priority thread is running\n");
    extern long list_thread(void);
    list_thread();
    rt_spin_unlock(&lock);
}
/* Preemption testcase: start a low-priority thread first, then a
 * high-priority one which should preempt it. */
static void thread_preemptions_tc(void)
{
    /* Creating low-priority thread */
    threads[0] = rt_thread_create("tlow", thread_low_entry, RT_NULL, THREAD_STACK_SIZE, THREAD_PRIORITY_LOW, 10);
    /* Assert creation unconditionally; the old `uassert_true(1)` inside the
     * success branch could never flag a failure. */
    uassert_true(threads[0] != RT_NULL);
    if (threads[0] != RT_NULL)
    {
        rt_thread_startup(threads[0]);
    }
    /* let the low-priority thread get going before the preemptor appears */
    rt_thread_delay(5);
    /* Creating high-priority thread */
    threads[1] = rt_thread_create("thigh", thread_high_entry, RT_NULL, THREAD_STACK_SIZE, THREAD_PRIORITY_HIGH, 10);
    uassert_true(threads[1] != RT_NULL);
    if (threads[1] != RT_NULL)
    {
        rt_thread_startup(threads[1]);
    }
    /* give both threads time to finish before the unit returns */
    rt_thread_delay(50);
}
/* Testcase setup: initialize the spinlock used to serialize the printouts. */
static rt_err_t utest_tc_init(void)
{
    rt_spin_lock_init(&lock);
    return RT_EOK;
}
/* Both threads exit on their own; nothing to release. */
static rt_err_t utest_tc_cleanup(void)
{
    return RT_EOK;
}
/* Run the preemption unit. */
static void testcase(void)
{
    UTEST_UNIT_RUN(thread_preemptions_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.smp.thread_preemptions_tc", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,756 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-09.01 yangjie the firet version
* 2021-10.11 mazhiyuan add idle, yield, suspend, control, priority, delay_until
*/
#define __RT_IPC_SOURCE__ /* include internal API for utest */
#include <rtthread.h>
#include <stdlib.h>
#include "utest.h"
#define THREAD_STACK_SIZE UTEST_THR_STACK_SIZE
#define THREAD_TIMESLICE 10
/* Static stack + control block for the rt_thread_init test. */
rt_align(RT_ALIGN_SIZE)
static char thread2_stack[UTEST_THR_STACK_SIZE];
static struct rt_thread thread2;
/* Handles of the dynamically created test threads. */
#ifdef RT_USING_HEAP
static rt_thread_t tid1 = RT_NULL;
static rt_thread_t tid3 = RT_NULL;
static rt_thread_t tid4 = RT_NULL;
static rt_thread_t tid5 = RT_NULL;
static rt_thread_t tid6 = RT_NULL;
static rt_thread_t tid7 = RT_NULL;
#endif /* RT_USING_HEAP */
/* Cross-thread flags/counters; volatile because they are busy-polled. */
static volatile rt_uint32_t tid3_delay_pass_flag = 0;
static volatile rt_uint32_t tid3_finish_flag = 0;
static volatile rt_uint32_t tid4_finish_flag = 0;
static volatile rt_uint32_t tid6_finish_flag = 0;
static volatile rt_uint32_t thread5_source = 0;
#ifndef RT_USING_SMP
static volatile rt_uint32_t thread_yield_flag = 0;
#endif
static volatile rt_uint32_t entry_idle_hook_times = 0;
/* NOTE(review): identifiers with a leading double underscore are reserved
 * for the implementation; consider renaming __current_thread. */
static rt_thread_t __current_thread;
static rt_uint8_t change_priority;
static volatile rt_uint32_t count = 0;
/* Spin forever; the testcase deletes this thread explicitly. */
void thread1_entry(void *param)
{
    for (;;)
    {
        /* busy loop */
    }
}
/* Verify the dynamic-thread lifecycle: rt_thread_create -> rt_thread_startup
 * -> rt_thread_delete, asserting each step succeeds. */
static void test_dynamic_thread(void)
{
    rt_err_t ret_startup = -RT_ERROR;
    rt_err_t ret_delete = -RT_ERROR;
    /* lower priority than the utest thread, so it never actually runs */
    tid1 = rt_thread_create("thread1",
                            thread1_entry,
                            (void *)1,
                            THREAD_STACK_SIZE,
                            UTEST_THR_PRIORITY + 1,
                            THREAD_TIMESLICE - 5);
    if (tid1 == RT_NULL)
    {
        uassert_false(tid1 == RT_NULL);
        goto __exit;
    }
    ret_startup = rt_thread_startup(tid1);
    if (ret_startup != RT_EOK)
    {
        uassert_false(ret_startup != RT_EOK);
        goto __exit;
    }
    ret_delete = rt_thread_delete(tid1);
    if (ret_delete != RT_EOK)
    {
        uassert_false(ret_delete != RT_EOK);
        goto __exit;
    }
    uassert_true(tid1 != RT_NULL && ret_startup == RT_EOK && ret_delete == RT_EOK);
__exit:
    /* on a partial failure, make sure the thread does not leak */
    if (tid1 != RT_NULL && ret_delete != RT_EOK)
    {
        rt_thread_delete(tid1);
    }
    return;
}
/* Spin forever; the testcase detaches this thread explicitly. */
void thread2_entry(void *param)
{
    for (;;)
    {
        /* busy loop */
    }
}
/* Verify the static-thread lifecycle: rt_thread_init -> rt_thread_startup
 * -> rt_thread_detach, asserting each step succeeds. */
static void test_static_thread(void)
{
    rt_err_t ret_init = -RT_ERROR;
    rt_err_t ret_startup = -RT_ERROR;
    rt_err_t ret_detach = -RT_ERROR;
    ret_init = rt_thread_init(&thread2,
                              "thread2",
                              thread2_entry,
                              (void *)2,
                              &thread2_stack[0],
                              sizeof(thread2_stack),
                              UTEST_THR_PRIORITY + 1,
                              THREAD_TIMESLICE);
    if (ret_init != RT_EOK)
    {
        uassert_false(ret_init != RT_EOK);
        goto __exit;
    }
    ret_startup = rt_thread_startup(&thread2);
    if (ret_startup != RT_EOK)
    {
        uassert_false(ret_startup != RT_EOK);
        goto __exit;
    }
    ret_detach = rt_thread_detach(&thread2);
    if (ret_detach != RT_EOK)
    {
        uassert_false(ret_detach != RT_EOK);
        goto __exit;
    }
    uassert_true(ret_init == RT_EOK && ret_startup == RT_EOK && ret_detach == RT_EOK);
__exit:
    /* on a partial failure, make sure the thread object is released */
    if (ret_init == RT_EOK && ret_detach != RT_EOK)
    {
        rt_thread_detach(&thread2);
    }
    return;
}
/* Sleep 15 ticks and check the measured latency stays within [15, 16];
 * reports the verdict through the two volatile flags. */
static void thread3_entry(void *parameter)
{
    rt_tick_t tick, latency_tick;
    tick = rt_tick_get();
    rt_thread_delay(15);
    latency_tick = rt_tick_get() - tick;
    if (latency_tick > 16 || latency_tick < 15)
    {
        /* out of tolerance: mark finished but failed */
        tid3_finish_flag = 1;
        tid3_delay_pass_flag = 0;
        return;
    }
    tid3_delay_pass_flag = 1;
    tid3_finish_flag = 1;
}
/* rt_thread_delay accuracy test: run thread3 (higher priority than the
 * utest thread) and busy-wait for its verdict. */
static void test_thread_delay(void)
{
    rt_err_t ret_startup = -RT_ERROR;
    tid3 = rt_thread_create("thread3",
                            thread3_entry,
                            RT_NULL,
                            THREAD_STACK_SIZE,
                            UTEST_THR_PRIORITY - 1,
                            THREAD_TIMESLICE);
    if (tid3 == RT_NULL)
    {
        LOG_E("rt_thread_create failed!");
        uassert_false(tid3 == RT_NULL);
        goto __exit;
    }
    ret_startup = rt_thread_startup(tid3);
    if (ret_startup != RT_EOK)
    {
        LOG_E("rt_thread_startup failed!");
        uassert_false(1);
        goto __exit;
    }
    /* thread3 exits on its own; wait for it to post its result */
    while (tid3_finish_flag != 1);
    uassert_true(tid3_delay_pass_flag == 1);
__exit:
    return;
}
/* Idle hook under test: counts how many times the idle thread invoked it. */
static void idle_hook(void)
{
    entry_idle_hook_times += 1;
}
/* Sleep repeatedly so the idle thread gets CPU time (and runs the hook),
 * then remove the hook and signal completion. */
static void thread4_entry(void *parameter)
{
    rt_uint32_t delay_times = 5;
    while (delay_times --)
    {
        rt_thread_mdelay(300);
    }
    rt_thread_idle_delhook(idle_hook);
    tid4_finish_flag = 1;
}
/* rt_thread_idle_sethook/_delhook test: install the hook, let thread4 idle
 * the CPU for a while, then verify the hook was invoked at least once. */
static void test_idle_hook(void)
{
    rt_err_t ret_startup = -RT_ERROR;
    rt_thread_idle_sethook(idle_hook);
    tid4 = rt_thread_create("thread4",
                            thread4_entry,
                            RT_NULL,
                            THREAD_STACK_SIZE,
                            UTEST_THR_PRIORITY - 1,
                            THREAD_TIMESLICE);
    if (tid4 == RT_NULL)
    {
        LOG_E("rt_thread_create failed!");
        uassert_false(tid4 == RT_NULL);
        goto __exit;
    }
    ret_startup = rt_thread_startup(tid4);
    if (ret_startup != RT_EOK)
    {
        LOG_E("rt_thread_startup failed!");
        uassert_false(1);
        goto __exit;
    }
    /* sleep-poll (not busy-wait) so the idle thread can actually run */
    while (tid4_finish_flag != 1)
    {
        rt_thread_mdelay(200);
    }
    uassert_true(entry_idle_hook_times > 0);
__exit:
    return;
}
/* Increments the shared counter every 5 ticks; yields once the counter
 * reaches 5 to exercise rt_thread_yield() from a running thread. */
static void thread5_entry(void *parameter)
{
    while (1)
    {
        thread5_source ++;
        rt_thread_delay(5);
        if (thread5_source == 5)
        {
            rt_thread_yield();
        }
    }
}
/* Spins incrementing the shared counter until it exceeds 9, then signals
 * completion; the final pre-increment leaves thread5_source at 10. */
static void thread6_entry(void *parameter)
{
    while (++ thread5_source <= 9);
    tid6_finish_flag = 1;
}
/* rt_thread_yield test: thread5 and thread6 (same priority) cooperate on a
 * shared counter; the final value of 10 shows yielding handed the CPU over
 * as expected. */
static void test_thread_yield(void)
{
    rt_err_t ret_startup = -RT_ERROR;
    thread5_source = 0;
    tid5 = rt_thread_create("thread5",
                            thread5_entry,
                            RT_NULL,
                            THREAD_STACK_SIZE,
                            UTEST_THR_PRIORITY - 1,
                            THREAD_TIMESLICE);
    if (tid5 == RT_NULL)
    {
        LOG_E("rt_thread_create failed!");
        uassert_false(tid5 == RT_NULL);
        goto __exit;
    }
    ret_startup = rt_thread_startup(tid5);
    if (ret_startup != RT_EOK)
    {
        LOG_E("rt_thread_startup failed!");
        uassert_false(1);
        goto __exit;
    }
    tid6 = rt_thread_create("thread6",
                            thread6_entry,
                            RT_NULL,
                            THREAD_STACK_SIZE,
                            UTEST_THR_PRIORITY - 1,
                            THREAD_TIMESLICE);
    if (tid6 == RT_NULL)
    {
        LOG_E("rt_thread_create failed!");
        uassert_false(tid6 == RT_NULL);
        goto __exit;
    }
    ret_startup = rt_thread_startup(tid6);
    if (ret_startup != RT_EOK)
    {
        LOG_E("rt_thread_startup failed!");
        uassert_false(1);
        goto __exit;
    }
    /* thread6 exits on its own and posts the flag */
    while (tid6_finish_flag != 1);
    uassert_true(thread5_source == 10);
__exit:
    /* thread5 loops forever and must be deleted explicitly */
    if (tid5 != RT_NULL)
    {
        rt_thread_delete(tid5);
    }
    return;
}
/* Spin forever; the testcase closes this thread via rt_thread_control. */
static void thread7_entry(void *parameter)
{
    for (;;)
    {
        /* busy loop */
    }
}
/* rt_thread_control test: STARTUP, CHANGE_PRIORITY (verified under the
 * scheduler lock), and CLOSE. */
static void test_thread_control(void)
{
    rt_err_t ret_control = -RT_ERROR;
    rt_err_t rst_delete = -RT_ERROR;
    rt_sched_lock_level_t slvl;
    tid7 = rt_thread_create("thread7",
                            thread7_entry,
                            RT_NULL,
                            THREAD_STACK_SIZE,
                            UTEST_THR_PRIORITY + 1,
                            THREAD_TIMESLICE);
    if (tid7 == RT_NULL)
    {
        LOG_E("rt_thread_create failed!");
        uassert_false(tid7 == RT_NULL);
        goto __exit;
    }
    ret_control = rt_thread_control(tid7, RT_THREAD_CTRL_STARTUP, RT_NULL);
    if (ret_control != RT_EOK)
    {
        LOG_E("rt_thread_control failed!");
        uassert_false(1);
        goto __exit;
    }
    rt_thread_mdelay(200);
    rt_thread_control(tid7, RT_THREAD_CTRL_CHANGE_PRIORITY, &change_priority);
    /* read the current priority atomically w.r.t. the scheduler */
    rt_sched_lock(&slvl);
    if (rt_sched_thread_get_curr_prio(tid7) != change_priority)
    {
        LOG_E("rt_thread_control failed!");
        uassert_false(1);
        rt_sched_unlock(slvl);
        goto __exit;
    }
    rt_sched_unlock(slvl);
    rst_delete = rt_thread_control(tid7, RT_THREAD_CTRL_CLOSE, RT_NULL);
    if (rst_delete != RT_EOK)
    {
        LOG_E("rt_thread_control failed!");
        uassert_false(rst_delete != RT_EOK);
        goto __exit;
    }
    uassert_true(1);
__exit:
    /* if CLOSE failed (or was never reached), delete the thread directly */
    if (tid7 != RT_NULL && rst_delete != RT_EOK)
    {
        rt_thread_delete(tid7);
    }
    return;
}
/* Drive the shared counter up to 10, then exit. */
static void thread8_entry(void *parameter)
{
    while (count < 10)
    {
        count ++;
    }
}
/* Priority test: thread8 has a higher priority than the utest thread, so it
 * should preempt immediately on startup and finish counting to 10 before
 * rt_thread_startup returns here.
 * NOTE(review): assumes single-core (or same-core) scheduling -- on SMP
 * thread8 could still be running on another core when count is checked. */
static void test_thread_priority(void)
{
    rt_err_t ret_startup = -RT_ERROR;
    rt_thread_t tid8 = RT_NULL;
    tid8 = rt_thread_create("thread8",
                            thread8_entry,
                            RT_NULL,
                            THREAD_STACK_SIZE,
                            UTEST_THR_PRIORITY - 1,
                            THREAD_TIMESLICE);
    if (tid8 == RT_NULL)
    {
        LOG_E("rt_thread_create failed!");
        uassert_false(tid8 == RT_NULL);
        return;
    }
    count = 0;
    ret_startup = rt_thread_startup(tid8);
    if (ret_startup != RT_EOK)
    {
        uassert_false(ret_startup != RT_EOK);
        return ;
    }
    uassert_true(count == 10);
    return;
}
/* rt_thread_delay_until test: the measured period must equal the requested
 * one exactly, even when extra work (rt_thread_delay(2)) happens inside the
 * cycle -- delay_until compensates relative to the stored tick. */
static void test_delay_until(void)
{
    rt_tick_t tick;
    rt_tick_t check_tick = 0;
    rt_tick_t delta = 0;
    tick = rt_tick_get();
    check_tick = tick;
    rt_thread_delay_until(&tick, 100);
    delta = rt_tick_get() - check_tick;
    rt_kprintf("delta[100] -> %d\n", delta);
    uassert_int_equal(delta, 100);
    check_tick = tick;
    rt_thread_delay(2);
    rt_thread_delay_until(&tick, 200);
    delta = rt_tick_get() - check_tick;
    rt_kprintf("delta[200] -> %d\n", delta);
    uassert_int_equal(delta, 200);
    check_tick = tick;
    rt_thread_delay(2);
    rt_thread_delay_until(&tick, 300);
    delta = rt_tick_get() - check_tick;
    rt_kprintf("delta[300] -> %d\n", delta);
    uassert_int_equal(delta, 300);
    check_tick = tick;
    rt_thread_delay(2);
    rt_thread_delay_until(&tick, 100);
    delta = rt_tick_get() - check_tick;
    uassert_int_equal(delta, 100);
    check_tick = tick;
    rt_thread_delay(2);
    rt_thread_delay_until(&tick, 50);
    delta = rt_tick_get() - check_tick;
    rt_kprintf("delta[50] -> %d\n", delta);
    uassert_int_equal(delta, 50);
    check_tick = tick;
    rt_thread_delay(2);
    rt_thread_delay_until(&tick, 20);
    delta = rt_tick_get() - check_tick;
    rt_kprintf("delta[20] -> %d\n", delta);
    uassert_int_equal(delta, 20);
    /**
     * the rt_kprints above can take few ticks to complete, maybe more than 10,
     * so re-anchor the reference tick before the shortest period
     */
    tick = rt_tick_get();
    check_tick = tick;
    rt_thread_delay(2);
    rt_thread_delay_until(&tick, 10);
    delta = rt_tick_get() - check_tick;
    rt_kprintf("delta[10] -> %d\n", delta);
    uassert_int_equal(delta, 10);
}
/* Timeslice test state: A ticks on a delay and acts as the stop condition;
 * B1/B2 share one priority level and should receive roughly equal CPU time
 * through round-robin timeslicing. */
static rt_thread_t tidA, tidB1, tidB2;
static uint32_t timeslice_cntA, timeslice_cntB1, timeslice_cntB2;
/* Thread A: bump the counter every 2 ticks; exit after 11 increments. */
static void test_timeslice_threadA_entry(void *parameter)
{
    while (1)
    {
        rt_thread_delay(2);
        timeslice_cntA++;
        if (timeslice_cntA > 10) return;
    }
}
/* Thread B1: spin incrementing its counter until thread A finishes. */
static void test_timeslice_threadB1_entry(void *parameter)
{
    while (1)
    {
        timeslice_cntB1++;
        if (timeslice_cntA > 10) return;
    }
}
/* Thread B2: identical to B1; the two counters are compared afterwards. */
static void test_timeslice_threadB2_entry(void *parameter)
{
    while (1)
    {
        timeslice_cntB2++;
        if (timeslice_cntA > 10) return;
    }
}
/* Timeslice fairness test: bind A, B1 and B2 to CPU 1; B1/B2 share one
 * priority with equal timeslices, so their iteration counts should differ
 * by less than 30%. */
void test_timeslice(void)
{
    rt_err_t ret_startup = -RT_ERROR;
    uint32_t diff;
    timeslice_cntA = 0;
    timeslice_cntB1 = 0;
    timeslice_cntB2 = 0;
    tidA = rt_thread_create("timeslice", test_timeslice_threadA_entry, RT_NULL,
                            2048, UTEST_THR_PRIORITY + 1, 10);
    if (!tidA)
    {
        LOG_E("rt_thread_create failed!");
        return;
    }
    rt_thread_control(tidA, RT_THREAD_CTRL_BIND_CPU, (void *)1);
    ret_startup = rt_thread_startup(tidA);
    if (ret_startup != RT_EOK)
    {
        LOG_E("rt_thread_startup failed!");
        uassert_false(1);
        return ;
    }
    tidB1 = rt_thread_create("timeslice", test_timeslice_threadB1_entry, RT_NULL,
                             2048, UTEST_THR_PRIORITY + 2, 2);
    if (!tidB1)
    {
        LOG_E("rt_thread_create failed!");
        return;
    }
    rt_thread_control(tidB1, RT_THREAD_CTRL_BIND_CPU, (void *)1);
    ret_startup = rt_thread_startup(tidB1);
    if (ret_startup != RT_EOK)
    {
        LOG_E("rt_thread_startup failed!");
        uassert_false(1);
        return ;
    }
    tidB2 = rt_thread_create("timeslice", test_timeslice_threadB2_entry, RT_NULL,
                             2048, UTEST_THR_PRIORITY + 2, 2);
    if (!tidB2)
    {
        LOG_E("rt_thread_create failed!");
        return;
    }
    rt_thread_control(tidB2, RT_THREAD_CTRL_BIND_CPU, (void *)1);
    ret_startup = rt_thread_startup(tidB2);
    if (ret_startup != RT_EOK)
    {
        LOG_E("rt_thread_startup failed!");
        uassert_false(1);
        return ;
    }
    /* wait until thread A has counted past 10 (all three then exit) */
    do{
        rt_thread_delay(2 * 20);
    }while(timeslice_cntA <= 10);
    rt_kprintf("A:%d,B1:%d,B2:%d\n", timeslice_cntA, timeslice_cntB1, timeslice_cntB2);
    /* Unsigned-safe absolute difference: the previous
     * abs(timeslice_cntB1 - timeslice_cntB2) wrapped around when B2 > B1
     * because the subtraction is done in unsigned arithmetic. */
    diff = (timeslice_cntB1 > timeslice_cntB2) ? (timeslice_cntB1 - timeslice_cntB2)
                                               : (timeslice_cntB2 - timeslice_cntB1);
    if (timeslice_cntB1 == 0)
    {
        /* B1 never ran at all -- fail instead of dividing by zero */
        uassert_false(1);
    }
    else
    {
        uassert_true(diff * 100 / timeslice_cntB1 < 30);
    }
    uassert_true(timeslice_cntA == 11);
}
#ifndef RT_USING_SMP
/* Counter advanced by the increment-and-yield worker below; the checker
 * thread verifies it moves across each of its own yields. */
static volatile rt_uint32_t yield_count;
/* Worker: bump the counter, yield, repeat ~10000 times. */
static void test_thread_yield_inc_entry(void *parameter)
{
    rt_uint32_t loop = 0;
    while (1)
    {
        if (loop++ > 10001)
            break;
        yield_count++;
        rt_thread_yield();
    }
}
/* Checker: spawn the incrementing worker at the same priority, then verify
 * that every one of our own yields let the worker advance the counter. */
static void test_thread_yield_entry(void *parameter)
{
    rt_err_t ret_startup = -RT_ERROR;
    rt_thread_t tid;
    rt_uint32_t loop = 0;
    rt_uint32_t count_before;
    tid = rt_thread_create("inc", test_thread_yield_inc_entry, RT_NULL,
                           2048, 1, 10);
    if (!tid)
    {
        LOG_E("rt_thread_create failed!");
        return;
    }
    ret_startup = rt_thread_startup(tid);
    if (ret_startup != RT_EOK)
    {
        LOG_E("rt_thread_startup failed!");
        uassert_false(1);
        return ;
    }
    while (1)
    {
        if (loop++ > 10000)
            break;
        count_before = yield_count;
        rt_thread_yield();
        /* if the counter did not move, the yield never switched threads */
        if (yield_count == count_before)
        {
            LOG_E("yield error!");
            return;
        }
    }
    thread_yield_flag = 1;
}
/* Non-SMP yield test: run the checker thread at priority 1 and assert it
 * set the success flag.
 * NOTE(review): the assert runs right after startup; this only works if
 * priority 1 outranks the utest thread so both workers complete before we
 * resume -- confirm UTEST_THR_PRIORITY > 1. */
void test_thread_yield_nosmp(void)
{
    rt_err_t ret_startup = -RT_ERROR;
    rt_thread_t tid;
    yield_count = 0;
    tid = rt_thread_create("chkcnt", test_thread_yield_entry, RT_NULL,
                           2048, 1, 10);
    if (!tid)
    {
        LOG_E("rt_thread_create failed!");
        return;
    }
    ret_startup = rt_thread_startup(tid);
    if (ret_startup != RT_EOK)
    {
        LOG_E("rt_thread_startup failed!");
        uassert_false(1);
        return ;
    }
    uassert_true(thread_yield_flag == 1);
}
// static rt_uint32_t thread9_count = 0;
// static void thread9_entry(void *parameter)
// {
// while (1)
// {
// thread9_count ++;
// }
// }
// static void test_thread_suspend(void)
// {
// static rt_thread_t tid;
// rt_err_t ret_startup = -RT_ERROR;
// uint32_t count_before_suspend, count_before_resume, count_after_resume;
// tid = rt_thread_create("thread9",
// thread9_entry,
// RT_NULL,
// THREAD_STACK_SIZE,
// UTEST_THR_PRIORITY + 1,
// THREAD_TIMESLICE);
// if (tid == RT_NULL)
// {
// LOG_E("rt_thread_create failed!");
// uassert_false(tid4 == RT_NULL);
// goto __exit;
// }
// ret_startup = rt_thread_startup(tid);
// if (ret_startup != RT_EOK)
// {
// LOG_E("rt_thread_startup failed!");
// uassert_false(1);
// goto __exit;
// }
// rt_thread_delay(5);
// rt_thread_suspend(tid);
// count_before_suspend = thread9_count;
// uassert_true(count_before_suspend != 0);
// rt_thread_delay(5);
// count_before_resume = thread9_count;
// uassert_true(count_before_suspend == count_before_resume);
// rt_thread_resume(tid);
// rt_thread_delay(5);
// count_after_resume = thread9_count;
// uassert_true(count_after_resume != count_before_resume);
// __exit:
// if (tid != RT_NULL)
// {
// rt_thread_delete(tid);
// }
// return;
// }
#endif
/* Testcase setup: capture the current thread, derive the target priority for
 * the control test, and reset all cross-thread flags so the suite is
 * repeatable. */
static rt_err_t utest_tc_init(void)
{
    __current_thread = rt_thread_self();
    change_priority = UTEST_THR_PRIORITY + 5;
    tid3_delay_pass_flag = 0;
    tid3_finish_flag = 0;
    tid4_finish_flag = 0;
    tid6_finish_flag = 0;
    entry_idle_hook_times = 0;
    count = 0;
    return RT_EOK;
}
/* Each unit cleans up its own threads; nothing left to release. */
static rt_err_t utest_tc_cleanup(void)
{
    return RT_EOK;
}
/* Run the thread-API units in dependency-free order. */
static void testcase(void)
{
    /* init, detach */
    UTEST_UNIT_RUN(test_static_thread);
    /* create, delete */
    UTEST_UNIT_RUN(test_dynamic_thread);
    /* delay */
    UTEST_UNIT_RUN(test_thread_delay);
    /* idle_sethook, idle_delhook */
    UTEST_UNIT_RUN(test_idle_hook);
    /* yield */
    UTEST_UNIT_RUN(test_thread_yield);
#ifndef RT_USING_SMP
    /* yield_nosmp */
    UTEST_UNIT_RUN(test_thread_yield_nosmp);
    /* suspend, resume */
    // UTEST_UNIT_RUN(test_thread_suspend);
#endif
    /* control */
    UTEST_UNIT_RUN(test_thread_control);
    UTEST_UNIT_RUN(test_thread_priority);
    /* delay_until */
    UTEST_UNIT_RUN(test_delay_until);
    /* timeslice */
    // UTEST_UNIT_RUN(test_timeslice); /* Can not running in Github Action QEMU */
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.thread_tc", utest_tc_init, utest_tc_cleanup, 1000);
/********************* end of file ************************/

View File

@ -0,0 +1,747 @@
/*
* Copyright (c) 2006-2019, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-08-12 luckyzjq the first version
*/
#include <rtthread.h>
#include <stdlib.h>
#include "utest.h"
#undef uassert_true
#define uassert_true(value) \
do \
{ \
if (!(value)) \
{ \
__utest_assert(value, "(" #value ") is false"); \
} \
} while (0)
/* notify user that the test is not corrupted */
#define PRINT_PROGRESS(id) LOG_I("Testing on %d", id)
static rt_uint8_t timer_flag_oneshot[] = {
RT_TIMER_FLAG_ONE_SHOT,
RT_TIMER_FLAG_ONE_SHOT | RT_TIMER_FLAG_HARD_TIMER,
RT_TIMER_FLAG_ONE_SHOT | RT_TIMER_FLAG_SOFT_TIMER,
};
static rt_uint8_t timer_flag_periodic[] = {
RT_TIMER_FLAG_PERIODIC,
RT_TIMER_FLAG_PERIODIC | RT_TIMER_FLAG_HARD_TIMER,
RT_TIMER_FLAG_PERIODIC | RT_TIMER_FLAG_SOFT_TIMER,
};
typedef struct test_timer_struct
{
struct rt_timer static_timer; /* static timer handler */
rt_timer_t dynamic_timer; /* dynamic timer pointer */
rt_tick_t expect_tick; /* expect tick */
rt_ubase_t callbacks; /* timer callback times */
rt_bool_t is_static; /* static or dynamic timer */
} timer_struct;
static timer_struct timer;
/* One-shot timer callback: count the invocation and check it fired exactly
 * at the precomputed tick (tick-exact check; may be sensitive to load). */
static void timer_oneshot(void *param)
{
    timer_struct *timer_call;
    timer_call = (timer_struct *)param;
    timer_call->callbacks++;
    uassert_true(rt_tick_get() == timer_call->expect_tick);
}
/* Periodic timer callback: verify the fire tick, advance the expectation by
 * one period, and stop the timer after the 5th invocation. */
static void timer_periodic(void *param)
{
    rt_err_t result;
    timer_struct *timer_call;
    timer_call = (timer_struct *)param;
    timer_call->callbacks++;
    uassert_true(rt_tick_get() == timer_call->expect_tick);
    /* next expected fire = now + period (init_tick) */
    if (timer_call->is_static)
    {
        timer_call->expect_tick = rt_tick_get() + timer_call->static_timer.init_tick;
    }
    else
    {
        timer_call->expect_tick = rt_tick_get() + timer_call->dynamic_timer->init_tick;
    }
    if (timer_call->callbacks == 5)
    {
        /* periodic timer can stop */
        if (timer_call->is_static)
        {
            result = rt_timer_stop(&timer_call->static_timer);
        }
        else
        {
            result = rt_timer_stop(timer_call->dynamic_timer);
        }
        uassert_true(result == RT_EOK);
    }
}
/* Static timer test: for periods 1..9 and every flag combination, run a
 * one-shot timer (expect exactly 1 callback) and a periodic timer (expect
 * at least 5 callbacks before it stops itself). */
static void test_static_timer(void)
{
    rt_err_t result;
    timer.callbacks = 0;
    timer.is_static = RT_TRUE;
    /* one shot timer test */
    for (int time_out = 1; time_out < 10; time_out++)
    {
        /* sizeof == element count: the flag arrays are rt_uint8_t */
        for (int i = 0; i < sizeof(timer_flag_oneshot); i++)
        {
            rt_timer_init(&timer.static_timer,
                          "static_timer",
                          timer_oneshot,
                          &timer,
                          time_out,
                          timer_flag_oneshot[i]);
            /* calc expect tick */
            timer.expect_tick = rt_tick_get() + time_out;
            /* start timer */
            result = rt_timer_start(&timer.static_timer);
            uassert_true(result == RT_EOK);
            /* wait for timerout */
            rt_thread_delay(3 * time_out + 1);
            uassert_true(timer.callbacks == 1);
            /* detach timer */
            result = rt_timer_detach(&timer.static_timer);
            uassert_true(result == RT_EOK);
            timer.callbacks = 0;
        }
    }
    /* periodic timer test */
    for (int time_out = 1; time_out < 10; time_out++)
    {
        for (int i = 0; i < sizeof(timer_flag_periodic); i++)
        {
            rt_timer_init(&timer.static_timer,
                          "static_timer",
                          timer_periodic,
                          &timer,
                          time_out,
                          timer_flag_periodic[i]);
            /* calc expect tick */
            timer.expect_tick = rt_tick_get() + time_out;
            /* start timer */
            result = rt_timer_start(&timer.static_timer);
            uassert_true(result == RT_EOK);
            /* wait for timerout */
            rt_thread_delay(5 * time_out + 1);
            uassert_true(timer.callbacks >= 5);
            /* detach timer */
            result = rt_timer_detach(&timer.static_timer);
            uassert_true(result == RT_EOK);
            timer.callbacks = 0;
        }
    }
}
/* Restarting an already-started one-shot timer must re-arm it from "now";
 * only one callback may occur. */
static void test_static_timer_start_twice(void)
{
    rt_err_t result;
    timer.callbacks = 0;
    timer.is_static = RT_TRUE;
    /* timer start twice test (period >= 2 so the restart lands before expiry) */
    for (int time_out = 2; time_out < 10; time_out++)
    {
        for (int i = 0; i < sizeof(timer_flag_oneshot); i++)
        {
            rt_timer_init(&timer.static_timer,
                          "static_timer",
                          timer_oneshot,
                          &timer,
                          time_out,
                          timer_flag_oneshot[i]);
            /* calc expect tick */
            timer.expect_tick = rt_tick_get() + time_out;
            /* start timer */
            result = rt_timer_start(&timer.static_timer);
            uassert_true(result == RT_EOK);
            rt_thread_delay(1);
            /* re-arm: the expected fire tick moves forward accordingly */
            timer.expect_tick = rt_tick_get() + time_out;
            /* start timer */
            result = rt_timer_start(&timer.static_timer);
            uassert_true(result == RT_EOK);
            /* wait for timerout */
            rt_thread_delay(3 * time_out + 1);
            uassert_true(timer.callbacks == 1);
            /* detach timer */
            result = rt_timer_detach(&timer.static_timer);
            uassert_true(result == RT_EOK);
            timer.callbacks = 0;
        }
    }
}
/* Callback for the control test: verify the fire tick and stop the periodic
 * timer after its first invocation (so exactly one callback is expected). */
static void timer_control(void *param)
{
    rt_err_t result;
    timer_struct *timer_call;
    timer_call = (timer_struct *)param;
    timer_call->callbacks++;
    uassert_true(rt_tick_get() == timer_call->expect_tick);
    /* periodic timer can stop */
    if (timer_call->is_static)
    {
        result = rt_timer_stop(&timer_call->static_timer);
    }
    else
    {
        result = rt_timer_stop(timer_call->dynamic_timer);
    }
    uassert_true(result == RT_EOK);
}
/* rt_timer_control on a static timer: SET_TIME / GET_TIME must round-trip,
 * and the timer must then fire at the new period. */
static void test_static_timer_control(void)
{
    rt_err_t result;
    int set_data;
    int get_data;
    timer.callbacks = 0;
    timer.is_static = RT_TRUE;
    rt_timer_init(&timer.static_timer,
                  "static_timer",
                  timer_control,
                  &timer,
                  5,
                  RT_TIMER_FLAG_PERIODIC);
    /* test set data */
    set_data = 10;
    result = rt_timer_control(&timer.static_timer, RT_TIMER_CTRL_SET_TIME, &set_data);
    uassert_true(result == RT_EOK);
    /* test get data */
    result = rt_timer_control(&timer.static_timer, RT_TIMER_CTRL_GET_TIME, &get_data);
    uassert_true(result == RT_EOK);
    uassert_true(set_data == get_data);
    /* calc expect tick */
    timer.expect_tick = rt_tick_get() + set_data;
    /* start timer */
    result = rt_timer_start(&timer.static_timer);
    uassert_true(result == RT_EOK);
    rt_thread_delay(3 * set_data + 1);
    /* detach timer */
    result = rt_timer_detach(&timer.static_timer);
    uassert_true(result == RT_EOK);
    /* the callback stops the timer, so exactly one invocation */
    uassert_true(timer.callbacks == 1);
}
/* Callback that re-starts its own (one-shot) timer, turning it into a
 * self-perpetuating timer; also advances the expected fire tick. */
static void timer_start_in_callback(void *param)
{
    rt_err_t result;
    timer_struct *timer_call;
    timer_call = (timer_struct *)param;
    timer_call->callbacks++;
    uassert_true(rt_tick_get() == timer_call->expect_tick);
    if (timer_call->is_static)
    {
        timer_call->expect_tick = rt_tick_get() + timer_call->static_timer.init_tick;
        result = rt_timer_start(&timer_call->static_timer);
    }
    else
    {
        timer_call->expect_tick = rt_tick_get() + timer_call->dynamic_timer->init_tick;
        result = rt_timer_start(timer_call->dynamic_timer);
    }
    uassert_true(result == RT_EOK);
}
/* Callback that starts and then immediately stops its own timer: the net
 * effect is that the (periodic) timer fires exactly once. */
static void timer_start_stop_in_callback(void *param)
{
    rt_err_t result;
    timer_struct *timer_call;
    timer_call = (timer_struct *)param;
    timer_call->callbacks++;
    uassert_true(rt_tick_get() == timer_call->expect_tick);
    if (timer_call->is_static)
    {
        result = rt_timer_start(&timer_call->static_timer);
    }
    else
    {
        result = rt_timer_start(timer_call->dynamic_timer);
    }
    uassert_true(result == RT_EOK);
    if (timer_call->is_static)
    {
        result = rt_timer_stop(&timer_call->static_timer);
    }
    else
    {
        result = rt_timer_stop(timer_call->dynamic_timer);
    }
    uassert_true(result == RT_EOK);
}
/* Operating on a static timer from within its own callback: a one-shot that
 * re-starts itself keeps firing (>= 5 callbacks); a periodic that
 * starts-then-stops itself fires exactly once. */
static void test_static_timer_op_in_callback(void)
{
    rt_err_t result;
    timer.callbacks = 0;
    timer.is_static = RT_TRUE;
    /* start in callback test */
    for (int time_out = 1; time_out < 10; time_out++)
    {
        for (int i = 0; i < sizeof(timer_flag_oneshot); i++)
        {
            rt_timer_init(&timer.static_timer,
                          "static_timer",
                          timer_start_in_callback,
                          &timer,
                          time_out,
                          timer_flag_oneshot[i]);
            /* calc expect tick */
            timer.expect_tick = rt_tick_get() + time_out;
            /* start timer */
            result = rt_timer_start(&timer.static_timer);
            uassert_true(result == RT_EOK);
            /* wait for timerout */
            rt_thread_delay(5 * time_out + 1);
            uassert_true(timer.callbacks >= 5);
            /* detach timer */
            result = rt_timer_detach(&timer.static_timer);
            uassert_true(result == RT_EOK);
            timer.callbacks = 0;
        }
    }
    /* start & stop in callback test */
    for (int time_out = 1; time_out < 10; time_out++)
    {
        for (int i = 0; i < sizeof(timer_flag_periodic); i++)
        {
            rt_timer_init(&timer.static_timer,
                          "static_timer",
                          timer_start_stop_in_callback,
                          &timer,
                          time_out,
                          timer_flag_periodic[i]);
            /* calc expect tick */
            timer.expect_tick = rt_tick_get() + time_out;
            /* start timer */
            result = rt_timer_start(&timer.static_timer);
            uassert_true(result == RT_EOK);
            /* wait for timerout */
            rt_thread_delay(3 * time_out + 1);
            uassert_true(timer.callbacks == 1);
            /* detach timer */
            result = rt_timer_detach(&timer.static_timer);
            uassert_true(result == RT_EOK);
            timer.callbacks = 0;
        }
    }
}
#ifdef RT_USING_HEAP
static void test_dynamic_timer(void)
{
rt_err_t result;
timer.callbacks = 0;
timer.is_static = RT_FALSE;
/* one shot timer test */
for (int time_out = 1; time_out < 10; time_out++)
{
for (int i = 0; i < sizeof(timer_flag_oneshot); i++)
{
timer.dynamic_timer = rt_timer_create("dynamic_timer",
timer_oneshot,
&timer,
time_out,
timer_flag_oneshot[i]);
/* calc expect tick */
timer.expect_tick = rt_tick_get() + time_out;
/* start timer */
result = rt_timer_start(timer.dynamic_timer);
uassert_true(result == RT_EOK);
/* wait for timerout */
rt_thread_delay(3 * time_out + 1);
uassert_true(timer.callbacks == 1);
/* detach timer */
result = rt_timer_delete(timer.dynamic_timer);
uassert_true(result == RT_EOK);
timer.callbacks = 0;
}
}
/* periodic timer test */
for (int time_out = 1; time_out < 10; time_out++)
{
for (int i = 0; i < sizeof(timer_flag_periodic); i++)
{
timer.dynamic_timer = rt_timer_create("dynamic_timer",
timer_periodic,
&timer,
time_out,
timer_flag_periodic[i]);
/* calc expect tick */
timer.expect_tick = rt_tick_get() + time_out;
/* start timer */
result = rt_timer_start(timer.dynamic_timer);
uassert_true(result == RT_EOK);
/* wait for timerout */
rt_thread_delay(5 * time_out + 1);
uassert_true(timer.callbacks >= 5);
/* detach timer */
result = rt_timer_delete(timer.dynamic_timer);
uassert_true(result == RT_EOK);
timer.callbacks = 0;
}
}
}
/* rt_timer_control on a dynamic timer: SET_TIME / GET_TIME must round-trip,
 * and the timer must then fire (once) at the new period. */
static void test_dynamic_timer_control(void)
{
    rt_err_t result;
    int set_data;
    int get_data;
    timer.callbacks = 0;
    timer.is_static = RT_FALSE;
    /* NOTE(review): creation result is not NULL-checked before use. */
    timer.dynamic_timer = rt_timer_create("dynamic_timer",
                                          timer_control,
                                          &timer,
                                          5,
                                          RT_TIMER_FLAG_PERIODIC);
    /* test set data */
    set_data = 10;
    result = rt_timer_control(timer.dynamic_timer, RT_TIMER_CTRL_SET_TIME, &set_data);
    uassert_true(result == RT_EOK);
    /* test get data */
    result = rt_timer_control(timer.dynamic_timer, RT_TIMER_CTRL_GET_TIME, &get_data);
    uassert_true(result == RT_EOK);
    uassert_true(set_data == get_data);
    /* calc expect tick */
    timer.expect_tick = rt_tick_get() + set_data;
    /* start timer */
    result = rt_timer_start(timer.dynamic_timer);
    uassert_true(result == RT_EOK);
    rt_thread_delay(3 * set_data + 1);
    /* delete timer */
    result = rt_timer_delete(timer.dynamic_timer);
    uassert_true(result == RT_EOK);
    /* the callback stops the timer, so exactly one invocation */
    uassert_true(timer.callbacks == 1);
}
/* Restarting an already-started dynamic one-shot timer must re-arm it from
 * "now"; only one callback may occur. */
static void test_dynamic_timer_start_twice(void)
{
    rt_err_t result;
    timer.callbacks = 0;
    timer.is_static = RT_FALSE;
    /* timer start twice test (period >= 2 so the restart lands before expiry) */
    for (int time_out = 2; time_out < 10; time_out++)
    {
        for (int i = 0; i < sizeof(timer_flag_oneshot); i++)
        {
            /* NOTE(review): creation result is not NULL-checked before use. */
            timer.dynamic_timer = rt_timer_create("dynamic_timer",
                                                  timer_oneshot,
                                                  &timer,
                                                  time_out,
                                                  timer_flag_oneshot[i]);
            /* calc expect tick */
            timer.expect_tick = rt_tick_get() + time_out;
            /* start timer */
            result = rt_timer_start(timer.dynamic_timer);
            uassert_true(result == RT_EOK);
            rt_thread_delay(1);
            /* re-arm: the expected fire tick moves forward accordingly */
            timer.expect_tick = rt_tick_get() + time_out;
            /* start timer */
            result = rt_timer_start(timer.dynamic_timer);
            uassert_true(result == RT_EOK);
            /* wait for timerout */
            rt_thread_delay(3 * time_out + 1);
            uassert_true(timer.callbacks == 1);
            /* delete timer */
            result = rt_timer_delete(timer.dynamic_timer);
            uassert_true(result == RT_EOK);
            timer.callbacks = 0;
        }
    }
}
/* Operating on a dynamic timer from within its own callback: a one-shot
 * that re-starts itself keeps firing (>= 5 callbacks); a periodic that
 * starts-then-stops itself fires exactly once. */
static void test_dynamic_timer_op_in_callback(void)
{
    rt_err_t result;
    timer.callbacks = 0;
    timer.is_static = RT_FALSE;
    /* start in callback test */
    for (int time_out = 1; time_out < 10; time_out++)
    {
        for (int i = 0; i < sizeof(timer_flag_oneshot); i++)
        {
            /* NOTE(review): creation result is not NULL-checked before use. */
            timer.dynamic_timer = rt_timer_create("dynamic_timer",
                                                  timer_start_in_callback,
                                                  &timer,
                                                  time_out,
                                                  timer_flag_oneshot[i]);
            /* calc expect tick */
            timer.expect_tick = rt_tick_get() + time_out;
            /* start timer */
            result = rt_timer_start(timer.dynamic_timer);
            uassert_true(result == RT_EOK);
            /* wait for timerout */
            rt_thread_delay(5 * time_out + 1);
            uassert_true(timer.callbacks >= 5);
            /* delete timer */
            result = rt_timer_delete(timer.dynamic_timer);
            uassert_true(result == RT_EOK);
            timer.callbacks = 0;
        }
    }
    /* start & stop in callback test */
    for (int time_out = 1; time_out < 10; time_out++)
    {
        for (int i = 0; i < sizeof(timer_flag_periodic); i++)
        {
            timer.dynamic_timer = rt_timer_create("dynamic_timer",
                                                  timer_start_stop_in_callback,
                                                  &timer,
                                                  time_out,
                                                  timer_flag_periodic[i]);
            /* calc expect tick */
            timer.expect_tick = rt_tick_get() + time_out;
            /* start timer */
            result = rt_timer_start(timer.dynamic_timer);
            uassert_true(result == RT_EOK);
            /* wait for timerout */
            rt_thread_delay(3 * time_out + 1);
            uassert_true(timer.callbacks == 1);
            /* delete timer */
            result = rt_timer_delete(timer.dynamic_timer);
            uassert_true(result == RT_EOK);
            timer.callbacks = 0;
        }
    }
}
#endif /* RT_USING_HEAP */
#define TEST_TIME_S 60 // test 60 seconds
#define STRESS_TIMERS 100
static struct rt_timer stress_timer[STRESS_TIMERS];
/* Stress callback: randomly restart or stop the timer it was installed on.
 * The local was renamed -- it previously shadowed the file-scope
 * `stress_timer` array (-Wshadow). */
static void timer_stress(void *param)
{
    rt_timer_t self_timer = (rt_timer_t)param;
    if (rand() % 2 == 0)
    {
        rt_timer_start(self_timer);
    }
    else
    {
        rt_timer_stop(self_timer);
    }
}
/* Stress test: for each periodic-flag variant, hammer 100 timers with
 * random start/stop calls (from this thread and from the callbacks) for
 * TEST_TIME_S seconds, printing progress once per second. */
static void test_timer_stress(void)
{
    rt_tick_t start;
    rt_ubase_t iters = 0;
    rt_ubase_t cur_tick;
    rt_ubase_t next_print_time;
    LOG_I("timer stress test begin, it will take %d seconds", 3*TEST_TIME_S);
    for (int i = 0; i < sizeof(timer_flag_periodic); i++)
    {
        for (int j = 0; j < STRESS_TIMERS; j++)
        {
            /* each timer's callback operates on the timer itself */
            rt_timer_init(&stress_timer[j],
                          "stress_timer",
                          timer_stress,
                          &stress_timer[j],
                          j + 1,
                          timer_flag_periodic[i]);
        }
        start = rt_tick_get();
        cur_tick = rt_tick_get();
        next_print_time = cur_tick + RT_TICK_PER_SECOND;
        while (cur_tick - start <= TEST_TIME_S * RT_TICK_PER_SECOND)
        {
            for (int j = 0; j < STRESS_TIMERS; j++)
            {
                if (rand() % 2 == 0)
                {
                    rt_timer_start(&stress_timer[j]);
                }
                else
                {
                    rt_timer_stop(&stress_timer[j]);
                }
            }
            iters ++;
            cur_tick = rt_tick_get();
            if (cur_tick > next_print_time)
            {
                /* heartbeat so CI does not think the test hung */
                PRINT_PROGRESS(next_print_time);
                next_print_time = cur_tick + RT_TICK_PER_SECOND;
            }
        }
        for (int j = 0; j < STRESS_TIMERS; j++)
        {
            rt_timer_detach(&stress_timer[j]);
        }
    }
    LOG_I("success after %lu iterations", iters);
}
/* Testcase setup: reset the shared timer descriptor. */
static rt_err_t utest_tc_init(void)
{
    timer.dynamic_timer = RT_NULL;
    timer.callbacks = 0;
    return RT_EOK;
}
/* Testcase teardown: each unit detaches/deletes its own timers. */
static rt_err_t utest_tc_cleanup(void)
{
    timer.dynamic_timer = RT_NULL;
    timer.callbacks = 0;
    return RT_EOK;
}
/* Run all timer units, with a progress line after each for CI visibility. */
static void testcase(void)
{
    UTEST_UNIT_RUN(test_static_timer);
    PRINT_PROGRESS(__LINE__);
    UTEST_UNIT_RUN(test_static_timer_control);
    PRINT_PROGRESS(__LINE__);
    UTEST_UNIT_RUN(test_static_timer_start_twice);
    PRINT_PROGRESS(__LINE__);
    UTEST_UNIT_RUN(test_static_timer_op_in_callback);
    PRINT_PROGRESS(__LINE__);
#ifdef RT_USING_HEAP
    UTEST_UNIT_RUN(test_dynamic_timer);
    PRINT_PROGRESS(__LINE__);
    UTEST_UNIT_RUN(test_dynamic_timer_control);
    PRINT_PROGRESS(__LINE__);
    UTEST_UNIT_RUN(test_dynamic_timer_start_twice);
    PRINT_PROGRESS(__LINE__);
    UTEST_UNIT_RUN(test_dynamic_timer_op_in_callback);
    PRINT_PROGRESS(__LINE__);
#endif /* RT_USING_HEAP */
    UTEST_UNIT_RUN(test_timer_stress);
    PRINT_PROGRESS(__LINE__);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.timer_tc", utest_tc_init, utest_tc_cleanup, 1000);
/*********************** end of file ****************************/