From 1342407bab6d1f2b93b47391a7ac27d4412126d1 Mon Sep 17 00:00:00 2001 From: Paul Helter Date: Thu, 29 Sep 2022 15:04:39 -0700 Subject: [PATCH 01/24] Adding in ability to support a library for freertos_config and a custom freertos_kernel_port (#558) --- CMakeLists.txt | 364 ++++++++++++++++++++++------------------ portable/CMakeLists.txt | 28 ++++ 2 files changed, 229 insertions(+), 163 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5b048d224c5..d622fc8a51e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,28 +1,27 @@ cmake_minimum_required(VERSION 3.15) -# User is responsible to set two mandatory options: -# FREERTOS_CONFIG_FILE_DIRECTORY -# FREERTOS_PORT +# User is responsible to one mandatory option: +# FREERTOS_PORT +# +# User is responsible for one library target: +# freertos_config ,typcially an INTERFACE library # # User can choose which heap implementation to use (either the implementations # included with FreeRTOS [1..5] or a custom implementation ) by providing the # option FREERTOS_HEAP. If the option is not set, the cmake will default to # using heap_4.c. -# Absolute path to FreeRTOS config file directory -set(FREERTOS_CONFIG_FILE_DIRECTORY "" CACHE STRING "Absolute path to the directory with FreeRTOSConfig.h") - -if(NOT FREERTOS_CONFIG_FILE_DIRECTORY) - message(FATAL_ERROR " FreeRTOSConfig.h file directory not specified. Please specify absolute path to it from top-level CMake file:\n" - " set(FREERTOS_CONFIG_FILE_DIRECTORY CACHE STRING \"\")\n" - " or from CMake command line option:\n" - " -DFREERTOS_CONFIG_FILE_DIRECTORY='/absolute_path/to/FreeRTOSConfig.h/directory'") -elseif(NOT EXISTS ${FREERTOS_CONFIG_FILE_DIRECTORY}/FreeRTOSConfig.h) - message(FATAL_ERROR " FreeRTOSConfig.h file not found in the directory specified (${FREERTOS_CONFIG_FILE_DIRECTORY})\n" - " Please specify absolute path to it from top-level CMake file:\n" - " set(FREERTOS_CONFIG_FILE_DIRECTORY CACHE STRING \"\")\n" - " or from CMake command line option:\n" - " -DFREERTOS_CONFIG_FILE_DIRECTORY='/absolute_path/to/FreeRTOSConfig.h/directory'") +# FreeRTOS::Config target - typically INTERFACE - defines the path to FreeRTOSConfig.h and optionally other freertos based config files +if(NOT TARGET freertos_config ) + message(FATAL_ERROR " freertos_config target not specified. 
Please specify a cmake target that defines the include directory for FreeRTOSConfig.h:\n" + " add_library(freertos_config INTERFACE)\n" + " add_library(FreeRTOS::Config ALIAS freertos_config)\n" + " target_include_directories(freertos_config SYSTEM\n" + " INTERFACE\n" + " include) # The config file directory\n" + " target_compile_definitions(freertos_config\n" + " PUBLIC\n" + " projCOVERAGE_TEST=0)\n") endif() # Heap number or absolute path to custom heap implementation provided by user @@ -38,150 +37,170 @@ if(NOT FREERTOS_PORT) " -DFREERTOS_PORT=GCC_ARM_CM4F\n" " \n" " Available port options:\n" - " BCC_16BIT_DOS_FLSH186 - Compiller: BCC Target: 16 bit DOS Flsh186\n" - " BCC_16BIT_DOS_PC - Compiller: BCC Target: 16 bit DOS PC\n" - " CCS_ARM_CM3 - Compiller: CCS Target: ARM Cortex-M3\n" - " CCS_ARM_CM4F - Compiller: CCS Target: ARM Cortex-M4 with FPU\n" - " CCS_ARM_CR4 - Compiller: CCS Target: ARM Cortex-R4\n" - " CCS_MSP430X - Compiller: CCS Target: MSP430X\n" - " CODEWARRIOR_COLDFIRE_V1 - Compiller: CoreWarrior Target: ColdFire V1\n" - " CODEWARRIOR_COLDFIRE_V2 - Compiller: CoreWarrior Target: ColdFire V2\n" - " CODEWARRIOR_HCS12 - Compiller: CoreWarrior Target: HCS12\n" - " GCC_ARM_CA9 - Compiller: GCC Target: ARM Cortex-A9\n" - " GCC_ARM_CA53_64_BIT - Compiller: GCC Target: ARM Cortex-A53 64 bit\n" - " GCC_ARM_CA53_64_BIT_SRE - Compiller: GCC Target: ARM Cortex-A53 64 bit SRE\n" - " GCC_ARM_CM0 - Compiller: GCC Target: ARM Cortex-M0\n" - " GCC_ARM_CM3 - Compiller: GCC Target: ARM Cortex-M3\n" - " GCC_ARM_CM3_MPU - Compiller: GCC Target: ARM Cortex-M3 with MPU\n" - " GCC_ARM_CM4_MPU - Compiller: GCC Target: ARM Cortex-M4 with MPU\n" - " GCC_ARM_CM4F - Compiller: GCC Target: ARM Cortex-M4 with FPU\n" - " GCC_ARM_CM7 - Compiller: GCC Target: ARM Cortex-M7\n" - " GCC_ARM_CM23_NONSECURE - Compiller: GCC Target: ARM Cortex-M23 non-secure\n" - " GCC_ARM_CM23_SECURE - Compiller: GCC Target: ARM Cortex-M23 secure\n" - " GCC_ARM_CM23_NTZ_NONSECURE - Compiller: GCC Target: ARM Cortex-M23 non-trustzone non-secure\n" - " GCC_ARM_CM33_NONSECURE - Compiller: GCC Target: ARM Cortex-M33 non-secure\n" - " GCC_ARM_CM33_SECURE - Compiller: GCC Target: ARM Cortex-M33 secure\n" - " GCC_ARM_CM33_NTZ_NONSECURE - Compiller: GCC Target: ARM Cortex-M33 non-trustzone non-secure\n" - " GCC_ARM_CM33_TFM - Compiller: GCC Target: ARM Cortex-M33 non-secure for TF-M\n" - " GCC_ARM_CM55_NONSECURE - Compiller: GCC Target: ARM Cortex-M55 non-secure\n" - " GCC_ARM_CM55_SECURE - Compiller: GCC Target: ARM Cortex-M55 secure\n" - " GCC_ARM_CM55_NTZ_NONSECURE - Compiller: GCC Target: ARM Cortex-M55 non-trustzone non-secure\n" - " GCC_ARM_CM55_TFM - Compiller: GCC Target: ARM Cortex-M55 non-secure for TF-M\n" - " GCC_ARM_CM85_NONSECURE - Compiller: GCC Target: ARM Cortex-M85 non-secure\n" - " GCC_ARM_CM85_SECURE - Compiller: GCC Target: ARM Cortex-M85 secure\n" - " GCC_ARM_CM85_NTZ_NONSECURE - Compiller: GCC Target: ARM Cortex-M85 non-trustzone non-secure\n" - " GCC_ARM_CM85_TFM - Compiller: GCC Target: ARM Cortex-M85 non-secure for TF-M\n" - " GCC_ARM_CR5 - Compiller: GCC Target: ARM Cortex-R5\n" - " GCC_ARM_CRX_NOGIC - Compiller: GCC Target: ARM Cortex-Rx no GIC\n" - " GCC_ARM7_AT91FR40008 - Compiller: GCC Target: ARM7 Atmel AT91R40008\n" - " GCC_ARM7_AT91SAM7S - Compiller: GCC Target: ARM7 Atmel AT91SAM7S\n" - " GCC_ARM7_LPC2000 - Compiller: GCC Target: ARM7 LPC2000\n" - " GCC_ARM7_LPC23XX - Compiller: GCC Target: ARM7 LPC23xx\n" - " GCC_ATMEGA323 - Compiller: GCC Target: ATMega323\n" - " GCC_AVR32_UC3 - Compiller: GCC 
Target: AVR32 UC3\n" - " GCC_COLDFIRE_V2 - Compiller: GCC Target: ColdFire V2\n" - " GCC_CORTUS_APS3 - Compiller: GCC Target: CORTUS APS3\n" - " GCC_H8S2329 - Compiller: GCC Target: H8S2329\n" - " GCC_HCS12 - Compiller: GCC Target: HCS12\n" - " GCC_IA32_FLAT - Compiller: GCC Target: IA32 flat\n" - " GCC_MICROBLAZE - Compiller: GCC Target: MicroBlaze\n" - " GCC_MICROBLAZE_V8 - Compiller: GCC Target: MicroBlaze V8\n" - " GCC_MICROBLAZE_V9 - Compiller: GCC Target: MicroBlaze V9\n" - " GCC_MSP430F449 - Compiller: GCC Target: MSP430F449\n" - " GCC_NIOSII - Compiller: GCC Target: NiosII\n" - " GCC_PPC405_XILINX - Compiller: GCC Target: Xilinx PPC405\n" - " GCC_PPC440_XILINX - Compiller: GCC Target: Xilinx PPC440\n" - " GCC_RISC_V - Compiller: GCC Target: RISC-V\n" - " GCC_RISC_V_PULPINO_VEGA_RV32M1RM - Compiller: GCC Target: RISC-V Pulpino Vega RV32M1RM\n" - " GCC_RL78 - Compiller: GCC Target: Renesas RL78\n" - " GCC_RX100 - Compiller: GCC Target: Renesas RX100\n" - " GCC_RX200 - Compiller: GCC Target: Renesas RX200\n" - " GCC_RX600 - Compiller: GCC Target: Renesas RX600\n" - " GCC_RX600_V2 - Compiller: GCC Target: Renesas RX600 v2\n" - " GCC_RX700_V3_DPFPU - Compiller: GCC Target: Renesas RX700 v3 with DPFPU\n" - " GCC_STR75X - Compiller: GCC Target: STR75x\n" - " GCC_TRICORE_1782 - Compiller: GCC Target: TriCore 1782\n" - " GCC_ARC_EM_HS - Compiller: GCC Target: DesignWare ARC EM HS\n" - " GCC_ARC_V1 - Compiller: GCC Target: DesignWare ARC v1\n" - " GCC_ATMEGA - Compiller: GCC Target: ATmega\n" - " GCC_POSIX - Compiller: GCC Target: Posix\n" - " GCC_RP2040 - Compiller: GCC Target: RP2040 ARM Cortex-M0+\n" - " GCC_XTENSA_ESP32 - Compiller: GCC Target: Xtensa ESP32\n" - " GCC_AVRDX - Compiller: GCC Target: AVRDx\n" - " GCC_AVR_MEGA0 - Compiller: GCC Target: AVR Mega0\n" - " IAR_78K0K - Compiller: IAR Target: Renesas 78K0K\n" - " IAR_ARM_CA5_NOGIC - Compiller: IAR Target: ARM Cortex-A5 no GIC\n" - " IAR_ARM_CA9 - Compiller: IAR Target: ARM Cortex-A9\n" - " IAR_ARM_CM0 - Compiller: IAR Target: ARM Cortex-M0\n" - " IAR_ARM_CM3 - Compiller: IAR Target: ARM Cortex-M3\n" - " IAR_ARM_CM4F - Compiller: IAR Target: ARM Cortex-M4 with FPU\n" - " IAR_ARM_CM4F_MPU - Compiller: IAR Target: ARM Cortex-M4 with FPU and MPU\n" - " IAR_ARM_CM7 - Compiller: IAR Target: ARM Cortex-M7\n" - " IAR_ARM_CM23_NONSECURE - Compiller: IAR Target: ARM Cortex-M23 non-secure\n" - " IAR_ARM_CM23_SECURE - Compiller: IAR Target: ARM Cortex-M23 secure\n" - " IAR_ARM_CM23_NTZ_NONSECURE - Compiller: IAR Target: ARM Cortex-M23 non-trustzone non-secure\n" - " IAR_ARM_CM33_NONSECURE - Compiller: IAR Target: ARM Cortex-M33 non-secure\n" - " IAR_ARM_CM33_SECURE - Compiller: IAR Target: ARM Cortex-M33 secure\n" - " IAR_ARM_CM33_NTZ_NONSECURE - Compiller: IAR Target: ARM Cortex-M33 non-trustzone non-secure\n" - " IAR_ARM_CM55_NONSECURE - Compiller: IAR Target: ARM Cortex-M55 non-secure\n" - " IAR_ARM_CM55_SECURE - Compiller: IAR Target: ARM Cortex-M55 secure\n" - " IAR_ARM_CM55_NTZ_NONSECURE - Compiller: IAR Target: ARM Cortex-M55 non-trustzone non-secure\n" - " IAR_ARM_CM85_NONSECURE - Compiller: IAR Target: ARM Cortex-M85 non-secure\n" - " IAR_ARM_CM85_SECURE - Compiller: IAR Target: ARM Cortex-M85 secure\n" - " IAR_ARM_CM85_NTZ_NONSECURE - Compiller: IAR Target: ARM Cortex-M85 non-trustzone non-secure\n" - " IAR_ARM_CRX_NOGIC - Compiller: IAR Target: ARM Cortex-Rx no GIC\n" - " IAR_ATMEGA323 - Compiller: IAR Target: ATMega323\n" - " IAR_ATMEL_SAM7S64 - Compiller: IAR Target: Atmel SAM7S64\n" - " IAR_ATMEL_SAM9XE - Compiller: IAR Target: 
Atmel SAM9XE\n" - " IAR_AVR_AVRDX - Compiller: IAR Target: AVRDx\n" - " IAR_AVR_MEGA0 - Compiller: IAR Target: AVR Mega0\n" - " IAR_AVR32_UC3 - Compiller: IAR Target: AVR32 UC3\n" - " IAR_LPC2000 - Compiller: IAR Target: LPC2000\n" - " IAR_MSP430 - Compiller: IAR Target: MSP430\n" - " IAR_MSP430X - Compiller: IAR Target: MSP430X\n" - " IAR_RISC_V - Compiller: IAR Target: RISC-V\n" - " IAR_RL78 - Compiller: IAR Target: Renesas RL78\n" - " IAR_RX100 - Compiller: IAR Target: Renesas RX100\n" - " IAR_RX600 - Compiller: IAR Target: Renesas RX600\n" - " IAR_RX700_V3_DPFPU - Compiller: IAR Target: Renesas RX700 v3 with DPFPU\n" - " IAR_RX_V2 - Compiller: IAR Target: Renesas RX v2\n" - " IAR_STR71X - Compiller: IAR Target: STR71x\n" - " IAR_STR75X - Compiller: IAR Target: STR75x\n" - " IAR_STR91X - Compiller: IAR Target: STR91x\n" - " IAR_V850ES_FX3 - Compiller: IAR Target: Renesas V850ES/Fx3\n" - " IAR_V850ES_HX3 - Compiller: IAR Target: Renesas V850ES/Hx3\n" - " MIKROC_ARM_CM4F - Compiller: MikroC Target: ARM Cortex-M4 with FPU\n" - " MPLAB_PIC18F - Compiller: MPLAB Target: PIC18F\n" - " MPLAB_PIC24 - Compiller: MPLAB Target: PIC24\n" - " MPLAB_PIC32MEC14XX - Compiller: MPLAB Target: PIC32MEC14xx\n" - " MPLAB_PIC32MX - Compiller: MPLAB Target: PIC32MX\n" - " MPLAB_PIC32MZ - Compiller: MPLAB Target: PIC32MZ\n" - " MSVC_MINGW - Compiller: MSVC or MinGW Target: x86\n" - " OWATCOM_16BIT_DOS_FLSH186 - Compiller: Open Watcom Target: 16 bit DOS Flsh186\n" - " OWATCOM_16BIT_DOS_PC - Compiller: Open Watcom Target: 16 bit DOS PC\n" - " PARADIGM_TERN_EE_LARGE - Compiller: Paradigm Target: Tern EE large\n" - " PARADIGM_TERN_EE_SMALL - Compiller: Paradigm Target: Tern EE small\n" - " RENESAS_RX100 - Compiller: Renesas Target: RX100\n" - " RENESAS_RX200 - Compiller: Renesas Target: RX200\n" - " RENESAS_RX600 - Compiller: Renesas Target: RX600\n" - " RENESAS_RX600_V2 - Compiller: Renesas Target: RX600 v2\n" - " RENESAS_RX700_V3_DPFPU - Compiller: Renesas Target: RX700 v3 with DPFPU\n" - " RENESAS_SH2A_FPU - Compiller: Renesas Target: SH2A with FPU\n" - " ROWLEY_MSP430F449 - Compiller: Rowley Target: MSP430F449\n" - " RVDS_ARM_CA9 - Compiller: RVDS Target: ARM Cortex-A9\n" - " RVDS_ARM_CM0 - Compiller: RVDS Target: ARM Cortex-M0\n" - " RVDS_ARM_CM3 - Compiller: RVDS Target: ARM Cortex-M3\n" - " RVDS_ARM_CM4_MPU - Compiller: RVDS Target: ARM Cortex-M4 with MPU\n" - " RVDS_ARM_CM4F - Compiller: RVDS Target: ARM Cortex-M4 with FPU\n" - " RVDS_ARM_CM7 - Compiller: RVDS Target: ARM Cortex-M7\n" - " RVDS_ARM7_LPC21XX - Compiller: RVDS Target: ARM7 LPC21xx\n" - " SDCC_CYGNAL - Compiller: SDCC Target: Cygnal\n" - " SOFTUNE_MB91460 - Compiller: Softune Target: MB91460\n" - " SOFTUNE_MB96340 - Compiller: Softune Target: MB96340\n" - " TASKING_ARM_CM4F - Compiller: Tasking Target: ARM Cortex-M4 with FPU\n" - " CDK_THEAD_CK802 - Compiller: CDK Target: T-head CK802\n" - " XCC_XTENSA - Compiller: XCC Target: Xtensa\n" - " WIZC_PIC18 - Compiller: WizC Target: PIC18") + " A_CUSTOM_PORT - Compiler: UserDefined Target: User Defined\n" + " BCC_16BIT_DOS_FLSH186 - Compiler: BCC Target: 16 bit DOS Flsh186\n" + " BCC_16BIT_DOS_PC - Compiler: BCC Target: 16 bit DOS PC\n" + " CCS_ARM_CM3 - Compiler: CCS Target: ARM Cortex-M3\n" + " CCS_ARM_CM4F - Compiler: CCS Target: ARM Cortex-M4 with FPU\n" + " CCS_ARM_CR4 - Compiler: CCS Target: ARM Cortex-R4\n" + " CCS_MSP430X - Compiler: CCS Target: MSP430X\n" + " CODEWARRIOR_COLDFIRE_V1 - Compiler: CoreWarrior Target: ColdFire V1\n" + " CODEWARRIOR_COLDFIRE_V2 - Compiler: CoreWarrior Target: 
ColdFire V2\n" + " CODEWARRIOR_HCS12 - Compiler: CoreWarrior Target: HCS12\n" + " GCC_ARM_CA9 - Compiler: GCC Target: ARM Cortex-A9\n" + " GCC_ARM_CA53_64_BIT - Compiler: GCC Target: ARM Cortex-A53 64 bit\n" + " GCC_ARM_CA53_64_BIT_SRE - Compiler: GCC Target: ARM Cortex-A53 64 bit SRE\n" + " GCC_ARM_CM0 - Compiler: GCC Target: ARM Cortex-M0\n" + " GCC_ARM_CM3 - Compiler: GCC Target: ARM Cortex-M3\n" + " GCC_ARM_CM3_MPU - Compiler: GCC Target: ARM Cortex-M3 with MPU\n" + " GCC_ARM_CM4_MPU - Compiler: GCC Target: ARM Cortex-M4 with MPU\n" + " GCC_ARM_CM4F - Compiler: GCC Target: ARM Cortex-M4 with FPU\n" + " GCC_ARM_CM7 - Compiler: GCC Target: ARM Cortex-M7\n" + " GCC_ARM_CM23_NONSECURE - Compiler: GCC Target: ARM Cortex-M23 non-secure\n" + " GCC_ARM_CM23_SECURE - Compiler: GCC Target: ARM Cortex-M23 secure\n" + " GCC_ARM_CM23_NTZ_NONSECURE - Compiler: GCC Target: ARM Cortex-M23 non-trustzone non-secure\n" + " GCC_ARM_CM33_NONSECURE - Compiler: GCC Target: ARM Cortex-M33 non-secure\n" + " GCC_ARM_CM33_SECURE - Compiler: GCC Target: ARM Cortex-M33 secure\n" + " GCC_ARM_CM33_NTZ_NONSECURE - Compiler: GCC Target: ARM Cortex-M33 non-trustzone non-secure\n" + " GCC_ARM_CM33_TFM - Compiler: GCC Target: ARM Cortex-M33 non-secure for TF-M\n" + " GCC_ARM_CM55_NONSECURE - Compiler: GCC Target: ARM Cortex-M55 non-secure\n" + " GCC_ARM_CM55_SECURE - Compiler: GCC Target: ARM Cortex-M55 secure\n" + " GCC_ARM_CM55_NTZ_NONSECURE - Compiler: GCC Target: ARM Cortex-M55 non-trustzone non-secure\n" + " GCC_ARM_CM55_TFM - Compiler: GCC Target: ARM Cortex-M55 non-secure for TF-M\n" + " GCC_ARM_CM85_NONSECURE - Compiler: GCC Target: ARM Cortex-M85 non-secure\n" + " GCC_ARM_CM85_SECURE - Compiler: GCC Target: ARM Cortex-M85 secure\n" + " GCC_ARM_CM85_NTZ_NONSECURE - Compiler: GCC Target: ARM Cortex-M85 non-trustzone non-secure\n" + " GCC_ARM_CM85_TFM - Compiler: GCC Target: ARM Cortex-M85 non-secure for TF-M\n" + " GCC_ARM_CR5 - Compiler: GCC Target: ARM Cortex-R5\n" + " GCC_ARM_CRX_NOGIC - Compiler: GCC Target: ARM Cortex-Rx no GIC\n" + " GCC_ARM7_AT91FR40008 - Compiler: GCC Target: ARM7 Atmel AT91R40008\n" + " GCC_ARM7_AT91SAM7S - Compiler: GCC Target: ARM7 Atmel AT91SAM7S\n" + " GCC_ARM7_LPC2000 - Compiler: GCC Target: ARM7 LPC2000\n" + " GCC_ARM7_LPC23XX - Compiler: GCC Target: ARM7 LPC23xx\n" + " GCC_ATMEGA323 - Compiler: GCC Target: ATMega323\n" + " GCC_AVR32_UC3 - Compiler: GCC Target: AVR32 UC3\n" + " GCC_COLDFIRE_V2 - Compiler: GCC Target: ColdFire V2\n" + " GCC_CORTUS_APS3 - Compiler: GCC Target: CORTUS APS3\n" + " GCC_H8S2329 - Compiler: GCC Target: H8S2329\n" + " GCC_HCS12 - Compiler: GCC Target: HCS12\n" + " GCC_IA32_FLAT - Compiler: GCC Target: IA32 flat\n" + " GCC_MICROBLAZE - Compiler: GCC Target: MicroBlaze\n" + " GCC_MICROBLAZE_V8 - Compiler: GCC Target: MicroBlaze V8\n" + " GCC_MICROBLAZE_V9 - Compiler: GCC Target: MicroBlaze V9\n" + " GCC_MSP430F449 - Compiler: GCC Target: MSP430F449\n" + " GCC_NIOSII - Compiler: GCC Target: NiosII\n" + " GCC_PPC405_XILINX - Compiler: GCC Target: Xilinx PPC405\n" + " GCC_PPC440_XILINX - Compiler: GCC Target: Xilinx PPC440\n" + " GCC_RISC_V - Compiler: GCC Target: RISC-V\n" + " GCC_RISC_V_PULPINO_VEGA_RV32M1RM - Compiler: GCC Target: RISC-V Pulpino Vega RV32M1RM\n" + " GCC_RL78 - Compiler: GCC Target: Renesas RL78\n" + " GCC_RX100 - Compiler: GCC Target: Renesas RX100\n" + " GCC_RX200 - Compiler: GCC Target: Renesas RX200\n" + " GCC_RX600 - Compiler: GCC Target: Renesas RX600\n" + " GCC_RX600_V2 - Compiler: GCC Target: Renesas RX600 v2\n" + " GCC_RX700_V3_DPFPU 
- Compiler: GCC Target: Renesas RX700 v3 with DPFPU\n" + " GCC_STR75X - Compiler: GCC Target: STR75x\n" + " GCC_TRICORE_1782 - Compiler: GCC Target: TriCore 1782\n" + " GCC_ARC_EM_HS - Compiler: GCC Target: DesignWare ARC EM HS\n" + " GCC_ARC_V1 - Compiler: GCC Target: DesignWare ARC v1\n" + " GCC_ATMEGA - Compiler: GCC Target: ATmega\n" + " GCC_POSIX - Compiler: GCC Target: Posix\n" + " GCC_RP2040 - Compiler: GCC Target: RP2040 ARM Cortex-M0+\n" + " GCC_XTENSA_ESP32 - Compiler: GCC Target: Xtensa ESP32\n" + " GCC_AVRDX - Compiler: GCC Target: AVRDx\n" + " GCC_AVR_MEGA0 - Compiler: GCC Target: AVR Mega0\n" + " IAR_78K0K - Compiler: IAR Target: Renesas 78K0K\n" + " IAR_ARM_CA5_NOGIC - Compiler: IAR Target: ARM Cortex-A5 no GIC\n" + " IAR_ARM_CA9 - Compiler: IAR Target: ARM Cortex-A9\n" + " IAR_ARM_CM0 - Compiler: IAR Target: ARM Cortex-M0\n" + " IAR_ARM_CM3 - Compiler: IAR Target: ARM Cortex-M3\n" + " IAR_ARM_CM4F - Compiler: IAR Target: ARM Cortex-M4 with FPU\n" + " IAR_ARM_CM4F_MPU - Compiler: IAR Target: ARM Cortex-M4 with FPU and MPU\n" + " IAR_ARM_CM7 - Compiler: IAR Target: ARM Cortex-M7\n" + " IAR_ARM_CM23_NONSECURE - Compiler: IAR Target: ARM Cortex-M23 non-secure\n" + " IAR_ARM_CM23_SECURE - Compiler: IAR Target: ARM Cortex-M23 secure\n" + " IAR_ARM_CM23_NTZ_NONSECURE - Compiler: IAR Target: ARM Cortex-M23 non-trustzone non-secure\n" + " IAR_ARM_CM33_NONSECURE - Compiler: IAR Target: ARM Cortex-M33 non-secure\n" + " IAR_ARM_CM33_SECURE - Compiler: IAR Target: ARM Cortex-M33 secure\n" + " IAR_ARM_CM33_NTZ_NONSECURE - Compiler: IAR Target: ARM Cortex-M33 non-trustzone non-secure\n" + " IAR_ARM_CM55_NONSECURE - Compiler: IAR Target: ARM Cortex-M55 non-secure\n" + " IAR_ARM_CM55_SECURE - Compiler: IAR Target: ARM Cortex-M55 secure\n" + " IAR_ARM_CM55_NTZ_NONSECURE - Compiler: IAR Target: ARM Cortex-M55 non-trustzone non-secure\n" + " IAR_ARM_CM85_NONSECURE - Compiler: IAR Target: ARM Cortex-M85 non-secure\n" + " IAR_ARM_CM85_SECURE - Compiler: IAR Target: ARM Cortex-M85 secure\n" + " IAR_ARM_CM85_NTZ_NONSECURE - Compiler: IAR Target: ARM Cortex-M85 non-trustzone non-secure\n" + " IAR_ARM_CRX_NOGIC - Compiler: IAR Target: ARM Cortex-Rx no GIC\n" + " IAR_ATMEGA323 - Compiler: IAR Target: ATMega323\n" + " IAR_ATMEL_SAM7S64 - Compiler: IAR Target: Atmel SAM7S64\n" + " IAR_ATMEL_SAM9XE - Compiler: IAR Target: Atmel SAM9XE\n" + " IAR_AVR_AVRDX - Compiler: IAR Target: AVRDx\n" + " IAR_AVR_MEGA0 - Compiler: IAR Target: AVR Mega0\n" + " IAR_AVR32_UC3 - Compiler: IAR Target: AVR32 UC3\n" + " IAR_LPC2000 - Compiler: IAR Target: LPC2000\n" + " IAR_MSP430 - Compiler: IAR Target: MSP430\n" + " IAR_MSP430X - Compiler: IAR Target: MSP430X\n" + " IAR_RISC_V - Compiler: IAR Target: RISC-V\n" + " IAR_RL78 - Compiler: IAR Target: Renesas RL78\n" + " IAR_RX100 - Compiler: IAR Target: Renesas RX100\n" + " IAR_RX600 - Compiler: IAR Target: Renesas RX600\n" + " IAR_RX700_V3_DPFPU - Compiler: IAR Target: Renesas RX700 v3 with DPFPU\n" + " IAR_RX_V2 - Compiler: IAR Target: Renesas RX v2\n" + " IAR_STR71X - Compiler: IAR Target: STR71x\n" + " IAR_STR75X - Compiler: IAR Target: STR75x\n" + " IAR_STR91X - Compiler: IAR Target: STR91x\n" + " IAR_V850ES_FX3 - Compiler: IAR Target: Renesas V850ES/Fx3\n" + " IAR_V850ES_HX3 - Compiler: IAR Target: Renesas V850ES/Hx3\n" + " MIKROC_ARM_CM4F - Compiler: MikroC Target: ARM Cortex-M4 with FPU\n" + " MPLAB_PIC18F - Compiler: MPLAB Target: PIC18F\n" + " MPLAB_PIC24 - Compiler: MPLAB Target: PIC24\n" + " MPLAB_PIC32MEC14XX - Compiler: MPLAB Target: PIC32MEC14xx\n" + " 
MPLAB_PIC32MX - Compiler: MPLAB Target: PIC32MX\n" + " MPLAB_PIC32MZ - Compiler: MPLAB Target: PIC32MZ\n" + " MSVC_MINGW - Compiler: MSVC or MinGW Target: x86\n" + " OWATCOM_16BIT_DOS_FLSH186 - Compiler: Open Watcom Target: 16 bit DOS Flsh186\n" + " OWATCOM_16BIT_DOS_PC - Compiler: Open Watcom Target: 16 bit DOS PC\n" + " PARADIGM_TERN_EE_LARGE - Compiler: Paradigm Target: Tern EE large\n" + " PARADIGM_TERN_EE_SMALL - Compiler: Paradigm Target: Tern EE small\n" + " RENESAS_RX100 - Compiler: Renesas Target: RX100\n" + " RENESAS_RX200 - Compiler: Renesas Target: RX200\n" + " RENESAS_RX600 - Compiler: Renesas Target: RX600\n" + " RENESAS_RX600_V2 - Compiler: Renesas Target: RX600 v2\n" + " RENESAS_RX700_V3_DPFPU - Compiler: Renesas Target: RX700 v3 with DPFPU\n" + " RENESAS_SH2A_FPU - Compiler: Renesas Target: SH2A with FPU\n" + " ROWLEY_MSP430F449 - Compiler: Rowley Target: MSP430F449\n" + " RVDS_ARM_CA9 - Compiler: RVDS Target: ARM Cortex-A9\n" + " RVDS_ARM_CM0 - Compiler: RVDS Target: ARM Cortex-M0\n" + " RVDS_ARM_CM3 - Compiler: RVDS Target: ARM Cortex-M3\n" + " RVDS_ARM_CM4_MPU - Compiler: RVDS Target: ARM Cortex-M4 with MPU\n" + " RVDS_ARM_CM4F - Compiler: RVDS Target: ARM Cortex-M4 with FPU\n" + " RVDS_ARM_CM7 - Compiler: RVDS Target: ARM Cortex-M7\n" + " RVDS_ARM7_LPC21XX - Compiler: RVDS Target: ARM7 LPC21xx\n" + " SDCC_CYGNAL - Compiler: SDCC Target: Cygnal\n" + " SOFTUNE_MB91460 - Compiler: Softune Target: MB91460\n" + " SOFTUNE_MB96340 - Compiler: Softune Target: MB96340\n" + " TASKING_ARM_CM4F - Compiler: Tasking Target: ARM Cortex-M4 with FPU\n" + " CDK_THEAD_CK802 - Compiler: CDK Target: T-head CK802\n" + " XCC_XTENSA - Compiler: XCC Target: Xtensa\n" + " WIZC_PIC18 - Compiler: WizC Target: PIC18") +elseif((FREERTOS_PORT STREQUAL "A_CUSTOM_PORT") AND (NOT TARGET freertos_kernel_port) ) + message(FATAL_ERROR " FREERTOS_PORT is set to A_CUSTOM_PORT. Please specify the custom port target with all necessary files. For example:\n" + " Assuming a directory of:\n" + " FreeRTOSCustomPort/\n" + " CMakeLists.txt\n" + " port.c\n" + " portmacro.h\n" + " Where FreeRTOSCustomPort/CMakeLists.txt is a modified version of:\n" + " add_library(freertos_kernel_port STATIC)\n" + " target_sources(freertos_kernel_port\n" + " PRIVATE\n" + " port.c\n" + " portmacro.h)\n" + " target_include_directories(freertos_kernel_port\n" + " PUBLIC\n" + " .)\n" + " taget_link_libraries(freertos_kernel_port\n" + " PRIVATE\n" + " freertos_kernel)") endif() add_subdirectory(portable) @@ -202,7 +221,26 @@ add_library(freertos_kernel STATIC target_include_directories(freertos_kernel PUBLIC include - ${FREERTOS_CONFIG_FILE_DIRECTORY} ) -target_link_libraries(freertos_kernel freertos_kernel_port) +target_compile_options( freertos_kernel + PRIVATE + $<$:-Wno-cast-align> + $<$:-Wno-covered-switch-default> + $<$:-Wno-documentation> + $<$:-Wno-extra-semi-stmt> + $<$:-Wno-int-to-pointer-cast> + $<$:-Wno-missing-noreturn> + $<$:-Wno-missing-variable-declarations> + $<$:-Wno-padded> + $<$:-Wno-pointer-to-int-cast> + $<$:-Wno-shorten-64-to-32> + $<$:-Wno-sign-conversion> + $<$:-Wno-unused-macros> +) + +target_link_libraries(freertos_kernel + PUBLIC + freertos_config + freertos_kernel_port +) diff --git a/portable/CMakeLists.txt b/portable/CMakeLists.txt index ea54ec4231f..41015d1a95e 100644 --- a/portable/CMakeLists.txt +++ b/portable/CMakeLists.txt @@ -1,5 +1,12 @@ # FreeRTOS internal cmake file. 
Do not use it in user top-level project +if (FREERTOS_PORT STREQUAL "A_CUSTOM_PORT") + message(STATUS "Using a custom FREERTOS_PORT.") + return() +endif() + +# FreeRTOS internal cmake file. Do not use it in user top-level project + add_library(freertos_kernel_port STATIC # 16-Bit DOS ports for BCC $<$: @@ -983,12 +990,33 @@ target_include_directories(freertos_kernel_port PUBLIC $<$:${CMAKE_CURRENT_LIST_DIR}/WizC/PIC18> ) +if(FREERTOS_PORT STREQUAL GCC_POSIX) + find_package(Threads REQUIRED) +endif() + target_link_libraries(freertos_kernel_port PUBLIC $<$:pico_base_headers> $<$:idf::esp32> PRIVATE freertos_kernel + $<$:Threads::Threads> "$<$:hardware_clocks;hardware_exception>" $<$:winmm> # Windows library which implements timers ) + +# Note these are primarily for the POSIX port. +target_compile_options( freertos_kernel_port + PRIVATE + $<$:-Wno-disabled-macro-expansion> + $<$:-Wno-documentation> + $<$:-Wno-extra-semi-stmt> + $<$:-Wno-missing-noreturn> + $<$:-Wno-missing-variable-declarations> + $<$:-Wno-padded> + $<$:-Wno-pointer-to-int-cast> + $<$:-Wno-reserved-macro-identifier> + $<$:-Wno-sign-conversion> + $<$:-Wno-type-limits> + $<$:-Wno-unused-macros> +) From 1822d8eb9aeb5aedb66cbf56e4a7fca58d898009 Mon Sep 17 00:00:00 2001 From: Paul Helter Date: Fri, 30 Sep 2022 15:16:20 -0700 Subject: [PATCH 02/24] Using single name definition for libraries everywhere. (#558) --- CMakeLists.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index d622fc8a51e..8d1d49b96e4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -11,11 +11,10 @@ cmake_minimum_required(VERSION 3.15) # option FREERTOS_HEAP. If the option is not set, the cmake will default to # using heap_4.c. -# FreeRTOS::Config target - typically INTERFACE - defines the path to FreeRTOSConfig.h and optionally other freertos based config files +# `freertos_config` target defines the path to FreeRTOSConfig.h and optionally other freertos based config files if(NOT TARGET freertos_config ) message(FATAL_ERROR " freertos_config target not specified. Please specify a cmake target that defines the include directory for FreeRTOSConfig.h:\n" " add_library(freertos_config INTERFACE)\n" - " add_library(FreeRTOS::Config ALIAS freertos_config)\n" " target_include_directories(freertos_config SYSTEM\n" " INTERFACE\n" " include) # The config file directory\n" From 50a29efbec18e5b45f513695eb50a813de534e81 Mon Sep 17 00:00:00 2001 From: Paul Helter Date: Tue, 11 Oct 2022 12:27:08 -0700 Subject: [PATCH 03/24] Supporting backwards compatibility with FREERTOS_CONFIG_FILE_DIRECTORY (#571) --- CMakeLists.txt | 38 ++++++++++++++++++++++++++++++-------- 1 file changed, 30 insertions(+), 8 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8d1d49b96e4..b26bfab03b7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -13,14 +13,36 @@ cmake_minimum_required(VERSION 3.15) # `freertos_config` target defines the path to FreeRTOSConfig.h and optionally other freertos based config files if(NOT TARGET freertos_config ) - message(FATAL_ERROR " freertos_config target not specified. 
Please specify a cmake target that defines the include directory for FreeRTOSConfig.h:\n" - " add_library(freertos_config INTERFACE)\n" - " target_include_directories(freertos_config SYSTEM\n" - " INTERFACE\n" - " include) # The config file directory\n" - " target_compile_definitions(freertos_config\n" - " PUBLIC\n" - " projCOVERAGE_TEST=0)\n") + if (NOT DEFINED FREERTOS_CONFIG_FILE_DIRECTORY ) + + message(FATAL_ERROR " freertos_config target not specified. Please specify a cmake target that defines the include directory for FreeRTOSConfig.h:\n" + " add_library(freertos_config INTERFACE)\n" + " target_include_directories(freertos_config SYSTEM\n" + " INTERFACE\n" + " include) # The config file directory\n" + " target_compile_definitions(freertos_config\n" + " PUBLIC\n" + " projCOVERAGE_TEST=0)\n") + else() + message(WARNING " Using deprecated 'FREERTOS_CONFIG_FILE_DIRECTORY' - please update your project CMakeLists.txt file:\n" + " add_library(freertos_config INTERFACE)\n" + " target_include_directories(freertos_config SYSTEM\n" + " INTERFACE\n" + " include) # The config file directory\n" + " target_compile_definitions(freertos_config\n" + " PUBLIC\n" + " projCOVERAGE_TEST=0)\n") + # Currently will add this in here. + add_library(freertos_config INTERFACE) + target_include_directories(freertos_config SYSTEM + INTERFACE + ${FREERTOS_CONFIG_FILE_DIRECTORY} + ) + target_compile_definitions(freertos_config + PUBLIC + projCOVERAGE_TEST=0 + ) + endif() endif() # Heap number or absolute path to custom heap implementation provided by user From 06f9314f414860d748a2d9353bfe649cd7222865 Mon Sep 17 00:00:00 2001 From: Paul Helter Date: Wed, 12 Oct 2022 03:00:24 -0700 Subject: [PATCH 04/24] Removing compiler warnings for GNU and Clang. (#571) --- CMakeLists.txt | 16 ---------------- portable/CMakeLists.txt | 16 ---------------- 2 files changed, 32 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b26bfab03b7..13c48325cba 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -244,22 +244,6 @@ target_include_directories(freertos_kernel include ) -target_compile_options( freertos_kernel - PRIVATE - $<$:-Wno-cast-align> - $<$:-Wno-covered-switch-default> - $<$:-Wno-documentation> - $<$:-Wno-extra-semi-stmt> - $<$:-Wno-int-to-pointer-cast> - $<$:-Wno-missing-noreturn> - $<$:-Wno-missing-variable-declarations> - $<$:-Wno-padded> - $<$:-Wno-pointer-to-int-cast> - $<$:-Wno-shorten-64-to-32> - $<$:-Wno-sign-conversion> - $<$:-Wno-unused-macros> -) - target_link_libraries(freertos_kernel PUBLIC freertos_config diff --git a/portable/CMakeLists.txt b/portable/CMakeLists.txt index 41015d1a95e..ae9de9cfe1b 100644 --- a/portable/CMakeLists.txt +++ b/portable/CMakeLists.txt @@ -1004,19 +1004,3 @@ target_link_libraries(freertos_kernel_port "$<$:hardware_clocks;hardware_exception>" $<$:winmm> # Windows library which implements timers ) - -# Note these are primarily for the POSIX port. -target_compile_options( freertos_kernel_port - PRIVATE - $<$:-Wno-disabled-macro-expansion> - $<$:-Wno-documentation> - $<$:-Wno-extra-semi-stmt> - $<$:-Wno-missing-noreturn> - $<$:-Wno-missing-variable-declarations> - $<$:-Wno-padded> - $<$:-Wno-pointer-to-int-cast> - $<$:-Wno-reserved-macro-identifier> - $<$:-Wno-sign-conversion> - $<$:-Wno-type-limits> - $<$:-Wno-unused-macros> -) From a3db429cc09433a99b1c3ac1382fb83d9681f09d Mon Sep 17 00:00:00 2001 From: Paul Helter Date: Sun, 23 Oct 2022 16:46:05 -0700 Subject: [PATCH 05/24] Added in documentation on how to consume from a main project. 
Added default PORT selection for native POSIX and MINGW platforms.
---
 CMakeLists.txt | 16 +++++++++++-----
 README.md | 49 +++++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 58 insertions(+), 7 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 13c48325cba..7a3e081476d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,7 +1,7 @@ cmake_minimum_required(VERSION 3.15)
 # User is responsible for one mandatory option:
-# FREERTOS_PORT
+# FREERTOS_PORT - if not specified, a native port (POSIX or MinGW) is auto-detected and used.
 #
 # User is responsible for one library target:
 # freertos_config, typically an INTERFACE library
@@ -49,16 +49,14 @@ endif()
 set(FREERTOS_HEAP "4" CACHE STRING "FreeRTOS heap model number. 1 .. 5. Or absolute path to custom heap source file")
 # FreeRTOS port option
-set(FREERTOS_PORT "" CACHE STRING "FreeRTOS port name")
-
 if(NOT FREERTOS_PORT)
-    message(FATAL_ERROR " FREERTOS_PORT is not set. Please specify it from top-level CMake file (example):\n"
+    message(WARNING " FREERTOS_PORT is not set. Please specify it from top-level CMake file (example):\n"
     " set(FREERTOS_PORT GCC_ARM_CM4F CACHE STRING \"\")\n"
     " or from CMake command line option:\n"
     " -DFREERTOS_PORT=GCC_ARM_CM4F\n"
     " \n"
     " Available port options:\n"
-    " A_CUSTOM_PORT - Compiler: UserDefined Target: User Defined\n"
+    " A_CUSTOM_PORT - Compiler: User Defined Target: User Defined\n"
     " BCC_16BIT_DOS_FLSH186 - Compiler: BCC Target: 16 bit DOS Flsh186\n"
     " BCC_16BIT_DOS_PC - Compiler: BCC Target: 16 bit DOS PC\n"
     " CCS_ARM_CM3 - Compiler: CCS Target: ARM Cortex-M3\n"
@@ -203,6 +201,14 @@ if(NOT FREERTOS_PORT)
     " CDK_THEAD_CK802 - Compiler: CDK Target: T-head CK802\n"
     " XCC_XTENSA - Compiler: XCC Target: Xtensa\n"
     " WIZC_PIC18 - Compiler: WizC Target: PIC18")
+    # Native FREERTOS_PORT for Linux and Windows MINGW builds
+    if(UNIX)
+        message(STATUS " Auto-Detected Unix, setting FREERTOS_PORT=GCC_POSIX")
+        set(FREERTOS_PORT GCC_POSIX CACHE STRING "FreeRTOS port name")
+    elseif(MINGW)
+        message(STATUS " Auto-Detected MINGW, setting FREERTOS_PORT=MSVC_MINGW")
+        set(FREERTOS_PORT MSVC_MINGW CACHE STRING "FreeRTOS port name")
+    endif()
 elseif((FREERTOS_PORT STREQUAL "A_CUSTOM_PORT") AND (NOT TARGET freertos_kernel_port) )
     message(FATAL_ERROR " FREERTOS_PORT is set to A_CUSTOM_PORT. Please specify the custom port target with all necessary files. For example:\n"
     " Assuming a directory of:\n"
     " FreeRTOSCustomPort/\n"
     " CMakeLists.txt\n"
     " port.c\n"
     " portmacro.h\n"
     " Where FreeRTOSCustomPort/CMakeLists.txt is a modified version of:\n"
     " add_library(freertos_kernel_port STATIC)\n"
     " target_sources(freertos_kernel_port\n"
     " PRIVATE\n"
     " port.c\n"
     " portmacro.h)\n"
     " target_include_directories(freertos_kernel_port\n"
     " PUBLIC\n"
     " .)\n"
     " target_link_libraries(freertos_kernel_port\n"
     " PRIVATE\n"
     " freertos_kernel)")
 endif()
 add_subdirectory(portable)

diff --git a/README.md b/README.md
index 52d78dd7924..d147f884077 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,53 @@ Additionally, for FreeRTOS kernel feature information refer to the [Developer Do
 ### Getting help
 If you have any questions or need assistance troubleshooting your FreeRTOS project, we have an active community that can help on the [FreeRTOS Community Support Forum](https://forums.freertos.org).
-## Cloning this repository
+## To consume FreeRTOS-Kernel
+
+### Consume with CMake
+If using CMake, it is recommended to consume this repository using FetchContent.
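For orientation, here is a sketch of a complete minimal consumer `CMakeLists.txt`; the `my_app` executable, its `main.c` source and the `include/FreeRTOSConfig.h` location are placeholders for your own project, and the heap/port values simply mirror the native-build example below. The individual pieces are then explained step by step.

```cmake
cmake_minimum_required(VERSION 3.15)
project(my_app C)

include(FetchContent)

# Where to fetch the kernel from (prefer a pinned tag or commit hash).
FetchContent_Declare(freertos_kernel
    GIT_REPOSITORY https://github.com/FreeRTOS/FreeRTOS-Kernel.git
    GIT_TAG        master
)

# Kernel configuration target: an INTERFACE library that publishes the
# directory containing FreeRTOSConfig.h (assumed to be ./include here).
add_library(freertos_config INTERFACE)
target_include_directories(freertos_config SYSTEM INTERFACE include)
target_compile_definitions(freertos_config INTERFACE projCOVERAGE_TEST=0)

# Kernel build options: heap implementation and port.
set(FREERTOS_HEAP "4" CACHE STRING "" FORCE)
set(FREERTOS_PORT "GCC_POSIX" CACHE STRING "" FORCE)

FetchContent_MakeAvailable(freertos_kernel)

# The application simply links against the freertos_kernel target.
add_executable(my_app main.c)
target_link_libraries(my_app PRIVATE freertos_kernel)
```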
+Add the following into your project's main or a subdirectory's `CMakeLists.txt`: + +- Define the source and version/tag you want to use: + +```cmake +FetchContent_Declare( freertos_kernel + GIT_REPOSITORY https://github.com/FreeRTOS/FreeRTOS-Kernel.git + GIT_TAG master #Note: Best practice to use specific git-hash or tagged version +) +``` + +- Add a freertos_config library (typically an INTERFACE library) The following assumes the directory structure: + - `include/FreeRTOSConfig.h` +```cmake +add_library(freertos_config INTERFACE) + +target_include_directories(freertos_config SYSTEM +INTERFACE + include +) + +target_compile_definitions(freertos_config + INTERFACE + projCOVERAGE_TEST=0 +) +``` + +- Configure the FreeRTOS-Kernel and make it available + - this particular example supports a native and cross-compiled build option. + +```cmake +set( FREERTOS_HEAP "4" CACHE STRING "" FORCE) +# Select the native compile PORT +set( FREERTOS_PORT "GCC_POSIX" CACHE STRING "" FORCE) +# Select the cross-compile PORT +if (CMAKE_CROSSCOMPILING) + set(FREERTOS_PORT "GCC_ARM_CA9" CACHE STRING "" FORCE) +endif() + +FetchContent_MakeAvailable(freertos_kernel) +``` + +### Consuming stand-alone - Cloning this repository To clone using HTTPS: ``` @@ -36,4 +82,3 @@ FreeRTOS files are formatted using the "uncrustify" tool. The configuration file ### Spelling *lexicon.txt* contains words that are not traditionally found in an English dictionary. It is used by the spellchecker to verify the various jargon, variable names, and other odd words used in the FreeRTOS code base. If your pull request fails to pass the spelling and you believe this is a mistake, then add the word to *lexicon.txt*. Note that only the FreeRTOS Kernel source files are checked for proper spelling, the portable section is ignored. - From b2c564cd1045d1ed9daf9f436204edcfeb95e981 Mon Sep 17 00:00:00 2001 From: Paul Helter Date: Fri, 4 Nov 2022 16:23:52 -0700 Subject: [PATCH 06/24] Only adding freertos_config if it exists. Removing auto generation of it from a FREERTOS_CONFIG_FILE_DIRECTORY. --- CMakeLists.txt | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7a3e081476d..205c19a4817 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -6,6 +6,8 @@ cmake_minimum_required(VERSION 3.15) # User is responsible for one library target: # freertos_config ,typcially an INTERFACE library # +# DEPRECATED: FREERTOS_CONFIG_FILE_DIRECTORY - but still supported if no freertos_config defined for now. +# May be removed at some point in the future. # User can choose which heap implementation to use (either the implementations # included with FreeRTOS [1..5] or a custom implementation ) by providing the # option FREERTOS_HEAP. If the option is not set, the cmake will default to @@ -32,16 +34,6 @@ if(NOT TARGET freertos_config ) " target_compile_definitions(freertos_config\n" " PUBLIC\n" " projCOVERAGE_TEST=0)\n") - # Currently will add this in here. - add_library(freertos_config INTERFACE) - target_include_directories(freertos_config SYSTEM - INTERFACE - ${FREERTOS_CONFIG_FILE_DIRECTORY} - ) - target_compile_definitions(freertos_config - PUBLIC - projCOVERAGE_TEST=0 - ) endif() endif() @@ -248,10 +240,12 @@ add_library(freertos_kernel STATIC target_include_directories(freertos_kernel PUBLIC include + # Note: DEPRECATED but still supported, may be removed in a future release. 
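        # (The entry below is intended to take effect only when the consumer supplied the
        #  deprecated FREERTOS_CONFIG_FILE_DIRECTORY variable instead of a freertos_config target.)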
+ $<$>:${FREERTOS_CONFIG_FILE_DIRECTORY}> ) target_link_libraries(freertos_kernel PUBLIC - freertos_config + $<$:freertos_config> freertos_kernel_port ) From 8c3c2c16d24a57fd159a583cfc1ab28307bb76d9 Mon Sep 17 00:00:00 2001 From: Paul Helter Date: Thu, 13 Oct 2022 13:33:44 -0700 Subject: [PATCH 07/24] Fixing clang and gnu compiler warnings. --- event_groups.c | 10 +- include/croutine.h | 10 +- include/list.h | 36 +++---- include/queue.h | 6 +- include/task.h | 2 +- portable/GCC/RX100/portmacro.h | 2 +- portable/GCC/RX200/portmacro.h | 2 +- portable/GCC/RX600/portmacro.h | 2 +- portable/GCC/RX600v2/portmacro.h | 2 +- portable/GCC/RX700v3_DPFPU/portmacro.h | 2 +- portable/IAR/RX100/portmacro.h | 2 +- portable/IAR/RX600/portmacro.h | 2 +- portable/IAR/RX700v3_DPFPU/portmacro.h | 2 +- portable/IAR/RXv2/portmacro.h | 2 +- portable/MemMang/heap_4.c | 16 ++- portable/Renesas/RX100/portmacro.h | 2 +- portable/Renesas/RX200/portmacro.h | 2 +- portable/Renesas/RX600/portmacro.h | 2 +- portable/Renesas/RX600v2/portmacro.h | 2 +- portable/Renesas/RX700v3_DPFPU/portmacro.h | 2 +- portable/ThirdParty/GCC/Posix/port.c | 7 +- queue.c | 64 ++++++------ stream_buffer.c | 52 +++++----- tasks.c | 111 +++++++++++---------- timers.c | 24 +++-- 25 files changed, 195 insertions(+), 171 deletions(-) diff --git a/event_groups.c b/event_groups.c index 24c7b3781a1..6f486792bbb 100644 --- a/event_groups.c +++ b/event_groups.c @@ -64,14 +64,14 @@ typedef struct EventGroupDef_t { EventBits_t uxEventBits; - List_t xTasksWaitingForBits; /*< List of tasks waiting for a bit to be set. */ + List_t xTasksWaitingForBits; /**< List of tasks waiting for a bit to be set. */ #if ( configUSE_TRACE_FACILITY == 1 ) UBaseType_t uxEventGroupNumber; #endif #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) - uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */ + uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */ #endif } EventGroup_t; @@ -516,15 +516,15 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) { - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; EventGroup_t const * const pxEventBits = xEventGroup; EventBits_t uxReturn; - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { uxReturn = pxEventBits->uxEventBits; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return uxReturn; } /*lint !e818 EventGroupHandle_t is a typedef used in other functions to so can't be pointer to const. */ diff --git a/include/croutine.h b/include/croutine.h index 63d1d94f742..e83d3a20b87 100644 --- a/include/croutine.h +++ b/include/croutine.h @@ -53,11 +53,11 @@ typedef void (* crCOROUTINE_CODE)( CoRoutineHandle_t, typedef struct corCoRoutineControlBlock { crCOROUTINE_CODE pxCoRoutineFunction; - ListItem_t xGenericListItem; /*< List item used to place the CRCB in ready and blocked queues. */ - ListItem_t xEventListItem; /*< List item used to place the CRCB in event lists. */ - UBaseType_t uxPriority; /*< The priority of the co-routine in relation to other co-routines. 
*/ - UBaseType_t uxIndex; /*< Used to distinguish between co-routines when multiple co-routines use the same co-routine function. */ - uint16_t uxState; /*< Used internally by the co-routine implementation. */ + ListItem_t xGenericListItem; /**< List item used to place the CRCB in ready and blocked queues. */ + ListItem_t xEventListItem; /**< List item used to place the CRCB in event lists. */ + UBaseType_t uxPriority; /**< The priority of the co-routine in relation to other co-routines. */ + UBaseType_t uxIndex; /**< Used to distinguish between co-routines when multiple co-routines use the same co-routine function. */ + uint16_t uxState; /**< Used internally by the co-routine implementation. */ } CRCB_t; /* Co-routine control block. Note must be identical in size down to uxPriority with TCB_t. */ /** diff --git a/include/list.h b/include/list.h index 05ab2217dc3..851c7e4673e 100644 --- a/include/list.h +++ b/include/list.h @@ -143,20 +143,20 @@ struct xLIST; struct xLIST_ITEM { - listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ - configLIST_VOLATILE TickType_t xItemValue; /*< The value being listed. In most cases this is used to sort the list in ascending order. */ - struct xLIST_ITEM * configLIST_VOLATILE pxNext; /*< Pointer to the next ListItem_t in the list. */ - struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; /*< Pointer to the previous ListItem_t in the list. */ - void * pvOwner; /*< Pointer to the object (normally a TCB) that contains the list item. There is therefore a two way link between the object containing the list item and the list item itself. */ - struct xLIST * configLIST_VOLATILE pxContainer; /*< Pointer to the list in which this list item is placed (if any). */ - listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + configLIST_VOLATILE TickType_t xItemValue; /**< The value being listed. In most cases this is used to sort the list in ascending order. */ + struct xLIST_ITEM * configLIST_VOLATILE pxNext; /**< Pointer to the next ListItem_t in the list. */ + struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; /**< Pointer to the previous ListItem_t in the list. */ + void * pvOwner; /**< Pointer to the object (normally a TCB) that contains the list item. There is therefore a two way link between the object containing the list item and the list item itself. */ + struct xLIST * configLIST_VOLATILE pxContainer; /**< Pointer to the list in which this list item is placed (if any). */ + listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ }; typedef struct xLIST_ITEM ListItem_t; /* For some reason lint wants this as two separate definitions. */ #if ( configUSE_MINI_LIST_ITEM == 1 ) struct xMINI_LIST_ITEM { - listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. 
*/ configLIST_VOLATILE TickType_t xItemValue; struct xLIST_ITEM * configLIST_VOLATILE pxNext; struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; @@ -171,11 +171,11 @@ typedef struct xLIST_ITEM ListItem_t; /* For some reason lint */ typedef struct xLIST { - listFIRST_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + listFIRST_LIST_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ volatile UBaseType_t uxNumberOfItems; - ListItem_t * configLIST_VOLATILE pxIndex; /*< Used to walk through the list. Points to the last item returned by a call to listGET_OWNER_OF_NEXT_ENTRY (). */ - MiniListItem_t xListEnd; /*< List item that contains the maximum possible item value meaning it is always at the end of the list and is therefore used as a marker. */ - listSECOND_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + ListItem_t * configLIST_VOLATILE pxIndex; /**< Used to walk through the list. Points to the last item returned by a call to listGET_OWNER_OF_NEXT_ENTRY (). */ + MiniListItem_t xListEnd; /**< List item that contains the maximum possible item value meaning it is always at the end of the list and is therefore used as a marker. */ + listSECOND_LIST_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ } List_t; /* @@ -283,7 +283,7 @@ typedef struct xLIST * \ingroup LinkedList */ #define listGET_OWNER_OF_NEXT_ENTRY( pxTCB, pxList ) \ - { \ + do { \ List_t * const pxConstList = ( pxList ); \ /* Increment the index to the next item and return the item, ensuring */ \ /* we don't return the marker used at the end of the list. */ \ @@ -293,7 +293,7 @@ typedef struct xLIST ( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \ } \ ( pxTCB ) = ( pxConstList )->pxIndex->pvOwner; \ - } + } while(0) /* * Version of uxListRemove() that does not return a value. Provided as a slight @@ -312,7 +312,7 @@ typedef struct xLIST * \ingroup LinkedList */ #define listREMOVE_ITEM( pxItemToRemove ) \ - { \ + do { \ /* The list item knows which list it is in. Obtain the list from the list \ * item. */ \ List_t * const pxList = ( pxItemToRemove )->pxContainer; \ @@ -327,7 +327,7 @@ typedef struct xLIST \ ( pxItemToRemove )->pxContainer = NULL; \ ( pxList->uxNumberOfItems )--; \ - } + } while(0) /* * Inline version of vListInsertEnd() to provide slight optimisation for @@ -352,7 +352,7 @@ typedef struct xLIST * \ingroup LinkedList */ #define listINSERT_END( pxList, pxNewListItem ) \ - { \ + do { \ ListItem_t * const pxIndex = ( pxList )->pxIndex; \ \ /* Only effective when configASSERT() is also defined, these tests may catch \ @@ -374,7 +374,7 @@ typedef struct xLIST ( pxNewListItem )->pxContainer = ( pxList ); \ \ ( ( pxList )->uxNumberOfItems )++; \ - } + } while(0) /* * Access function to obtain the owner of the first entry in a list. Lists diff --git a/include/queue.h b/include/queue.h index 426de468fe0..e67c4f6e3f8 100644 --- a/include/queue.h +++ b/include/queue.h @@ -1346,9 +1346,9 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, * @param pvBuffer Pointer to the buffer into which the received item will * be copied. * - * @param pxTaskWoken A task may be blocked waiting for space to become - * available on the queue. 
If xQueueReceiveFromISR causes such a task to - * unblock *pxTaskWoken will get set to pdTRUE, otherwise *pxTaskWoken will + * @param pxHigherPriorityTaskWoken A task may be blocked waiting for space to + * become available on the queue. If xQueueReceiveFromISR causes such a task + * to unblock *pxTaskWoken will get set to pdTRUE, otherwise *pxTaskWoken will * remain unchanged. * * @return pdTRUE if an item was successfully received from the queue, diff --git a/include/task.h b/include/task.h index a3548296b97..d7f4fc22678 100644 --- a/include/task.h +++ b/include/task.h @@ -658,7 +658,7 @@ typedef enum * * @param xTask The handle of the task being updated. * - * @param xRegions A pointer to a MemoryRegion_t structure that contains the + * @param[in] pxRegions A pointer to a MemoryRegion_t structure that contains the * new memory region definitions. * * Example usage: diff --git a/portable/GCC/RX100/portmacro.h b/portable/GCC/RX100/portmacro.h index 1246f94b474..520d17dbbb7 100644 --- a/portable/GCC/RX100/portmacro.h +++ b/portable/GCC/RX100/portmacro.h @@ -126,7 +126,7 @@ extern void vTaskExitCritical( void ); uint32_t ulPortGetIPL( void ) __attribute__((naked)); void vPortSetIPL( uint32_t ulNewIPL ) __attribute__((naked)); #define portSET_INTERRUPT_MASK_FROM_ISR() ulPortGetIPL(); portDISABLE_INTERRUPTS() -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) vPortSetIPL( uxSavedInterruptStatus ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) vPortSetIPL( (uint32_t) xSavedInterruptStatus ) /* Tickless idle/low power functionality. */ #if configUSE_TICKLESS_IDLE == 1 diff --git a/portable/GCC/RX200/portmacro.h b/portable/GCC/RX200/portmacro.h index cb697a75df1..09b3b49758d 100644 --- a/portable/GCC/RX200/portmacro.h +++ b/portable/GCC/RX200/portmacro.h @@ -129,7 +129,7 @@ extern void vTaskExitCritical( void ); uint32_t ulPortGetIPL( void ) __attribute__((naked)); void vPortSetIPL( uint32_t ulNewIPL ) __attribute__((naked)); #define portSET_INTERRUPT_MASK_FROM_ISR() ulPortGetIPL(); portDISABLE_INTERRUPTS() -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) vPortSetIPL( uxSavedInterruptStatus ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) vPortSetIPL( (uint32_t) xSavedInterruptStatus ) /*-----------------------------------------------------------*/ diff --git a/portable/GCC/RX600/portmacro.h b/portable/GCC/RX600/portmacro.h index 3a63cc36410..ec3dba37404 100644 --- a/portable/GCC/RX600/portmacro.h +++ b/portable/GCC/RX600/portmacro.h @@ -129,7 +129,7 @@ extern void vTaskExitCritical( void ); uint32_t ulPortGetIPL( void ) __attribute__((naked)); void vPortSetIPL( uint32_t ulNewIPL ) __attribute__((naked)); #define portSET_INTERRUPT_MASK_FROM_ISR() ulPortGetIPL(); portDISABLE_INTERRUPTS() -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) vPortSetIPL( uxSavedInterruptStatus ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) vPortSetIPL( (uint32_t) xSavedInterruptStatus ) /*-----------------------------------------------------------*/ diff --git a/portable/GCC/RX600v2/portmacro.h b/portable/GCC/RX600v2/portmacro.h index 3a63cc36410..ec3dba37404 100644 --- a/portable/GCC/RX600v2/portmacro.h +++ b/portable/GCC/RX600v2/portmacro.h @@ -129,7 +129,7 @@ extern void vTaskExitCritical( void ); uint32_t ulPortGetIPL( void ) __attribute__((naked)); void vPortSetIPL( uint32_t ulNewIPL ) __attribute__((naked)); #define portSET_INTERRUPT_MASK_FROM_ISR() ulPortGetIPL(); portDISABLE_INTERRUPTS() 
-#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) vPortSetIPL( uxSavedInterruptStatus ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) vPortSetIPL( (uint32_t) xSavedInterruptStatus ) /*-----------------------------------------------------------*/ diff --git a/portable/GCC/RX700v3_DPFPU/portmacro.h b/portable/GCC/RX700v3_DPFPU/portmacro.h index 8e5c458fe1b..95755edcc07 100644 --- a/portable/GCC/RX700v3_DPFPU/portmacro.h +++ b/portable/GCC/RX700v3_DPFPU/portmacro.h @@ -153,7 +153,7 @@ uint32_t ulPortGetIPL( void ) __attribute__( ( naked ) ); void vPortSetIPL( uint32_t ulNewIPL ) __attribute__( ( naked ) ); #define portSET_INTERRUPT_MASK_FROM_ISR() ulPortGetIPL(); portDISABLE_INTERRUPTS() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) vPortSetIPL( uxSavedInterruptStatus ) + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) vPortSetIPL( (uint32_t) xSavedInterruptStatus ) /*-----------------------------------------------------------*/ diff --git a/portable/IAR/RX100/portmacro.h b/portable/IAR/RX100/portmacro.h index 9d8605dd3e9..cd3253036a3 100644 --- a/portable/IAR/RX100/portmacro.h +++ b/portable/IAR/RX100/portmacro.h @@ -122,7 +122,7 @@ extern void vTaskExitCritical( void ); /* As this port allows interrupt nesting... */ #define portSET_INTERRUPT_MASK_FROM_ISR() __get_interrupt_level(); portDISABLE_INTERRUPTS() -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) __set_interrupt_level( ( uint8_t ) ( uxSavedInterruptStatus ) ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) __set_interrupt_level( ( uint8_t ) ( xSavedInterruptStatus ) ) /* Tickless idle/low power functionality. */ #if configUSE_TICKLESS_IDLE == 1 diff --git a/portable/IAR/RX600/portmacro.h b/portable/IAR/RX600/portmacro.h index 9a27a2a936f..087afccf12b 100644 --- a/portable/IAR/RX600/portmacro.h +++ b/portable/IAR/RX600/portmacro.h @@ -124,7 +124,7 @@ extern void vTaskExitCritical( void ); /* As this port allows interrupt nesting... */ #define portSET_INTERRUPT_MASK_FROM_ISR() __get_interrupt_level(); portDISABLE_INTERRUPTS() -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) __set_interrupt_level( ( uint8_t ) ( uxSavedInterruptStatus ) ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) __set_interrupt_level( ( uint8_t ) ( xSavedInterruptStatus ) ) /*-----------------------------------------------------------*/ diff --git a/portable/IAR/RX700v3_DPFPU/portmacro.h b/portable/IAR/RX700v3_DPFPU/portmacro.h index dfee9a8599a..e406bfeb216 100644 --- a/portable/IAR/RX700v3_DPFPU/portmacro.h +++ b/portable/IAR/RX700v3_DPFPU/portmacro.h @@ -157,7 +157,7 @@ /* As this port allows interrupt nesting... */ #define portSET_INTERRUPT_MASK_FROM_ISR() __get_interrupt_level(); portDISABLE_INTERRUPTS() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) __set_interrupt_level( ( uint8_t ) ( uxSavedInterruptStatus ) ) + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) __set_interrupt_level( ( uint8_t ) ( xSavedInterruptStatus ) ) /*-----------------------------------------------------------*/ diff --git a/portable/IAR/RXv2/portmacro.h b/portable/IAR/RXv2/portmacro.h index f9c851ffd7f..fa1d29eeb43 100644 --- a/portable/IAR/RXv2/portmacro.h +++ b/portable/IAR/RXv2/portmacro.h @@ -124,7 +124,7 @@ extern void vTaskExitCritical( void ); /* As this port allows interrupt nesting... 
*/ #define portSET_INTERRUPT_MASK_FROM_ISR() __get_interrupt_level(); portDISABLE_INTERRUPTS() -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) __set_interrupt_level( ( uint8_t ) ( uxSavedInterruptStatus ) ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) __set_interrupt_level( ( uint8_t ) ( xSavedInterruptStatus ) ) /*-----------------------------------------------------------*/ diff --git a/portable/MemMang/heap_4.c b/portable/MemMang/heap_4.c index 2a594a690d2..95353e26ffe 100644 --- a/portable/MemMang/heap_4.c +++ b/portable/MemMang/heap_4.c @@ -96,8 +96,8 @@ * of their memory address. */ typedef struct A_BLOCK_LINK { - struct A_BLOCK_LINK * pxNextFreeBlock; /*<< The next free block in the list. */ - size_t xBlockSize; /*<< The size of the free block. */ + struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */ + size_t xBlockSize; /**< The size of the free block. */ } BlockLink_t; /*-----------------------------------------------------------*/ @@ -390,7 +390,7 @@ static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */ { uxAddress += ( portBYTE_ALIGNMENT - 1 ); uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ); - xTotalHeapSize -= uxAddress - ( portPOINTER_SIZE_TYPE ) ucHeap; + xTotalHeapSize -= (size_t) (uxAddress - ( portPOINTER_SIZE_TYPE ) ucHeap); } pucAlignedHeap = ( uint8_t * ) uxAddress; @@ -402,7 +402,7 @@ static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */ /* pxEnd is used to mark the end of the list of free blocks and is inserted * at the end of the heap space. */ - uxAddress = ( ( portPOINTER_SIZE_TYPE ) pucAlignedHeap ) + xTotalHeapSize; + uxAddress = ( portPOINTER_SIZE_TYPE ) ( pucAlignedHeap + xTotalHeapSize ); uxAddress -= xHeapStructSize; uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ); pxEnd = ( BlockLink_t * ) uxAddress; @@ -411,7 +411,15 @@ static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */ /* To start with there is a single free block that is sized to take up the * entire heap space, minus the space taken by pxEnd. */ + #if defined(__clang__) + /* Alignment checked above with calculations on uxAddress */ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wcast-align" + #endif /* defined(__clang__) */ pxFirstFreeBlock = ( BlockLink_t * ) pucAlignedHeap; + #if defined(__clang__) + #pragma clang diagnostic pop + #endif /* defined(__clang__) */ pxFirstFreeBlock->xBlockSize = ( size_t ) ( uxAddress - ( portPOINTER_SIZE_TYPE ) pxFirstFreeBlock ); pxFirstFreeBlock->pxNextFreeBlock = pxEnd; diff --git a/portable/Renesas/RX100/portmacro.h b/portable/Renesas/RX100/portmacro.h index c056e35b0f2..33355e31ec3 100644 --- a/portable/Renesas/RX100/portmacro.h +++ b/portable/Renesas/RX100/portmacro.h @@ -126,7 +126,7 @@ extern void vTaskExitCritical( void ); /* As this port allows interrupt nesting... 
*/ #define portSET_INTERRUPT_MASK_FROM_ISR() ( UBaseType_t ) get_ipl(); set_ipl( ( signed long ) configMAX_SYSCALL_INTERRUPT_PRIORITY ) -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) set_ipl( ( signed long ) uxSavedInterruptStatus ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) set_ipl( ( signed long ) xSavedInterruptStatus ) /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/RX200/portmacro.h b/portable/Renesas/RX200/portmacro.h index 08ddece4756..520e372bacb 100644 --- a/portable/Renesas/RX200/portmacro.h +++ b/portable/Renesas/RX200/portmacro.h @@ -126,7 +126,7 @@ extern void vTaskExitCritical( void ); /* As this port allows interrupt nesting... */ #define portSET_INTERRUPT_MASK_FROM_ISR() get_ipl(); set_ipl( ( long ) configMAX_SYSCALL_INTERRUPT_PRIORITY ) -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) set_ipl( ( long ) uxSavedInterruptStatus ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) set_ipl( ( long ) xSavedInterruptStatus ) /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/RX600/portmacro.h b/portable/Renesas/RX600/portmacro.h index fec71cb574c..f6f6ff6e156 100644 --- a/portable/Renesas/RX600/portmacro.h +++ b/portable/Renesas/RX600/portmacro.h @@ -127,7 +127,7 @@ extern void vTaskExitCritical( void ); /* As this port allows interrupt nesting... */ #define portSET_INTERRUPT_MASK_FROM_ISR() get_ipl(); set_ipl( ( long ) configMAX_SYSCALL_INTERRUPT_PRIORITY ) -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) set_ipl( ( long ) uxSavedInterruptStatus ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) set_ipl( ( long ) xSavedInterruptStatus ) /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/RX600v2/portmacro.h b/portable/Renesas/RX600v2/portmacro.h index 0430afdc291..3eda33ade72 100644 --- a/portable/Renesas/RX600v2/portmacro.h +++ b/portable/Renesas/RX600v2/portmacro.h @@ -127,7 +127,7 @@ extern void vTaskExitCritical( void ); /* As this port allows interrupt nesting... */ #define portSET_INTERRUPT_MASK_FROM_ISR() ( UBaseType_t ) get_ipl(); set_ipl( ( long ) configMAX_SYSCALL_INTERRUPT_PRIORITY ) -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) set_ipl( ( long ) uxSavedInterruptStatus ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) set_ipl( ( long ) xSavedInterruptStatus ) /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/RX700v3_DPFPU/portmacro.h b/portable/Renesas/RX700v3_DPFPU/portmacro.h index b90b7d9f7fa..1787a643668 100644 --- a/portable/Renesas/RX700v3_DPFPU/portmacro.h +++ b/portable/Renesas/RX700v3_DPFPU/portmacro.h @@ -152,7 +152,7 @@ /* As this port allows interrupt nesting... 
*/ #define portSET_INTERRUPT_MASK_FROM_ISR() ( UBaseType_t ) get_ipl(); set_ipl( ( long ) configMAX_SYSCALL_INTERRUPT_PRIORITY ) - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) set_ipl( ( long ) uxSavedInterruptStatus ) + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) set_ipl( ( long ) xSavedInterruptStatus ) /*-----------------------------------------------------------*/ diff --git a/portable/ThirdParty/GCC/Posix/port.c b/portable/ThirdParty/GCC/Posix/port.c index bd0842fbf55..ff9a0cdea03 100644 --- a/portable/ThirdParty/GCC/Posix/port.c +++ b/portable/ThirdParty/GCC/Posix/port.c @@ -115,6 +115,9 @@ static void prvPortYieldFromISR( void ); /*-----------------------------------------------------------*/ static void prvFatalError( const char * pcCall, + int iErrno ) __attribute__ ((__noreturn__)); + +void prvFatalError( const char * pcCall, int iErrno ) { fprintf( stderr, "%s: %s\n", pcCall, strerror( iErrno ) ); @@ -141,7 +144,7 @@ portSTACK_TYPE * pxPortInitialiseStack( portSTACK_TYPE * pxTopOfStack, */ thread = ( Thread_t * ) ( pxTopOfStack + 1 ) - 1; pxTopOfStack = ( portSTACK_TYPE * ) thread - 1; - ulStackSize = ( pxTopOfStack + 1 - pxEndOfStack ) * sizeof( *pxTopOfStack ); + ulStackSize = (size_t)( pxTopOfStack + 1 - pxEndOfStack ) * sizeof( *pxTopOfStack ); thread->pxCode = pxCode; thread->pvParams = pvParameters; @@ -340,7 +343,7 @@ static uint64_t prvGetTimeNs( void ) clock_gettime( CLOCK_MONOTONIC, &t ); - return t.tv_sec * 1000000000ULL + t.tv_nsec; + return (uint64_t)t.tv_sec * 1000000000ULL + (uint64_t)t.tv_nsec; } static uint64_t prvStartTimeNs; diff --git a/queue.c b/queue.c index 86dae0bc4f4..58db9226a94 100644 --- a/queue.c +++ b/queue.c @@ -68,14 +68,14 @@ typedef struct QueuePointers { - int8_t * pcTail; /*< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */ - int8_t * pcReadFrom; /*< Points to the last place that a queued item was read from when the structure is used as a queue. */ + int8_t * pcTail; /**< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */ + int8_t * pcReadFrom; /**< Points to the last place that a queued item was read from when the structure is used as a queue. */ } QueuePointers_t; typedef struct SemaphoreData { - TaskHandle_t xMutexHolder; /*< The handle of the task that holds the mutex. */ - UBaseType_t uxRecursiveCallCount; /*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */ + TaskHandle_t xMutexHolder; /**< The handle of the task that holds the mutex. */ + UBaseType_t uxRecursiveCallCount; /**< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */ } SemaphoreData_t; /* Semaphores do not actually store or copy data, so have an item size of @@ -99,27 +99,27 @@ typedef struct SemaphoreData */ typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */ { - int8_t * pcHead; /*< Points to the beginning of the queue storage area. */ - int8_t * pcWriteTo; /*< Points to the free next place in the storage area. */ + int8_t * pcHead; /**< Points to the beginning of the queue storage area. */ + int8_t * pcWriteTo; /**< Points to the free next place in the storage area. 
*/ union { - QueuePointers_t xQueue; /*< Data required exclusively when this structure is used as a queue. */ - SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */ + QueuePointers_t xQueue; /**< Data required exclusively when this structure is used as a queue. */ + SemaphoreData_t xSemaphore; /**< Data required exclusively when this structure is used as a semaphore. */ } u; - List_t xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */ - List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */ + List_t xTasksWaitingToSend; /**< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */ + List_t xTasksWaitingToReceive; /**< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */ - volatile UBaseType_t uxMessagesWaiting; /*< The number of items currently in the queue. */ - UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */ - UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */ + volatile UBaseType_t uxMessagesWaiting; /**< The number of items currently in the queue. */ + UBaseType_t uxLength; /**< The length of the queue defined as the number of items it will hold, not the number of bytes. */ + UBaseType_t uxItemSize; /**< The size of each items that the queue will hold. */ - volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ - volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + volatile int8_t cRxLock; /**< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + volatile int8_t cTxLock; /**< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) - uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */ + uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */ #endif #if ( configUSE_QUEUE_SETS == 1 ) @@ -268,14 +268,14 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, * tasks than the number of tasks in the system. */ #define prvIncrementQueueTxLock( pxQueue, cTxLock ) \ - { \ + do { \ const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks(); \ if( ( UBaseType_t ) ( cTxLock ) < uxNumberOfTasks ) \ { \ configASSERT( ( cTxLock ) != queueINT8_MAX ); \ ( pxQueue )->cTxLock = ( int8_t ) ( ( cTxLock ) + ( int8_t ) 1 ); \ } \ - } + } while(0) /* * Macro to increment cRxLock member of the queue data structure. It is @@ -283,14 +283,14 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, * tasks than the number of tasks in the system. 
*/ #define prvIncrementQueueRxLock( pxQueue, cRxLock ) \ - { \ + do { \ const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks(); \ if( ( UBaseType_t ) ( cRxLock ) < uxNumberOfTasks ) \ { \ configASSERT( ( cRxLock ) != queueINT8_MAX ); \ ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \ } \ - } + } while(0) /*-----------------------------------------------------------*/ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, @@ -1050,7 +1050,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const BaseType_t xCopyPosition ) { BaseType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; Queue_t * const pxQueue = xQueue; configASSERT( pxQueue ); @@ -1078,7 +1078,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, * read, instead return a flag to say whether a context switch is required or * not (i.e. has a task with a higher priority than us been woken by this * post). */ - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) { @@ -1203,7 +1203,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, xReturn = errQUEUE_FULL; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -1213,7 +1213,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken ) { BaseType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; Queue_t * const pxQueue = xQueue; /* Similar to xQueueGenericSendFromISR() but used with semaphores where the @@ -1249,7 +1249,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -1369,7 +1369,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, xReturn = errQUEUE_FULL; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -1884,7 +1884,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken ) { BaseType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; Queue_t * const pxQueue = xQueue; configASSERT( pxQueue ); @@ -1906,7 +1906,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -1966,7 +1966,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -1976,7 +1976,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer ) { BaseType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; int8_t * pcOriginalReadPosition; Queue_t * const 
pxQueue = xQueue; @@ -2000,7 +2000,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { /* Cannot block in an ISR, so check there is data available. */ if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) @@ -2021,7 +2021,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xReturn; } diff --git a/stream_buffer.c b/stream_buffer.c index b81f072fb1a..18e8cd35564 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -70,7 +70,7 @@ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ } \ } \ - ( void ) xTaskResumeAll(); + ( void ) xTaskResumeAll() #endif /* sbRECEIVE_COMPLETED */ /* If user has provided a per-instance receive complete callback, then @@ -95,10 +95,10 @@ #ifndef sbRECEIVE_COMPLETED_FROM_ISR #define sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, \ pxHigherPriorityTaskWoken ) \ - { \ - UBaseType_t uxSavedInterruptStatus; \ + do { \ + portBASE_TYPE xSavedInterruptStatus; \ \ - uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \ + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ { \ @@ -109,8 +109,8 @@ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ } \ } \ - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ - } + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); \ + } while(0) #endif /* sbRECEIVE_COMPLETED_FROM_ISR */ #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) @@ -147,7 +147,7 @@ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ } \ } \ - ( void ) xTaskResumeAll(); + ( void ) xTaskResumeAll() #endif /* sbSEND_COMPLETED */ /* If user has provided a per-instance send completed callback, then @@ -155,7 +155,7 @@ */ #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) #define prvSEND_COMPLETED( pxStreamBuffer ) \ - { \ + do { \ if( ( pxStreamBuffer )->pxSendCompletedCallback != NULL ) \ { \ pxStreamBuffer->pxSendCompletedCallback( ( pxStreamBuffer ), pdFALSE, NULL ); \ @@ -164,7 +164,7 @@ { \ sbSEND_COMPLETED( ( pxStreamBuffer ) ); \ } \ - } + } while(0) #else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ #define prvSEND_COMPLETED( pxStreamBuffer ) sbSEND_COMPLETED( ( pxStreamBuffer ) ) #endif /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ @@ -172,10 +172,10 @@ #ifndef sbSEND_COMPLETE_FROM_ISR #define sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ - { \ - UBaseType_t uxSavedInterruptStatus; \ + do { \ + portBASE_TYPE xSavedInterruptStatus; \ \ - uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \ + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ { \ @@ -186,14 +186,14 @@ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ } \ } \ - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ - } + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); \ + } while(0) #endif /* sbSEND_COMPLETE_FROM_ISR */ #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) #define prvSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ - { \ + do { \ if( ( pxStreamBuffer )->pxSendCompletedCallback != NULL ) \ { \ ( pxStreamBuffer 
)->pxSendCompletedCallback( ( pxStreamBuffer ), pdTRUE, ( pxHigherPriorityTaskWoken ) ); \ @@ -202,7 +202,7 @@ { \ sbSEND_COMPLETE_FROM_ISR( ( pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) ); \ } \ - } + } while(0) #else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ #define prvSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ sbSEND_COMPLETE_FROM_ISR( ( pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) ) @@ -370,7 +370,10 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, { pucAllocatedMemory = NULL; } - + #if defined(__clang__) + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wcast-align" + #endif /* defined(__clang__) */ if( pucAllocatedMemory != NULL ) { prvInitialiseNewStreamBuffer( ( StreamBuffer_t * ) pucAllocatedMemory, /* Structure at the start of the allocated memory. */ /*lint !e9087 Safe cast as allocated memory is aligned. */ /*lint !e826 Area is not too small and alignment is guaranteed provided malloc() behaves as expected and returns aligned buffer. */ @@ -389,6 +392,9 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, } return ( StreamBufferHandle_t ) pucAllocatedMemory; /*lint !e9087 !e826 Safe cast as allocated memory is aligned. */ + #if defined(__clang__) + #pragma clang diagnostic pop + #endif /* defined(__clang__) */ } #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ /*-----------------------------------------------------------*/ @@ -1188,11 +1194,11 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; BaseType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; configASSERT( pxStreamBuffer ); - uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) { @@ -1208,7 +1214,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer xReturn = pdFALSE; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -1219,11 +1225,11 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; BaseType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; configASSERT( pxStreamBuffer ); - uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) { @@ -1239,7 +1245,7 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf xReturn = pdFALSE; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xReturn; } diff --git a/tasks.c b/tasks.c index c7192e537ff..00533a47624 100644 --- a/tasks.c +++ b/tasks.c @@ -134,7 +134,7 @@ /*-----------------------------------------------------------*/ #define taskSELECT_HIGHEST_PRIORITY_TASK() \ - { \ + do { \ UBaseType_t uxTopPriority = uxTopReadyPriority; \ \ /* Find the highest priority queue that contains ready tasks. */ \ @@ -148,7 +148,7 @@ * the same priority get an equal share of the processor time. 
*/ \ listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ uxTopReadyPriority = uxTopPriority; \ - } /* taskSELECT_HIGHEST_PRIORITY_TASK */ + } while(0) /* taskSELECT_HIGHEST_PRIORITY_TASK */ /*-----------------------------------------------------------*/ @@ -199,7 +199,7 @@ /* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick * count overflows. */ #define taskSWITCH_DELAYED_LISTS() \ - { \ + do { \ List_t * pxTemp; \ \ /* The delayed tasks list should be empty when the lists are switched. */ \ @@ -210,7 +210,7 @@ pxOverflowDelayedTaskList = pxTemp; \ xNumOfOverflows++; \ prvResetNextTaskUnblockTime(); \ - } + } while(0) /*-----------------------------------------------------------*/ @@ -219,10 +219,12 @@ * the task. It is inserted at the end of the list. */ #define prvAddTaskToReadyList( pxTCB ) \ + do { \ traceMOVED_TASK_TO_READY_STATE( pxTCB ); \ taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \ listINSERT_END( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \ - tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB ) + tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB ); \ + } while(0) /*-----------------------------------------------------------*/ /* @@ -254,33 +256,33 @@ */ typedef struct tskTaskControlBlock /* The old naming convention is used to prevent breaking kernel aware debuggers. */ { - volatile StackType_t * pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */ + volatile StackType_t * pxTopOfStack; /**< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */ #if ( portUSING_MPU_WRAPPERS == 1 ) - xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */ + xMPU_SETTINGS xMPUSettings; /**< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */ #endif - ListItem_t xStateListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */ - ListItem_t xEventListItem; /*< Used to reference a task from an event list. */ - UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */ - StackType_t * pxStack; /*< Points to the start of the stack. */ - char pcTaskName[ configMAX_TASK_NAME_LEN ]; /*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + ListItem_t xStateListItem; /**< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */ + ListItem_t xEventListItem; /**< Used to reference a task from an event list. */ + UBaseType_t uxPriority; /**< The priority of the task. 0 is the lowest priority. */ + StackType_t * pxStack; /**< Points to the start of the stack. */ + char pcTaskName[ configMAX_TASK_NAME_LEN ]; /**< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) - StackType_t * pxEndOfStack; /*< Points to the highest valid address for the stack. */ + StackType_t * pxEndOfStack; /**< Points to the highest valid address for the stack. 
*/ #endif #if ( portCRITICAL_NESTING_IN_TCB == 1 ) - UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */ + UBaseType_t uxCriticalNesting; /**< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */ #endif #if ( configUSE_TRACE_FACILITY == 1 ) - UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */ - UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */ + UBaseType_t uxTCBNumber; /**< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */ + UBaseType_t uxTaskNumber; /**< Stores a number specifically for use by third party trace code. */ #endif #if ( configUSE_MUTEXES == 1 ) - UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */ + UBaseType_t uxBasePriority; /**< The priority last assigned to the task - used by the priority inheritance mechanism. */ UBaseType_t uxMutexesHeld; #endif @@ -293,11 +295,11 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to #endif #if ( configGENERATE_RUN_TIME_STATS == 1 ) - configRUN_TIME_COUNTER_TYPE ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */ + configRUN_TIME_COUNTER_TYPE ulRunTimeCounter; /**< Stores the amount of time the task has spent in the Running state. */ #endif #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) - configTLS_BLOCK_TYPE xTLSBlock; /*< Memory block used as Thread Local Storage (TLS) Block for the task. */ + configTLS_BLOCK_TYPE xTLSBlock; /**< Memory block used as Thread Local Storage (TLS) Block for the task. */ #endif #if ( configUSE_TASK_NOTIFICATIONS == 1 ) @@ -308,7 +310,7 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to /* See the comments in FreeRTOS.h with the definition of * tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */ #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */ - uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */ + uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */ #endif #if ( INCLUDE_xTaskAbortDelay == 1 ) @@ -332,23 +334,23 @@ portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; * xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but * doing so breaks some kernel aware debuggers and debuggers that rely on removing * the static qualifier. */ -PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ]; /*< Prioritised ready tasks. */ -PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */ -PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */ -PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. 
*/ -PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */ -PRIVILEGED_DATA static List_t xPendingReadyList; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */ +PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ]; /**< Prioritised ready tasks. */ +PRIVILEGED_DATA static List_t xDelayedTaskList1; /**< Delayed tasks. */ +PRIVILEGED_DATA static List_t xDelayedTaskList2; /**< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */ +PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /**< Points to the delayed task list currently being used. */ +PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /**< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */ +PRIVILEGED_DATA static List_t xPendingReadyList; /**< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */ #if ( INCLUDE_vTaskDelete == 1 ) - PRIVILEGED_DATA static List_t xTasksWaitingTermination; /*< Tasks that have been deleted - but their memory not yet freed. */ + PRIVILEGED_DATA static List_t xTasksWaitingTermination; /**< Tasks that have been deleted - but their memory not yet freed. */ PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U; #endif #if ( INCLUDE_vTaskSuspend == 1 ) - PRIVILEGED_DATA static List_t xSuspendedTaskList; /*< Tasks that are currently suspended. */ + PRIVILEGED_DATA static List_t xSuspendedTaskList; /**< Tasks that are currently suspended. */ #endif @@ -368,7 +370,7 @@ PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE; PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0; PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U; PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */ -PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */ +PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL; /**< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */ /* Improve support for OpenOCD. The kernel tracks Ready tasks via priority lists. * For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority @@ -389,8 +391,8 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t /* Do not move these variables to function scope as doing so prevents the * code working with debuggers that need to remove the static qualifier. */ - PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime = 0UL; /*< Holds the value of a timer/counter the last time a task was switched in. */ - PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */ + PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime = 0UL; /**< Holds the value of a timer/counter the last time a task was switched in. 
*/ + PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime = 0UL; /**< Holds the total amount of execution time as defined by the run time counter clock. */ #endif @@ -1467,7 +1469,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) { TCB_t const * pxTCB; - UBaseType_t uxReturn, uxSavedInterruptState; + UBaseType_t uxReturn; + portBASE_TYPE xSavedInterruptState; /* RTOS ports that support interrupt nesting have the concept of a * maximum system call (or maximum API call) interrupt priority. @@ -1487,14 +1490,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR(); { /* If null is passed in here then it is the priority of the calling * task that is being queried. */ pxTCB = prvGetTCBFromHandle( xTask ); uxReturn = pxTCB->uxPriority; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptState ); return uxReturn; } @@ -1880,7 +1883,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { BaseType_t xYieldRequired = pdFALSE; TCB_t * const pxTCB = xTaskToResume; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; configASSERT( xTaskToResume ); @@ -1902,7 +1905,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) { @@ -1943,7 +1946,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) mtCOVERAGE_TEST_MARKER(); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xYieldRequired; } @@ -2301,7 +2304,7 @@ TickType_t xTaskGetTickCount( void ) TickType_t xTaskGetTickCountFromISR( void ) { TickType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; /* RTOS ports that support interrupt nesting have the concept of a maximum * system call (or maximum API call) interrupt priority. Interrupts that are @@ -2319,11 +2322,11 @@ TickType_t xTaskGetTickCountFromISR( void ) * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); { xReturn = xTickCount; } - portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -2533,7 +2536,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) ); #else - *pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + *pulTotalRunTime = (configRUN_TIME_COUNTER_TYPE) portGET_RUN_TIME_COUNTER_VALUE(); #endif } } @@ -2950,18 +2953,18 @@ BaseType_t xTaskIncrementTick( void ) { TCB_t * pxTCB; TaskHookFunction_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; /* If xTask is NULL then set the calling task's hook. 
*/ pxTCB = prvGetTCBFromHandle( xTask ); /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. */ - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { xReturn = pxTCB->pxTaskTag; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -3020,7 +3023,7 @@ void vTaskSwitchContext( void ) #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); #else - ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + ulTotalRunTime = (configRUN_TIME_COUNTER_TYPE) portGET_RUN_TIME_COUNTER_VALUE(); #endif /* Add the amount of time the task has been running to the @@ -3420,7 +3423,9 @@ void vTaskMissedYield( void ) * void prvIdleTask( void *pvParameters ); * */ -static portTASK_FUNCTION( prvIdleTask, pvParameters ) +static void prvIdleTask( void *pvParameters ) __attribute__ ((__noreturn__)); + +portTASK_FUNCTION( prvIdleTask, pvParameters ) { /* Stop warnings. */ ( void ) pvParameters; @@ -4975,7 +4980,7 @@ TickType_t uxTaskResetEventItemValue( void ) TCB_t * pxTCB; uint8_t ucOriginalNotifyState; BaseType_t xReturn = pdPASS; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; configASSERT( xTaskToNotify ); configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ); @@ -5000,7 +5005,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = xTaskToNotify; - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { if( pulPreviousNotificationValue != NULL ) { @@ -5094,7 +5099,7 @@ TickType_t uxTaskResetEventItemValue( void ) } } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -5110,7 +5115,7 @@ TickType_t uxTaskResetEventItemValue( void ) { TCB_t * pxTCB; uint8_t ucOriginalNotifyState; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; configASSERT( xTaskToNotify ); configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ); @@ -5135,7 +5140,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = xTaskToNotify; - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ]; pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED; @@ -5185,7 +5190,7 @@ TickType_t uxTaskResetEventItemValue( void ) } } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); } #endif /* configUSE_TASK_NOTIFICATIONS */ @@ -5269,7 +5274,7 @@ TickType_t uxTaskResetEventItemValue( void ) { configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn; - ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE(); + ulTotalTime = (configRUN_TIME_COUNTER_TYPE) portGET_RUN_TIME_COUNTER_VALUE(); /* For percentage calculations. */ ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100; diff --git a/timers.c b/timers.c index 1019f7de9d8..a4f9677b540 100644 --- a/timers.c +++ b/timers.c @@ -74,15 +74,15 @@ /* The definition of the timers themselves. */ typedef struct tmrTimerControl /* The old naming convention is used to prevent breaking kernel aware debuggers. */ { - const char * pcTimerName; /*<< Text name. 
This is not used by the kernel, it is included simply to make debugging easier. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ - ListItem_t xTimerListItem; /*<< Standard linked list item as used by all kernel features for event management. */ - TickType_t xTimerPeriodInTicks; /*<< How quickly and often the timer expires. */ - void * pvTimerID; /*<< An ID to identify the timer. This allows the timer to be identified when the same callback is used for multiple timers. */ - TimerCallbackFunction_t pxCallbackFunction; /*<< The function that will be called when the timer expires. */ + const char * pcTimerName; /**< Text name. This is not used by the kernel, it is included simply to make debugging easier. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + ListItem_t xTimerListItem; /**< Standard linked list item as used by all kernel features for event management. */ + TickType_t xTimerPeriodInTicks; /**< How quickly and often the timer expires. */ + void * pvTimerID; /**< An ID to identify the timer. This allows the timer to be identified when the same callback is used for multiple timers. */ + TimerCallbackFunction_t pxCallbackFunction; /**< The function that will be called when the timer expires. */ #if ( configUSE_TRACE_FACILITY == 1 ) - UBaseType_t uxTimerNumber; /*<< An ID assigned by trace tools such as FreeRTOS+Trace */ + UBaseType_t uxTimerNumber; /**< An ID assigned by trace tools such as FreeRTOS+Trace */ #endif - uint8_t ucStatus; /*<< Holds bits to say if the timer was statically allocated or not, and if it is active or not. */ + uint8_t ucStatus; /**< Holds bits to say if the timer was statically allocated or not, and if it is active or not. */ } xTIMER; /* The old xTIMER name is maintained above then typedefed to the new Timer_t @@ -96,8 +96,8 @@ * and xCallbackParametersType respectively. */ typedef struct tmrTimerParameters { - TickType_t xMessageValue; /*<< An optional value used by a subset of commands, for example, when changing the period of a timer. */ - Timer_t * pxTimer; /*<< The timer to which the command will be applied. */ + TickType_t xMessageValue; /**< An optional value used by a subset of commands, for example, when changing the period of a timer. */ + Timer_t * pxTimer; /**< The timer to which the command will be applied. */ } TimerParameter_t; @@ -112,7 +112,7 @@ * that is used to determine which message type is valid. */ typedef struct tmrTimerQueueMessage { - BaseType_t xMessageID; /*<< The command being sent to the timer service task. */ + BaseType_t xMessageID; /**< The command being sent to the timer service task. 
*/ union { TimerParameter_t xTimerParameters; @@ -564,8 +564,10 @@ pxTimer->pxCallbackFunction( ( TimerHandle_t ) pxTimer ); } /*-----------------------------------------------------------*/ +static void prvTimerTask( void *pvParameters ) __attribute__ ((__noreturn__)); - static portTASK_FUNCTION( prvTimerTask, pvParameters ) + +portTASK_FUNCTION( prvTimerTask, pvParameters ) { TickType_t xNextExpireTime; BaseType_t xListWasEmpty; From 2e2aa7ae7a999111d343ad46bad0a9ef44c8ed01 Mon Sep 17 00:00:00 2001 From: Paul Helter Date: Sun, 23 Oct 2022 12:12:17 -0700 Subject: [PATCH 08/24] Adding in project information and how to compile for GNU/clang --- CMakeLists.txt | 59 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 205c19a4817..d9214c9b297 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,5 +1,15 @@ cmake_minimum_required(VERSION 3.15) + +######################################################################## +# Project Details +project(FreeRTOS-Kernel + VERSION 10.6.0 + DESCRIPTION "FreeRTOS Kernel" + HOMEPAGE_URL https://www.freertos.org/RTOS.html + LANGUAGES C) + + # User is responsible to one mandatory option: # FREERTOS_PORT, if not specified and native port detected, uses the native compile. # @@ -222,6 +232,47 @@ elseif((FREERTOS_PORT STREQUAL "A_CUSTOM_PORT") AND (NOT TARGET freertos_kernel_ " freertos_kernel)") endif() +######################################################################## +# Requirements +set(CMAKE_C_STANDARD 99) +set(CMAKE_C_STANDARD_REQUIRED ON) + +######################################################################## +# Overall Compile Options +# Note the compile option strategy is to error on everything and then +# Per library opt-out of things that are warnings/errors. +# This ensures that no matter what strategy for compilation you take, the +# builds will still occur. +# +# Only tested with GNU and Clang. +# Other options are https://cmake.org/cmake/help/latest/variable/CMAKE_LANG_COMPILER_ID.html#variable:CMAKE_%3CLANG%3E_COMPILER_ID +# Naming of compilers translation map: +# +# FreeRTOS | CMake +# ------------------- +# CCS | ?TBD? +# GCC | GNU, Clang, *Clang Others? +# IAR | IAR +# Keil | ARMCC +# MSVC | MSVC # Note only for MinGW? +# Renesas | ?TBD? + +add_compile_options( + ### Gnu/Clang C Options + $<$:-fdiagnostics-color=always> + $<$:-fcolor-diagnostics> + + $<$:-Wall> + $<$:-Wextra> + $<$:-Wpedantic> + $<$:-Werror> + $<$:-Weverything> + + # TODO: Add in other Compilers here. +) + + +######################################################################## add_subdirectory(portable) add_library(freertos_kernel STATIC @@ -244,6 +295,14 @@ target_include_directories(freertos_kernel $<$>:${FREERTOS_CONFIG_FILE_DIRECTORY}> ) +target_compile_options( freertos_kernel + PRIVATE + $<$:-Wno-covered-switch-default> + $<$:-Wno-missing-variable-declarations> + $<$:-Wno-padded> + $<$:-Wno-unused-macros> +) + target_link_libraries(freertos_kernel PUBLIC $<$:freertos_config> From 89ac5873d732392bc89c95117c691f2f51649906 Mon Sep 17 00:00:00 2001 From: Paul Helter Date: Sun, 23 Oct 2022 17:07:08 -0700 Subject: [PATCH 09/24] Fixing compiler issue with unused variable - no need to declare variable. 
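Context for this change: the intermediate `volatile size_t xSize` existed only to give the assertion a named operand, and it is that temporary which triggers clang's unused-variable diagnostic; both sides of the size comparison are integer constant expressions, so the check can be passed straight to configASSERT() with nothing left over when the macro expands to nothing. A stand-alone illustration follows - the two struct types are placeholders for this sketch, not the kernel's private definitions:

    /* Minimal sketch of the simplified sanity-check pattern. */
    #include <assert.h>
    #include <stddef.h>

    #define configASSERT( x )    assert( x )    /* may equally be defined away to nothing */

    typedef struct { size_t uxDummy[ 4 ]; } StaticEventGroup_t;  /* placeholder public sizing type */
    typedef struct { size_t uxDummy[ 4 ]; } EventGroup_t;        /* placeholder private type */

    int main( void )
    {
        /* Both operands are compile-time constants, so no temporary variable
         * is needed and nothing is reported as unused if configASSERT()
         * compiles to nothing. */
        configASSERT( sizeof( StaticEventGroup_t ) == sizeof( EventGroup_t ) );
        return 0;
    }
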
--- event_groups.c | 13 ++++--------- stream_buffer.c | 27 +++++++++------------------ 2 files changed, 13 insertions(+), 27 deletions(-) diff --git a/event_groups.c b/event_groups.c index 6f486792bbb..4620daf200a 100644 --- a/event_groups.c +++ b/event_groups.c @@ -100,15 +100,10 @@ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits, /* A StaticEventGroup_t object must be provided. */ configASSERT( pxEventGroupBuffer ); - #if ( configASSERT_DEFINED == 1 ) - { - /* Sanity check that the size of the structure used to declare a - * variable of type StaticEventGroup_t equals the size of the real - * event group structure. */ - volatile size_t xSize = sizeof( StaticEventGroup_t ); - configASSERT( xSize == sizeof( EventGroup_t ) ); - } /*lint !e529 xSize is referenced if configASSERT() is defined. */ - #endif /* configASSERT_DEFINED */ + /* Sanity check that the size of the structure used to declare a + * variable of type StaticEventGroup_t equals the size of the real + * event group structure. */ + configASSERT( sizeof( StaticEventGroup_t ) == sizeof( EventGroup_t ) ); /* The user has provided a statically allocated event group - use it. */ pxEventBits = ( EventGroup_t * ) pxEventGroupBuffer; /*lint !e740 !e9087 EventGroup_t and StaticEventGroup_t are deliberately aliased for data hiding purposes and guaranteed to have the same size and alignment requirement - checked by configASSERT(). */ diff --git a/stream_buffer.c b/stream_buffer.c index 18e8cd35564..cd036af6792 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -441,15 +441,10 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, * to hold at least one message. */ configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH ); - #if ( configASSERT_DEFINED == 1 ) - { - /* Sanity check that the size of the structure used to declare a - * variable of type StaticStreamBuffer_t equals the size of the real - * message buffer structure. */ - volatile size_t xSize = sizeof( StaticStreamBuffer_t ); - configASSERT( xSize == sizeof( StreamBuffer_t ) ); - } /*lint !e529 xSize is referenced is configASSERT() is defined. */ - #endif /* configASSERT_DEFINED */ + /* Sanity check that the size of the structure used to declare a + * variable of type StaticStreamBuffer_t equals the size of the real + * message buffer structure. */ + configASSERT( sizeof( StaticStreamBuffer_t ) == sizeof( StreamBuffer_t ) ); if( ( pucStreamBufferStorageArea != NULL ) && ( pxStaticStreamBuffer != NULL ) ) { @@ -1373,15 +1368,11 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, /* Assert here is deliberately writing to the entire buffer to ensure it can * be written to without generating exceptions, and is setting the buffer to a * known value to assist in development/debugging. */ - #if ( configASSERT_DEFINED == 1 ) - { - /* The value written just has to be identifiable when looking at the - * memory. Don't use 0xA5 as that is the stack fill value and could - * result in confusion as to what is actually being observed. */ - const BaseType_t xWriteValue = 0x55; - configASSERT( memset( pucBuffer, ( int ) xWriteValue, xBufferSizeBytes ) == pucBuffer ); - } /*lint !e529 !e438 xWriteValue is only used if configASSERT() is defined. */ - #endif + #define STREAM_BUFFER_BUFFER_WRITE_VALUE (0x55) + /* The value written just has to be identifiable when looking at the + * memory. 
Don't use 0xA5 as that is the stack fill value and could + * result in confusion as to what is actually being observed. */ + configASSERT( memset( pucBuffer, ( int ) STREAM_BUFFER_BUFFER_WRITE_VALUE, xBufferSizeBytes ) == pucBuffer ); ( void ) memset( ( void * ) pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); /*lint !e9087 memset() requires void *. */ pxStreamBuffer->pucBuffer = pucBuffer; From 89526344e889e1f31ca60d009c6a258396487b8d Mon Sep 17 00:00:00 2001 From: Paul Helter Date: Mon, 31 Oct 2022 20:09:35 -0700 Subject: [PATCH 10/24] Adding in compile warnings for linux builds that kernel is okay with using. --- portable/CMakeLists.txt | 6 ++++++ portable/ThirdParty/GCC/Posix/utils/wait_for_event.h | 6 +++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/portable/CMakeLists.txt b/portable/CMakeLists.txt index ae9de9cfe1b..44bcc9e95a5 100644 --- a/portable/CMakeLists.txt +++ b/portable/CMakeLists.txt @@ -994,6 +994,12 @@ if(FREERTOS_PORT STREQUAL GCC_POSIX) find_package(Threads REQUIRED) endif() +target_compile_options(freertos_kernel_port + PRIVATE + $<$:-Wno-padded> + $<$:-Wno-disabled-macro-expansion> +) + target_link_libraries(freertos_kernel_port PUBLIC $<$:pico_base_headers> diff --git a/portable/ThirdParty/GCC/Posix/utils/wait_for_event.h b/portable/ThirdParty/GCC/Posix/utils/wait_for_event.h index 65e3e5481dd..6f6efa3ea72 100644 --- a/portable/ThirdParty/GCC/Posix/utils/wait_for_event.h +++ b/portable/ThirdParty/GCC/Posix/utils/wait_for_event.h @@ -26,8 +26,8 @@ * */ -#ifndef _WAIT_FOR_EVENT_H_ -#define _WAIT_FOR_EVENT_H_ +#ifndef WAIT_FOR_EVENT_H_ +#define WAIT_FOR_EVENT_H_ #include #include @@ -43,4 +43,4 @@ void event_signal( struct event * ev ); -#endif /* ifndef _WAIT_FOR_EVENT_H_ */ +#endif /* ifndef WAIT_FOR_EVENT_H_ */ From 6d60ad822c798a9444f8ef7cc162e0fa628fa3e8 Mon Sep 17 00:00:00 2001 From: Paul Helter Date: Wed, 2 Nov 2022 12:10:52 -0700 Subject: [PATCH 11/24] Fixing more extra-semi-stmt clang warnings. --- include/stack_macros.h | 4 ++-- tasks.c | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/stack_macros.h b/include/stack_macros.h index 6894a3ed338..58370931673 100644 --- a/include/stack_macros.h +++ b/include/stack_macros.h @@ -87,7 +87,7 @@ #if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH < 0 ) ) #define taskCHECK_FOR_STACK_OVERFLOW() \ - { \ + do { \ const uint32_t * const pulStack = ( uint32_t * ) pxCurrentTCB->pxStack; \ const uint32_t ulCheckValue = ( uint32_t ) 0xa5a5a5a5; \ \ @@ -98,7 +98,7 @@ { \ vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ } \ - } + } while(0) #endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ /*-----------------------------------------------------------*/ diff --git a/tasks.c b/tasks.c index 00533a47624..92f508c6505 100644 --- a/tasks.c +++ b/tasks.c @@ -170,14 +170,14 @@ /*-----------------------------------------------------------*/ #define taskSELECT_HIGHEST_PRIORITY_TASK() \ - { \ + do { \ UBaseType_t uxTopPriority; \ \ /* Find the highest priority list that contains ready tasks. */ \ portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \ configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \ listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ - } /* taskSELECT_HIGHEST_PRIORITY_TASK() */ + } while(0) /*-----------------------------------------------------------*/ @@ -185,12 +185,12 @@ * is being referenced from a ready list. 
If it is referenced from a delayed * or suspended list then it won't be in a ready list. */ #define taskRESET_READY_PRIORITY( uxPriority ) \ - { \ + do { \ if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \ { \ portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \ } \ - } + } while(0) #endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ From 4bddfa369f3bb1cfee7531524c4da5e37fd3d3bb Mon Sep 17 00:00:00 2001 From: Paul Helter Date: Thu, 3 Nov 2022 09:35:16 -0700 Subject: [PATCH 12/24] Moving definition of hooks into header files if features are enabled. --- include/task.h | 16 +++++++++++++++- include/timers.h | 14 ++++++++++++++ timers.c | 2 -- 3 files changed, 29 insertions(+), 3 deletions(-) diff --git a/include/task.h b/include/task.h index d7f4fc22678..51649231b72 100644 --- a/include/task.h +++ b/include/task.h @@ -1649,7 +1649,7 @@ configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) PRIVIL #endif -#if ( configUSE_TICK_HOOK > 0 ) +#if ( configUSE_TICK_HOOK != 0 ) /** * task.h @@ -1663,6 +1663,20 @@ configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) PRIVIL #endif +#if ( configUSE_IDLE_HOOK != 0 ) +/** + * task.h + * @code{c} + * void vApplicationTickHook( void ); + * @endcode + * + * This hook function is called in the ilde task handler + * @note: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, CALL A FUNCTION THAT MIGHT BLOCK. + */ + void vApplicationIdleHook( void ); + +#endif + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) /** diff --git a/include/timers.h b/include/timers.h index d255c3986e7..f18895cf720 100644 --- a/include/timers.h +++ b/include/timers.h @@ -1361,6 +1361,20 @@ BaseType_t xTimerGenericCommand( TimerHandle_t xTimer, #endif +#if ( configUSE_DAEMON_TASK_STARTUP_HOOK != 0 ) + +/** + * timers.h + * @code{c} + * void vApplicationTickHook( void ); + * @endcode + * + * This hook function is called in the timers task before task executes. + */ + void vApplicationDaemonTaskStartupHook(void); + +#endif + /* *INDENT-OFF* */ #ifdef __cplusplus } diff --git a/timers.c b/timers.c index a4f9677b540..3fa95e4d98e 100644 --- a/timers.c +++ b/timers.c @@ -577,8 +577,6 @@ portTASK_FUNCTION( prvTimerTask, pvParameters ) #if ( configUSE_DAEMON_TASK_STARTUP_HOOK == 1 ) { - extern void vApplicationDaemonTaskStartupHook( void ); - /* Allow the application writer to execute some code in the context of * this task at the point the task starts executing. This is useful if the * application includes initialisation code that would benefit from From 5b7045c1f17ef52e9eb40db899ec4861eb45688d Mon Sep 17 00:00:00 2001 From: Paul Helter Date: Thu, 3 Nov 2022 10:20:55 -0700 Subject: [PATCH 13/24] Fixing formatting with uncrustify. 
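Several hunks earlier in this series, and the whitespace-only changes below, touch multi-statement macros that are now wrapped in do { } while( 0 ) instead of bare braces. As a reference, here is a minimal stand-alone sketch of the failure mode that wrapper avoids; LOG_AND_COUNT and ulCount are hypothetical names used only for illustration, not kernel code:

    #include <stdio.h>

    /* Bare-brace form: the semicolon the caller writes after the closing
     * brace becomes an empty statement, which breaks an if/else written
     * without braces. */
    #define LOG_AND_COUNT_BAD( msg )    { puts( msg ); ulCount++; }

    /* do { } while( 0 ) form: expands to a single statement that requires
     * the terminating semicolon, so it composes safely with if/else. */
    #define LOG_AND_COUNT( msg )    \
        do {                        \
            puts( msg );            \
            ulCount++;              \
        } while( 0 )

    static unsigned long ulCount = 0;

    int main( void )
    {
        int xReady = 0;

        /* Using LOG_AND_COUNT_BAD( "busy" ); here would not compile, because
         * the ';' after the closing brace terminates the 'if' before the
         * 'else' is reached. */
        if( xReady )
            LOG_AND_COUNT( "ready" );
        else
            LOG_AND_COUNT( "busy" );

        printf( "%lu\n", ulCount );
        return 0;
    }
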
--- include/croutine.h | 66 +++++++++++++++++++-------------------- include/list.h | 8 ++--- include/stack_macros.h | 2 +- include/task.h | 1 + include/timers.h | 2 +- portable/MemMang/heap_4.c | 10 +++--- queue.c | 4 +-- stream_buffer.c | 24 ++++++++------ tasks.c | 30 +++++++++--------- timers.c | 4 +-- 10 files changed, 78 insertions(+), 73 deletions(-) diff --git a/include/croutine.h b/include/croutine.h index e83d3a20b87..cafc1ecc4e2 100644 --- a/include/croutine.h +++ b/include/croutine.h @@ -27,13 +27,13 @@ */ #ifndef CO_ROUTINE_H -#define CO_ROUTINE_H + #define CO_ROUTINE_H -#ifndef INC_FREERTOS_H - #error "include FreeRTOS.h must appear in source files before include croutine.h" -#endif + #ifndef INC_FREERTOS_H + #error "include FreeRTOS.h must appear in source files before include croutine.h" + #endif -#include "list.h" + #include "list.h" /* *INDENT-OFF* */ #ifdef __cplusplus @@ -44,21 +44,21 @@ /* Used to hide the implementation of the co-routine control block. The * control block structure however has to be included in the header due to * the macro implementation of the co-routine functionality. */ -typedef void * CoRoutineHandle_t; + typedef void * CoRoutineHandle_t; /* Defines the prototype to which co-routine functions must conform. */ -typedef void (* crCOROUTINE_CODE)( CoRoutineHandle_t, - UBaseType_t ); + typedef void (* crCOROUTINE_CODE)( CoRoutineHandle_t, + UBaseType_t ); -typedef struct corCoRoutineControlBlock -{ - crCOROUTINE_CODE pxCoRoutineFunction; - ListItem_t xGenericListItem; /**< List item used to place the CRCB in ready and blocked queues. */ - ListItem_t xEventListItem; /**< List item used to place the CRCB in event lists. */ - UBaseType_t uxPriority; /**< The priority of the co-routine in relation to other co-routines. */ - UBaseType_t uxIndex; /**< Used to distinguish between co-routines when multiple co-routines use the same co-routine function. */ - uint16_t uxState; /**< Used internally by the co-routine implementation. */ -} CRCB_t; /* Co-routine control block. Note must be identical in size down to uxPriority with TCB_t. */ + typedef struct corCoRoutineControlBlock + { + crCOROUTINE_CODE pxCoRoutineFunction; + ListItem_t xGenericListItem; /**< List item used to place the CRCB in ready and blocked queues. */ + ListItem_t xEventListItem; /**< List item used to place the CRCB in event lists. */ + UBaseType_t uxPriority; /**< The priority of the co-routine in relation to other co-routines. */ + UBaseType_t uxIndex; /**< Used to distinguish between co-routines when multiple co-routines use the same co-routine function. */ + uint16_t uxState; /**< Used internally by the co-routine implementation. */ + } CRCB_t; /* Co-routine control block. Note must be identical in size down to uxPriority with TCB_t. */ /** * croutine. h @@ -133,9 +133,9 @@ typedef struct corCoRoutineControlBlock * \defgroup xCoRoutineCreate xCoRoutineCreate * \ingroup Tasks */ -BaseType_t xCoRoutineCreate( crCOROUTINE_CODE pxCoRoutineCode, - UBaseType_t uxPriority, - UBaseType_t uxIndex ); + BaseType_t xCoRoutineCreate( crCOROUTINE_CODE pxCoRoutineCode, + UBaseType_t uxPriority, + UBaseType_t uxIndex ); /** @@ -178,7 +178,7 @@ BaseType_t xCoRoutineCreate( crCOROUTINE_CODE pxCoRoutineCode, * \defgroup vCoRoutineSchedule vCoRoutineSchedule * \ingroup Tasks */ -void vCoRoutineSchedule( void ); + void vCoRoutineSchedule( void ); /** * croutine. 
h @@ -211,7 +211,7 @@ void vCoRoutineSchedule( void ); * \defgroup crSTART crSTART * \ingroup Tasks */ -#define crSTART( pxCRCB ) \ + #define crSTART( pxCRCB ) \ switch( ( ( CRCB_t * ) ( pxCRCB ) )->uxState ) { \ case 0: @@ -246,16 +246,16 @@ void vCoRoutineSchedule( void ); * \defgroup crSTART crSTART * \ingroup Tasks */ -#define crEND() } + #define crEND() } /* * These macros are intended for internal use by the co-routine implementation * only. The macros should not be used directly by application writers. */ -#define crSET_STATE0( xHandle ) \ + #define crSET_STATE0( xHandle ) \ ( ( CRCB_t * ) ( xHandle ) )->uxState = ( __LINE__ * 2 ); return; \ case ( __LINE__ * 2 ): -#define crSET_STATE1( xHandle ) \ + #define crSET_STATE1( xHandle ) \ ( ( CRCB_t * ) ( xHandle ) )->uxState = ( ( __LINE__ * 2 ) + 1 ); return; \ case ( ( __LINE__ * 2 ) + 1 ): @@ -307,7 +307,7 @@ void vCoRoutineSchedule( void ); * \defgroup crDELAY crDELAY * \ingroup Tasks */ -#define crDELAY( xHandle, xTicksToDelay ) \ + #define crDELAY( xHandle, xTicksToDelay ) \ if( ( xTicksToDelay ) > 0 ) \ { \ vCoRoutineAddToDelayedList( ( xTicksToDelay ), NULL ); \ @@ -399,7 +399,7 @@ void vCoRoutineSchedule( void ); * \defgroup crQUEUE_SEND crQUEUE_SEND * \ingroup Tasks */ -#define crQUEUE_SEND( xHandle, pxQueue, pvItemToQueue, xTicksToWait, pxResult ) \ + #define crQUEUE_SEND( xHandle, pxQueue, pvItemToQueue, xTicksToWait, pxResult ) \ { \ *( pxResult ) = xQueueCRSend( ( pxQueue ), ( pvItemToQueue ), ( xTicksToWait ) ); \ if( *( pxResult ) == errQUEUE_BLOCKED ) \ @@ -493,7 +493,7 @@ void vCoRoutineSchedule( void ); * \defgroup crQUEUE_RECEIVE crQUEUE_RECEIVE * \ingroup Tasks */ -#define crQUEUE_RECEIVE( xHandle, pxQueue, pvBuffer, xTicksToWait, pxResult ) \ + #define crQUEUE_RECEIVE( xHandle, pxQueue, pvBuffer, xTicksToWait, pxResult ) \ { \ *( pxResult ) = xQueueCRReceive( ( pxQueue ), ( pvBuffer ), ( xTicksToWait ) ); \ if( *( pxResult ) == errQUEUE_BLOCKED ) \ @@ -604,7 +604,7 @@ void vCoRoutineSchedule( void ); * \defgroup crQUEUE_SEND_FROM_ISR crQUEUE_SEND_FROM_ISR * \ingroup Tasks */ -#define crQUEUE_SEND_FROM_ISR( pxQueue, pvItemToQueue, xCoRoutinePreviouslyWoken ) \ + #define crQUEUE_SEND_FROM_ISR( pxQueue, pvItemToQueue, xCoRoutinePreviouslyWoken ) \ xQueueCRSendFromISR( ( pxQueue ), ( pvItemToQueue ), ( xCoRoutinePreviouslyWoken ) ) @@ -720,7 +720,7 @@ void vCoRoutineSchedule( void ); * \defgroup crQUEUE_RECEIVE_FROM_ISR crQUEUE_RECEIVE_FROM_ISR * \ingroup Tasks */ -#define crQUEUE_RECEIVE_FROM_ISR( pxQueue, pvBuffer, pxCoRoutineWoken ) \ + #define crQUEUE_RECEIVE_FROM_ISR( pxQueue, pvBuffer, pxCoRoutineWoken ) \ xQueueCRReceiveFromISR( ( pxQueue ), ( pvBuffer ), ( pxCoRoutineWoken ) ) /* @@ -732,8 +732,8 @@ void vCoRoutineSchedule( void ); * Removes the current co-routine from its ready list and places it in the * appropriate delayed list. */ -void vCoRoutineAddToDelayedList( TickType_t xTicksToDelay, - List_t * pxEventList ); + void vCoRoutineAddToDelayedList( TickType_t xTicksToDelay, + List_t * pxEventList ); /* * This function is intended for internal use by the queue implementation only. @@ -742,7 +742,7 @@ void vCoRoutineAddToDelayedList( TickType_t xTicksToDelay, * Removes the highest priority co-routine from the event list and places it in * the pending ready list. 
*/ -BaseType_t xCoRoutineRemoveFromEventList( const List_t * pxEventList ); + BaseType_t xCoRoutineRemoveFromEventList( const List_t * pxEventList ); /* *INDENT-OFF* */ #ifdef __cplusplus diff --git a/include/list.h b/include/list.h index 851c7e4673e..fdf88b270fd 100644 --- a/include/list.h +++ b/include/list.h @@ -293,7 +293,7 @@ typedef struct xLIST ( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \ } \ ( pxTCB ) = ( pxConstList )->pxIndex->pvOwner; \ - } while(0) + } while( 0 ) /* * Version of uxListRemove() that does not return a value. Provided as a slight @@ -312,7 +312,7 @@ typedef struct xLIST * \ingroup LinkedList */ #define listREMOVE_ITEM( pxItemToRemove ) \ - do { \ + do { \ /* The list item knows which list it is in. Obtain the list from the list \ * item. */ \ List_t * const pxList = ( pxItemToRemove )->pxContainer; \ @@ -327,7 +327,7 @@ typedef struct xLIST \ ( pxItemToRemove )->pxContainer = NULL; \ ( pxList->uxNumberOfItems )--; \ - } while(0) + } while( 0 ) /* * Inline version of vListInsertEnd() to provide slight optimisation for @@ -374,7 +374,7 @@ typedef struct xLIST ( pxNewListItem )->pxContainer = ( pxList ); \ \ ( ( pxList )->uxNumberOfItems )++; \ - } while(0) + } while( 0 ) /* * Access function to obtain the owner of the first entry in a list. Lists diff --git a/include/stack_macros.h b/include/stack_macros.h index 58370931673..6708a58605b 100644 --- a/include/stack_macros.h +++ b/include/stack_macros.h @@ -98,7 +98,7 @@ { \ vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ } \ - } while(0) + } while( 0 ) #endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ /*-----------------------------------------------------------*/ diff --git a/include/task.h b/include/task.h index 51649231b72..31e9b612541 100644 --- a/include/task.h +++ b/include/task.h @@ -1664,6 +1664,7 @@ configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) PRIVIL #endif #if ( configUSE_IDLE_HOOK != 0 ) + /** * task.h * @code{c} diff --git a/include/timers.h b/include/timers.h index f18895cf720..fa928f0e559 100644 --- a/include/timers.h +++ b/include/timers.h @@ -1371,7 +1371,7 @@ BaseType_t xTimerGenericCommand( TimerHandle_t xTimer, * * This hook function is called in the timers task before task executes. */ - void vApplicationDaemonTaskStartupHook(void); + void vApplicationDaemonTaskStartupHook( void ); #endif diff --git a/portable/MemMang/heap_4.c b/portable/MemMang/heap_4.c index 95353e26ffe..4ad3b17b07b 100644 --- a/portable/MemMang/heap_4.c +++ b/portable/MemMang/heap_4.c @@ -390,7 +390,7 @@ static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */ { uxAddress += ( portBYTE_ALIGNMENT - 1 ); uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ); - xTotalHeapSize -= (size_t) (uxAddress - ( portPOINTER_SIZE_TYPE ) ucHeap); + xTotalHeapSize -= ( size_t ) ( uxAddress - ( portPOINTER_SIZE_TYPE ) ucHeap ); } pucAlignedHeap = ( uint8_t * ) uxAddress; @@ -402,7 +402,7 @@ static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */ /* pxEnd is used to mark the end of the list of free blocks and is inserted * at the end of the heap space. 
*/ - uxAddress = ( portPOINTER_SIZE_TYPE ) ( pucAlignedHeap + xTotalHeapSize ); + uxAddress = ( portPOINTER_SIZE_TYPE ) ( pucAlignedHeap + xTotalHeapSize ); uxAddress -= xHeapStructSize; uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ); pxEnd = ( BlockLink_t * ) uxAddress; @@ -411,13 +411,13 @@ static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */ /* To start with there is a single free block that is sized to take up the * entire heap space, minus the space taken by pxEnd. */ - #if defined(__clang__) - /* Alignment checked above with calculations on uxAddress */ + #if defined( __clang__ ) + /* Alignment checked above with calculations on uxAddress */ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wcast-align" #endif /* defined(__clang__) */ pxFirstFreeBlock = ( BlockLink_t * ) pucAlignedHeap; - #if defined(__clang__) + #if defined( __clang__ ) #pragma clang diagnostic pop #endif /* defined(__clang__) */ pxFirstFreeBlock->xBlockSize = ( size_t ) ( uxAddress - ( portPOINTER_SIZE_TYPE ) pxFirstFreeBlock ); diff --git a/queue.c b/queue.c index 58db9226a94..9c051f211d4 100644 --- a/queue.c +++ b/queue.c @@ -275,7 +275,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, configASSERT( ( cTxLock ) != queueINT8_MAX ); \ ( pxQueue )->cTxLock = ( int8_t ) ( ( cTxLock ) + ( int8_t ) 1 ); \ } \ - } while(0) + } while( 0 ) /* * Macro to increment cRxLock member of the queue data structure. It is @@ -290,7 +290,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, configASSERT( ( cRxLock ) != queueINT8_MAX ); \ ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \ } \ - } while(0) + } while( 0 ) /*-----------------------------------------------------------*/ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, diff --git a/stream_buffer.c b/stream_buffer.c index cd036af6792..5174d58c2b8 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -110,7 +110,7 @@ } \ } \ portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); \ - } while(0) + } while( 0 ) #endif /* sbRECEIVE_COMPLETED_FROM_ISR */ #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) @@ -164,7 +164,7 @@ { \ sbSEND_COMPLETED( ( pxStreamBuffer ) ); \ } \ - } while(0) + } while( 0 ) #else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ #define prvSEND_COMPLETED( pxStreamBuffer ) sbSEND_COMPLETED( ( pxStreamBuffer ) ) #endif /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ @@ -172,7 +172,7 @@ #ifndef sbSEND_COMPLETE_FROM_ISR #define sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ - do { \ + do { \ portBASE_TYPE xSavedInterruptStatus; \ \ xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); \ @@ -187,7 +187,7 @@ } \ } \ portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); \ - } while(0) + } while( 0 ) #endif /* sbSEND_COMPLETE_FROM_ISR */ @@ -202,7 +202,7 @@ { \ sbSEND_COMPLETE_FROM_ISR( ( pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) ); \ } \ - } while(0) + } while( 0 ) #else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ #define prvSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ sbSEND_COMPLETE_FROM_ISR( ( pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) ) @@ -370,10 +370,12 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, { pucAllocatedMemory = NULL; } - #if defined(__clang__) + + #if defined( __clang__ ) #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wcast-align" #endif /* defined(__clang__) */ + if( pucAllocatedMemory != NULL ) { 
prvInitialiseNewStreamBuffer( ( StreamBuffer_t * ) pucAllocatedMemory, /* Structure at the start of the allocated memory. */ /*lint !e9087 Safe cast as allocated memory is aligned. */ /*lint !e826 Area is not too small and alignment is guaranteed provided malloc() behaves as expected and returns aligned buffer. */ @@ -392,7 +394,8 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, } return ( StreamBufferHandle_t ) pucAllocatedMemory; /*lint !e9087 !e826 Safe cast as allocated memory is aligned. */ - #if defined(__clang__) + + #if defined( __clang__ ) #pragma clang diagnostic pop #endif /* defined(__clang__) */ } @@ -442,8 +445,8 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH ); /* Sanity check that the size of the structure used to declare a - * variable of type StaticStreamBuffer_t equals the size of the real - * message buffer structure. */ + * variable of type StaticStreamBuffer_t equals the size of the real + * message buffer structure. */ configASSERT( sizeof( StaticStreamBuffer_t ) == sizeof( StreamBuffer_t ) ); if( ( pucStreamBufferStorageArea != NULL ) && ( pxStaticStreamBuffer != NULL ) ) @@ -1368,7 +1371,8 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, /* Assert here is deliberately writing to the entire buffer to ensure it can * be written to without generating exceptions, and is setting the buffer to a * known value to assist in development/debugging. */ - #define STREAM_BUFFER_BUFFER_WRITE_VALUE (0x55) +#define STREAM_BUFFER_BUFFER_WRITE_VALUE ( 0x55 ) + /* The value written just has to be identifiable when looking at the * memory. Don't use 0xA5 as that is the stack fill value and could * result in confusion as to what is actually being observed. */ diff --git a/tasks.c b/tasks.c index 92f508c6505..4f0d66fafa4 100644 --- a/tasks.c +++ b/tasks.c @@ -148,7 +148,7 @@ * the same priority get an equal share of the processor time. */ \ listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ uxTopReadyPriority = uxTopPriority; \ - } while(0) /* taskSELECT_HIGHEST_PRIORITY_TASK */ + } while( 0 ) /* taskSELECT_HIGHEST_PRIORITY_TASK */ /*-----------------------------------------------------------*/ @@ -177,7 +177,7 @@ portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \ configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \ listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ - } while(0) + } while( 0 ) /*-----------------------------------------------------------*/ @@ -190,7 +190,7 @@ { \ portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \ } \ - } while(0) + } while( 0 ) #endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ @@ -210,7 +210,7 @@ pxOverflowDelayedTaskList = pxTemp; \ xNumOfOverflows++; \ prvResetNextTaskUnblockTime(); \ - } while(0) + } while( 0 ) /*-----------------------------------------------------------*/ @@ -218,13 +218,13 @@ * Place the task represented by pxTCB into the appropriate ready list for * the task. It is inserted at the end of the list. 
*/ -#define prvAddTaskToReadyList( pxTCB ) \ - do { \ - traceMOVED_TASK_TO_READY_STATE( pxTCB ); \ - taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \ - listINSERT_END( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \ - tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB ); \ - } while(0) +#define prvAddTaskToReadyList( pxTCB ) \ + do { \ + traceMOVED_TASK_TO_READY_STATE( pxTCB ); \ + taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \ + listINSERT_END( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \ + tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB ); \ + } while( 0 ) /*-----------------------------------------------------------*/ /* @@ -2536,7 +2536,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) ); #else - *pulTotalRunTime = (configRUN_TIME_COUNTER_TYPE) portGET_RUN_TIME_COUNTER_VALUE(); + *pulTotalRunTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE(); #endif } } @@ -3023,7 +3023,7 @@ void vTaskSwitchContext( void ) #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); #else - ulTotalRunTime = (configRUN_TIME_COUNTER_TYPE) portGET_RUN_TIME_COUNTER_VALUE(); + ulTotalRunTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE(); #endif /* Add the amount of time the task has been running to the @@ -3423,7 +3423,7 @@ void vTaskMissedYield( void ) * void prvIdleTask( void *pvParameters ); * */ -static void prvIdleTask( void *pvParameters ) __attribute__ ((__noreturn__)); +static void prvIdleTask( void * pvParameters ) __attribute__( ( __noreturn__ ) ); portTASK_FUNCTION( prvIdleTask, pvParameters ) { @@ -5274,7 +5274,7 @@ TickType_t uxTaskResetEventItemValue( void ) { configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn; - ulTotalTime = (configRUN_TIME_COUNTER_TYPE) portGET_RUN_TIME_COUNTER_VALUE(); + ulTotalTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE(); /* For percentage calculations. */ ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100; diff --git a/timers.c b/timers.c index 3fa95e4d98e..6fd3b0ad4ca 100644 --- a/timers.c +++ b/timers.c @@ -564,10 +564,10 @@ pxTimer->pxCallbackFunction( ( TimerHandle_t ) pxTimer ); } /*-----------------------------------------------------------*/ -static void prvTimerTask( void *pvParameters ) __attribute__ ((__noreturn__)); + static void prvTimerTask( void * pvParameters ) __attribute__( ( __noreturn__ ) ); -portTASK_FUNCTION( prvTimerTask, pvParameters ) + portTASK_FUNCTION( prvTimerTask, pvParameters ) { TickType_t xNextExpireTime; BaseType_t xListWasEmpty; From ec7de59e3c86f4034a7101ea98dd9ef0fa913ace Mon Sep 17 00:00:00 2001 From: Paul Helter Date: Thu, 9 Feb 2023 13:19:50 -0800 Subject: [PATCH 14/24] Fixing merge conflicts with main merge. 
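This merge keeps the build interface introduced earlier in the series: the
kernel consumes a user-provided freertos_config target and a FREERTOS_PORT
selection (auto-detected on Linux and MinGW hosts), while the old
FREERTOS_CONFIG_FILE_DIRECTORY variable remains only as a deprecated fallback.
As a rough sketch rather than part of the change itself, a consuming project
might wire this up as follows; the project name, the config/ directory and the
GCC_ARM_CM4F port are placeholders, not values mandated by the kernel:

    cmake_minimum_required(VERSION 3.15)
    project(example_app C)

    # Publish the directory that contains FreeRTOSConfig.h.
    add_library(freertos_config INTERFACE)
    target_include_directories(freertos_config SYSTEM INTERFACE config)

    # Pick a port, or omit this on Linux/MinGW hosts and rely on auto-detection.
    set(FREERTOS_PORT GCC_ARM_CM4F CACHE STRING "FreeRTOS port name")

    # For FREERTOS_PORT=A_CUSTOM_PORT, add_subdirectory() the directory that
    # defines the freertos_kernel_port target before adding the kernel.
    add_subdirectory(freertos_kernel)

    add_executable(example_app main.c)
    target_link_libraries(example_app PRIVATE freertos_kernel)
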
--- CMakeLists.txt | 327 ++++++++++-------- event_groups.c | 19 +- include/task.h | 15 - portable/CMakeLists.txt | 4 + portable/GCC/RX100/portmacro.h | 2 +- portable/GCC/RX200/portmacro.h | 2 +- portable/GCC/RX600/portmacro.h | 2 +- portable/GCC/RX600v2/portmacro.h | 2 +- portable/GCC/RX700v3_DPFPU/portmacro.h | 2 +- portable/IAR/RX100/portmacro.h | 2 +- portable/IAR/RX600/portmacro.h | 2 +- portable/IAR/RX700v3_DPFPU/portmacro.h | 2 +- portable/IAR/RXv2/portmacro.h | 2 +- portable/MemMang/heap_4.c | 4 +- portable/Renesas/RX100/portmacro.h | 2 +- portable/Renesas/RX200/portmacro.h | 2 +- portable/Renesas/RX600/portmacro.h | 2 +- portable/Renesas/RX600v2/portmacro.h | 2 +- portable/Renesas/RX700v3_DPFPU/portmacro.h | 2 +- .../GCC/Posix/utils/wait_for_event.h | 6 +- stream_buffer.c | 24 +- 21 files changed, 229 insertions(+), 198 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 66abe149c3b..3224a97653b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,8 +1,10 @@ cmake_minimum_required(VERSION 3.15) -# User is responsible to set two mandatory options: -# FREERTOS_CONFIG_FILE_DIRECTORY -# FREERTOS_PORT +# User is responsible to one mandatory option: +# FREERTOS_PORT, if not specified and native port detected, uses the native compile. +# +# User is responsible for one library target: +# freertos_config ,typcially an INTERFACE library # # DEPRECATED: FREERTOS_CONFIG_FILE_DIRECTORY - but still supported if no freertos_config defined for now. # May be removed at some point in the future. @@ -46,150 +48,178 @@ if(NOT FREERTOS_PORT) " -DFREERTOS_PORT=GCC_ARM_CM4F\n" " \n" " Available port options:\n" - " BCC_16BIT_DOS_FLSH186 - Compiller: BCC Target: 16 bit DOS Flsh186\n" - " BCC_16BIT_DOS_PC - Compiller: BCC Target: 16 bit DOS PC\n" - " CCS_ARM_CM3 - Compiller: CCS Target: ARM Cortex-M3\n" - " CCS_ARM_CM4F - Compiller: CCS Target: ARM Cortex-M4 with FPU\n" - " CCS_ARM_CR4 - Compiller: CCS Target: ARM Cortex-R4\n" - " CCS_MSP430X - Compiller: CCS Target: MSP430X\n" - " CODEWARRIOR_COLDFIRE_V1 - Compiller: CoreWarrior Target: ColdFire V1\n" - " CODEWARRIOR_COLDFIRE_V2 - Compiller: CoreWarrior Target: ColdFire V2\n" - " CODEWARRIOR_HCS12 - Compiller: CoreWarrior Target: HCS12\n" - " GCC_ARM_CA9 - Compiller: GCC Target: ARM Cortex-A9\n" - " GCC_ARM_CA53_64_BIT - Compiller: GCC Target: ARM Cortex-A53 64 bit\n" - " GCC_ARM_CA53_64_BIT_SRE - Compiller: GCC Target: ARM Cortex-A53 64 bit SRE\n" - " GCC_ARM_CM0 - Compiller: GCC Target: ARM Cortex-M0\n" - " GCC_ARM_CM3 - Compiller: GCC Target: ARM Cortex-M3\n" - " GCC_ARM_CM3_MPU - Compiller: GCC Target: ARM Cortex-M3 with MPU\n" - " GCC_ARM_CM4_MPU - Compiller: GCC Target: ARM Cortex-M4 with MPU\n" - " GCC_ARM_CM4F - Compiller: GCC Target: ARM Cortex-M4 with FPU\n" - " GCC_ARM_CM7 - Compiller: GCC Target: ARM Cortex-M7\n" - " GCC_ARM_CM23_NONSECURE - Compiller: GCC Target: ARM Cortex-M23 non-secure\n" - " GCC_ARM_CM23_SECURE - Compiller: GCC Target: ARM Cortex-M23 secure\n" - " GCC_ARM_CM23_NTZ_NONSECURE - Compiller: GCC Target: ARM Cortex-M23 non-trustzone non-secure\n" - " GCC_ARM_CM33_NONSECURE - Compiller: GCC Target: ARM Cortex-M33 non-secure\n" - " GCC_ARM_CM33_SECURE - Compiller: GCC Target: ARM Cortex-M33 secure\n" - " GCC_ARM_CM33_NTZ_NONSECURE - Compiller: GCC Target: ARM Cortex-M33 non-trustzone non-secure\n" - " GCC_ARM_CM33_TFM - Compiller: GCC Target: ARM Cortex-M33 non-secure for TF-M\n" - " GCC_ARM_CM55_NONSECURE - Compiller: GCC Target: ARM Cortex-M55 non-secure\n" - " GCC_ARM_CM55_SECURE - Compiller: GCC Target: ARM 
Cortex-M55 secure\n" - " GCC_ARM_CM55_NTZ_NONSECURE - Compiller: GCC Target: ARM Cortex-M55 non-trustzone non-secure\n" - " GCC_ARM_CM55_TFM - Compiller: GCC Target: ARM Cortex-M55 non-secure for TF-M\n" - " GCC_ARM_CM85_NONSECURE - Compiller: GCC Target: ARM Cortex-M85 non-secure\n" - " GCC_ARM_CM85_SECURE - Compiller: GCC Target: ARM Cortex-M85 secure\n" - " GCC_ARM_CM85_NTZ_NONSECURE - Compiller: GCC Target: ARM Cortex-M85 non-trustzone non-secure\n" - " GCC_ARM_CM85_TFM - Compiller: GCC Target: ARM Cortex-M85 non-secure for TF-M\n" - " GCC_ARM_CR5 - Compiller: GCC Target: ARM Cortex-R5\n" - " GCC_ARM_CRX_NOGIC - Compiller: GCC Target: ARM Cortex-Rx no GIC\n" - " GCC_ARM7_AT91FR40008 - Compiller: GCC Target: ARM7 Atmel AT91R40008\n" - " GCC_ARM7_AT91SAM7S - Compiller: GCC Target: ARM7 Atmel AT91SAM7S\n" - " GCC_ARM7_LPC2000 - Compiller: GCC Target: ARM7 LPC2000\n" - " GCC_ARM7_LPC23XX - Compiller: GCC Target: ARM7 LPC23xx\n" - " GCC_ATMEGA323 - Compiller: GCC Target: ATMega323\n" - " GCC_AVR32_UC3 - Compiller: GCC Target: AVR32 UC3\n" - " GCC_COLDFIRE_V2 - Compiller: GCC Target: ColdFire V2\n" - " GCC_CORTUS_APS3 - Compiller: GCC Target: CORTUS APS3\n" - " GCC_H8S2329 - Compiller: GCC Target: H8S2329\n" - " GCC_HCS12 - Compiller: GCC Target: HCS12\n" - " GCC_IA32_FLAT - Compiller: GCC Target: IA32 flat\n" - " GCC_MICROBLAZE - Compiller: GCC Target: MicroBlaze\n" - " GCC_MICROBLAZE_V8 - Compiller: GCC Target: MicroBlaze V8\n" - " GCC_MICROBLAZE_V9 - Compiller: GCC Target: MicroBlaze V9\n" - " GCC_MSP430F449 - Compiller: GCC Target: MSP430F449\n" - " GCC_NIOSII - Compiller: GCC Target: NiosII\n" - " GCC_PPC405_XILINX - Compiller: GCC Target: Xilinx PPC405\n" - " GCC_PPC440_XILINX - Compiller: GCC Target: Xilinx PPC440\n" - " GCC_RISC_V - Compiller: GCC Target: RISC-V\n" - " GCC_RISC_V_PULPINO_VEGA_RV32M1RM - Compiller: GCC Target: RISC-V Pulpino Vega RV32M1RM\n" - " GCC_RL78 - Compiller: GCC Target: Renesas RL78\n" - " GCC_RX100 - Compiller: GCC Target: Renesas RX100\n" - " GCC_RX200 - Compiller: GCC Target: Renesas RX200\n" - " GCC_RX600 - Compiller: GCC Target: Renesas RX600\n" - " GCC_RX600_V2 - Compiller: GCC Target: Renesas RX600 v2\n" - " GCC_RX700_V3_DPFPU - Compiller: GCC Target: Renesas RX700 v3 with DPFPU\n" - " GCC_STR75X - Compiller: GCC Target: STR75x\n" - " GCC_TRICORE_1782 - Compiller: GCC Target: TriCore 1782\n" - " GCC_ARC_EM_HS - Compiller: GCC Target: DesignWare ARC EM HS\n" - " GCC_ARC_V1 - Compiller: GCC Target: DesignWare ARC v1\n" - " GCC_ATMEGA - Compiller: GCC Target: ATmega\n" - " GCC_POSIX - Compiller: GCC Target: Posix\n" - " GCC_RP2040 - Compiller: GCC Target: RP2040 ARM Cortex-M0+\n" - " GCC_XTENSA_ESP32 - Compiller: GCC Target: Xtensa ESP32\n" - " GCC_AVRDX - Compiller: GCC Target: AVRDx\n" - " GCC_AVR_MEGA0 - Compiller: GCC Target: AVR Mega0\n" - " IAR_78K0K - Compiller: IAR Target: Renesas 78K0K\n" - " IAR_ARM_CA5_NOGIC - Compiller: IAR Target: ARM Cortex-A5 no GIC\n" - " IAR_ARM_CA9 - Compiller: IAR Target: ARM Cortex-A9\n" - " IAR_ARM_CM0 - Compiller: IAR Target: ARM Cortex-M0\n" - " IAR_ARM_CM3 - Compiller: IAR Target: ARM Cortex-M3\n" - " IAR_ARM_CM4F - Compiller: IAR Target: ARM Cortex-M4 with FPU\n" - " IAR_ARM_CM4F_MPU - Compiller: IAR Target: ARM Cortex-M4 with FPU and MPU\n" - " IAR_ARM_CM7 - Compiller: IAR Target: ARM Cortex-M7\n" - " IAR_ARM_CM23_NONSECURE - Compiller: IAR Target: ARM Cortex-M23 non-secure\n" - " IAR_ARM_CM23_SECURE - Compiller: IAR Target: ARM Cortex-M23 secure\n" - " IAR_ARM_CM23_NTZ_NONSECURE - Compiller: IAR Target: ARM 
Cortex-M23 non-trustzone non-secure\n" - " IAR_ARM_CM33_NONSECURE - Compiller: IAR Target: ARM Cortex-M33 non-secure\n" - " IAR_ARM_CM33_SECURE - Compiller: IAR Target: ARM Cortex-M33 secure\n" - " IAR_ARM_CM33_NTZ_NONSECURE - Compiller: IAR Target: ARM Cortex-M33 non-trustzone non-secure\n" - " IAR_ARM_CM55_NONSECURE - Compiller: IAR Target: ARM Cortex-M55 non-secure\n" - " IAR_ARM_CM55_SECURE - Compiller: IAR Target: ARM Cortex-M55 secure\n" - " IAR_ARM_CM55_NTZ_NONSECURE - Compiller: IAR Target: ARM Cortex-M55 non-trustzone non-secure\n" - " IAR_ARM_CM85_NONSECURE - Compiller: IAR Target: ARM Cortex-M85 non-secure\n" - " IAR_ARM_CM85_SECURE - Compiller: IAR Target: ARM Cortex-M85 secure\n" - " IAR_ARM_CM85_NTZ_NONSECURE - Compiller: IAR Target: ARM Cortex-M85 non-trustzone non-secure\n" - " IAR_ARM_CRX_NOGIC - Compiller: IAR Target: ARM Cortex-Rx no GIC\n" - " IAR_ATMEGA323 - Compiller: IAR Target: ATMega323\n" - " IAR_ATMEL_SAM7S64 - Compiller: IAR Target: Atmel SAM7S64\n" - " IAR_ATMEL_SAM9XE - Compiller: IAR Target: Atmel SAM9XE\n" - " IAR_AVR_AVRDX - Compiller: IAR Target: AVRDx\n" - " IAR_AVR_MEGA0 - Compiller: IAR Target: AVR Mega0\n" - " IAR_AVR32_UC3 - Compiller: IAR Target: AVR32 UC3\n" - " IAR_LPC2000 - Compiller: IAR Target: LPC2000\n" - " IAR_MSP430 - Compiller: IAR Target: MSP430\n" - " IAR_MSP430X - Compiller: IAR Target: MSP430X\n" - " IAR_RISC_V - Compiller: IAR Target: RISC-V\n" - " IAR_RL78 - Compiller: IAR Target: Renesas RL78\n" - " IAR_RX100 - Compiller: IAR Target: Renesas RX100\n" - " IAR_RX600 - Compiller: IAR Target: Renesas RX600\n" - " IAR_RX700_V3_DPFPU - Compiller: IAR Target: Renesas RX700 v3 with DPFPU\n" - " IAR_RX_V2 - Compiller: IAR Target: Renesas RX v2\n" - " IAR_STR71X - Compiller: IAR Target: STR71x\n" - " IAR_STR75X - Compiller: IAR Target: STR75x\n" - " IAR_STR91X - Compiller: IAR Target: STR91x\n" - " IAR_V850ES_FX3 - Compiller: IAR Target: Renesas V850ES/Fx3\n" - " IAR_V850ES_HX3 - Compiller: IAR Target: Renesas V850ES/Hx3\n" - " MIKROC_ARM_CM4F - Compiller: MikroC Target: ARM Cortex-M4 with FPU\n" - " MPLAB_PIC18F - Compiller: MPLAB Target: PIC18F\n" - " MPLAB_PIC24 - Compiller: MPLAB Target: PIC24\n" - " MPLAB_PIC32MEC14XX - Compiller: MPLAB Target: PIC32MEC14xx\n" - " MPLAB_PIC32MX - Compiller: MPLAB Target: PIC32MX\n" - " MPLAB_PIC32MZ - Compiller: MPLAB Target: PIC32MZ\n" - " MSVC_MINGW - Compiller: MSVC or MinGW Target: x86\n" - " OWATCOM_16BIT_DOS_FLSH186 - Compiller: Open Watcom Target: 16 bit DOS Flsh186\n" - " OWATCOM_16BIT_DOS_PC - Compiller: Open Watcom Target: 16 bit DOS PC\n" - " PARADIGM_TERN_EE_LARGE - Compiller: Paradigm Target: Tern EE large\n" - " PARADIGM_TERN_EE_SMALL - Compiller: Paradigm Target: Tern EE small\n" - " RENESAS_RX100 - Compiller: Renesas Target: RX100\n" - " RENESAS_RX200 - Compiller: Renesas Target: RX200\n" - " RENESAS_RX600 - Compiller: Renesas Target: RX600\n" - " RENESAS_RX600_V2 - Compiller: Renesas Target: RX600 v2\n" - " RENESAS_RX700_V3_DPFPU - Compiller: Renesas Target: RX700 v3 with DPFPU\n" - " RENESAS_SH2A_FPU - Compiller: Renesas Target: SH2A with FPU\n" - " ROWLEY_MSP430F449 - Compiller: Rowley Target: MSP430F449\n" - " RVDS_ARM_CA9 - Compiller: RVDS Target: ARM Cortex-A9\n" - " RVDS_ARM_CM0 - Compiller: RVDS Target: ARM Cortex-M0\n" - " RVDS_ARM_CM3 - Compiller: RVDS Target: ARM Cortex-M3\n" - " RVDS_ARM_CM4_MPU - Compiller: RVDS Target: ARM Cortex-M4 with MPU\n" - " RVDS_ARM_CM4F - Compiller: RVDS Target: ARM Cortex-M4 with FPU\n" - " RVDS_ARM_CM7 - Compiller: RVDS Target: ARM Cortex-M7\n" - " 
RVDS_ARM7_LPC21XX - Compiller: RVDS Target: ARM7 LPC21xx\n" - " SDCC_CYGNAL - Compiller: SDCC Target: Cygnal\n" - " SOFTUNE_MB91460 - Compiller: Softune Target: MB91460\n" - " SOFTUNE_MB96340 - Compiller: Softune Target: MB96340\n" - " TASKING_ARM_CM4F - Compiller: Tasking Target: ARM Cortex-M4 with FPU\n" - " CDK_THEAD_CK802 - Compiller: CDK Target: T-head CK802\n" - " XCC_XTENSA - Compiller: XCC Target: Xtensa\n" - " WIZC_PIC18 - Compiller: WizC Target: PIC18") + " A_CUSTOM_PORT - Compiler: User Defined Target: User Defined\n" + " BCC_16BIT_DOS_FLSH186 - Compiler: BCC Target: 16 bit DOS Flsh186\n" + " BCC_16BIT_DOS_PC - Compiler: BCC Target: 16 bit DOS PC\n" + " CCS_ARM_CM3 - Compiler: CCS Target: ARM Cortex-M3\n" + " CCS_ARM_CM4F - Compiler: CCS Target: ARM Cortex-M4 with FPU\n" + " CCS_ARM_CR4 - Compiler: CCS Target: ARM Cortex-R4\n" + " CCS_MSP430X - Compiler: CCS Target: MSP430X\n" + " CODEWARRIOR_COLDFIRE_V1 - Compiler: CoreWarrior Target: ColdFire V1\n" + " CODEWARRIOR_COLDFIRE_V2 - Compiler: CoreWarrior Target: ColdFire V2\n" + " CODEWARRIOR_HCS12 - Compiler: CoreWarrior Target: HCS12\n" + " GCC_ARM_CA9 - Compiler: GCC Target: ARM Cortex-A9\n" + " GCC_ARM_CA53_64_BIT - Compiler: GCC Target: ARM Cortex-A53 64 bit\n" + " GCC_ARM_CA53_64_BIT_SRE - Compiler: GCC Target: ARM Cortex-A53 64 bit SRE\n" + " GCC_ARM_CM0 - Compiler: GCC Target: ARM Cortex-M0\n" + " GCC_ARM_CM3 - Compiler: GCC Target: ARM Cortex-M3\n" + " GCC_ARM_CM3_MPU - Compiler: GCC Target: ARM Cortex-M3 with MPU\n" + " GCC_ARM_CM4_MPU - Compiler: GCC Target: ARM Cortex-M4 with MPU\n" + " GCC_ARM_CM4F - Compiler: GCC Target: ARM Cortex-M4 with FPU\n" + " GCC_ARM_CM7 - Compiler: GCC Target: ARM Cortex-M7\n" + " GCC_ARM_CM23_NONSECURE - Compiler: GCC Target: ARM Cortex-M23 non-secure\n" + " GCC_ARM_CM23_SECURE - Compiler: GCC Target: ARM Cortex-M23 secure\n" + " GCC_ARM_CM23_NTZ_NONSECURE - Compiler: GCC Target: ARM Cortex-M23 non-trustzone non-secure\n" + " GCC_ARM_CM33_NONSECURE - Compiler: GCC Target: ARM Cortex-M33 non-secure\n" + " GCC_ARM_CM33_SECURE - Compiler: GCC Target: ARM Cortex-M33 secure\n" + " GCC_ARM_CM33_NTZ_NONSECURE - Compiler: GCC Target: ARM Cortex-M33 non-trustzone non-secure\n" + " GCC_ARM_CM33_TFM - Compiler: GCC Target: ARM Cortex-M33 non-secure for TF-M\n" + " GCC_ARM_CM55_NONSECURE - Compiler: GCC Target: ARM Cortex-M55 non-secure\n" + " GCC_ARM_CM55_SECURE - Compiler: GCC Target: ARM Cortex-M55 secure\n" + " GCC_ARM_CM55_NTZ_NONSECURE - Compiler: GCC Target: ARM Cortex-M55 non-trustzone non-secure\n" + " GCC_ARM_CM55_TFM - Compiler: GCC Target: ARM Cortex-M55 non-secure for TF-M\n" + " GCC_ARM_CM85_NONSECURE - Compiler: GCC Target: ARM Cortex-M85 non-secure\n" + " GCC_ARM_CM85_SECURE - Compiler: GCC Target: ARM Cortex-M85 secure\n" + " GCC_ARM_CM85_NTZ_NONSECURE - Compiler: GCC Target: ARM Cortex-M85 non-trustzone non-secure\n" + " GCC_ARM_CM85_TFM - Compiler: GCC Target: ARM Cortex-M85 non-secure for TF-M\n" + " GCC_ARM_CR5 - Compiler: GCC Target: ARM Cortex-R5\n" + " GCC_ARM_CRX_NOGIC - Compiler: GCC Target: ARM Cortex-Rx no GIC\n" + " GCC_ARM7_AT91FR40008 - Compiler: GCC Target: ARM7 Atmel AT91R40008\n" + " GCC_ARM7_AT91SAM7S - Compiler: GCC Target: ARM7 Atmel AT91SAM7S\n" + " GCC_ARM7_LPC2000 - Compiler: GCC Target: ARM7 LPC2000\n" + " GCC_ARM7_LPC23XX - Compiler: GCC Target: ARM7 LPC23xx\n" + " GCC_ATMEGA323 - Compiler: GCC Target: ATMega323\n" + " GCC_AVR32_UC3 - Compiler: GCC Target: AVR32 UC3\n" + " GCC_COLDFIRE_V2 - Compiler: GCC Target: ColdFire V2\n" + " GCC_CORTUS_APS3 - Compiler: GCC 
Target: CORTUS APS3\n" + " GCC_H8S2329 - Compiler: GCC Target: H8S2329\n" + " GCC_HCS12 - Compiler: GCC Target: HCS12\n" + " GCC_IA32_FLAT - Compiler: GCC Target: IA32 flat\n" + " GCC_MICROBLAZE - Compiler: GCC Target: MicroBlaze\n" + " GCC_MICROBLAZE_V8 - Compiler: GCC Target: MicroBlaze V8\n" + " GCC_MICROBLAZE_V9 - Compiler: GCC Target: MicroBlaze V9\n" + " GCC_MSP430F449 - Compiler: GCC Target: MSP430F449\n" + " GCC_NIOSII - Compiler: GCC Target: NiosII\n" + " GCC_PPC405_XILINX - Compiler: GCC Target: Xilinx PPC405\n" + " GCC_PPC440_XILINX - Compiler: GCC Target: Xilinx PPC440\n" + " GCC_RISC_V - Compiler: GCC Target: RISC-V\n" + " GCC_RISC_V_PULPINO_VEGA_RV32M1RM - Compiler: GCC Target: RISC-V Pulpino Vega RV32M1RM\n" + " GCC_RL78 - Compiler: GCC Target: Renesas RL78\n" + " GCC_RX100 - Compiler: GCC Target: Renesas RX100\n" + " GCC_RX200 - Compiler: GCC Target: Renesas RX200\n" + " GCC_RX600 - Compiler: GCC Target: Renesas RX600\n" + " GCC_RX600_V2 - Compiler: GCC Target: Renesas RX600 v2\n" + " GCC_RX700_V3_DPFPU - Compiler: GCC Target: Renesas RX700 v3 with DPFPU\n" + " GCC_STR75X - Compiler: GCC Target: STR75x\n" + " GCC_TRICORE_1782 - Compiler: GCC Target: TriCore 1782\n" + " GCC_ARC_EM_HS - Compiler: GCC Target: DesignWare ARC EM HS\n" + " GCC_ARC_V1 - Compiler: GCC Target: DesignWare ARC v1\n" + " GCC_ATMEGA - Compiler: GCC Target: ATmega\n" + " GCC_POSIX - Compiler: GCC Target: Posix\n" + " GCC_RP2040 - Compiler: GCC Target: RP2040 ARM Cortex-M0+\n" + " GCC_XTENSA_ESP32 - Compiler: GCC Target: Xtensa ESP32\n" + " GCC_AVRDX - Compiler: GCC Target: AVRDx\n" + " GCC_AVR_MEGA0 - Compiler: GCC Target: AVR Mega0\n" + " IAR_78K0K - Compiler: IAR Target: Renesas 78K0K\n" + " IAR_ARM_CA5_NOGIC - Compiler: IAR Target: ARM Cortex-A5 no GIC\n" + " IAR_ARM_CA9 - Compiler: IAR Target: ARM Cortex-A9\n" + " IAR_ARM_CM0 - Compiler: IAR Target: ARM Cortex-M0\n" + " IAR_ARM_CM3 - Compiler: IAR Target: ARM Cortex-M3\n" + " IAR_ARM_CM4F - Compiler: IAR Target: ARM Cortex-M4 with FPU\n" + " IAR_ARM_CM4F_MPU - Compiler: IAR Target: ARM Cortex-M4 with FPU and MPU\n" + " IAR_ARM_CM7 - Compiler: IAR Target: ARM Cortex-M7\n" + " IAR_ARM_CM23_NONSECURE - Compiler: IAR Target: ARM Cortex-M23 non-secure\n" + " IAR_ARM_CM23_SECURE - Compiler: IAR Target: ARM Cortex-M23 secure\n" + " IAR_ARM_CM23_NTZ_NONSECURE - Compiler: IAR Target: ARM Cortex-M23 non-trustzone non-secure\n" + " IAR_ARM_CM33_NONSECURE - Compiler: IAR Target: ARM Cortex-M33 non-secure\n" + " IAR_ARM_CM33_SECURE - Compiler: IAR Target: ARM Cortex-M33 secure\n" + " IAR_ARM_CM33_NTZ_NONSECURE - Compiler: IAR Target: ARM Cortex-M33 non-trustzone non-secure\n" + " IAR_ARM_CM55_NONSECURE - Compiler: IAR Target: ARM Cortex-M55 non-secure\n" + " IAR_ARM_CM55_SECURE - Compiler: IAR Target: ARM Cortex-M55 secure\n" + " IAR_ARM_CM55_NTZ_NONSECURE - Compiler: IAR Target: ARM Cortex-M55 non-trustzone non-secure\n" + " IAR_ARM_CM85_NONSECURE - Compiler: IAR Target: ARM Cortex-M85 non-secure\n" + " IAR_ARM_CM85_SECURE - Compiler: IAR Target: ARM Cortex-M85 secure\n" + " IAR_ARM_CM85_NTZ_NONSECURE - Compiler: IAR Target: ARM Cortex-M85 non-trustzone non-secure\n" + " IAR_ARM_CRX_NOGIC - Compiler: IAR Target: ARM Cortex-Rx no GIC\n" + " IAR_ATMEGA323 - Compiler: IAR Target: ATMega323\n" + " IAR_ATMEL_SAM7S64 - Compiler: IAR Target: Atmel SAM7S64\n" + " IAR_ATMEL_SAM9XE - Compiler: IAR Target: Atmel SAM9XE\n" + " IAR_AVR_AVRDX - Compiler: IAR Target: AVRDx\n" + " IAR_AVR_MEGA0 - Compiler: IAR Target: AVR Mega0\n" + " IAR_AVR32_UC3 - Compiler: IAR Target: AVR32 
UC3\n" + " IAR_LPC2000 - Compiler: IAR Target: LPC2000\n" + " IAR_MSP430 - Compiler: IAR Target: MSP430\n" + " IAR_MSP430X - Compiler: IAR Target: MSP430X\n" + " IAR_RISC_V - Compiler: IAR Target: RISC-V\n" + " IAR_RL78 - Compiler: IAR Target: Renesas RL78\n" + " IAR_RX100 - Compiler: IAR Target: Renesas RX100\n" + " IAR_RX600 - Compiler: IAR Target: Renesas RX600\n" + " IAR_RX700_V3_DPFPU - Compiler: IAR Target: Renesas RX700 v3 with DPFPU\n" + " IAR_RX_V2 - Compiler: IAR Target: Renesas RX v2\n" + " IAR_STR71X - Compiler: IAR Target: STR71x\n" + " IAR_STR75X - Compiler: IAR Target: STR75x\n" + " IAR_STR91X - Compiler: IAR Target: STR91x\n" + " IAR_V850ES_FX3 - Compiler: IAR Target: Renesas V850ES/Fx3\n" + " IAR_V850ES_HX3 - Compiler: IAR Target: Renesas V850ES/Hx3\n" + " MIKROC_ARM_CM4F - Compiler: MikroC Target: ARM Cortex-M4 with FPU\n" + " MPLAB_PIC18F - Compiler: MPLAB Target: PIC18F\n" + " MPLAB_PIC24 - Compiler: MPLAB Target: PIC24\n" + " MPLAB_PIC32MEC14XX - Compiler: MPLAB Target: PIC32MEC14xx\n" + " MPLAB_PIC32MX - Compiler: MPLAB Target: PIC32MX\n" + " MPLAB_PIC32MZ - Compiler: MPLAB Target: PIC32MZ\n" + " MSVC_MINGW - Compiler: MSVC or MinGW Target: x86\n" + " OWATCOM_16BIT_DOS_FLSH186 - Compiler: Open Watcom Target: 16 bit DOS Flsh186\n" + " OWATCOM_16BIT_DOS_PC - Compiler: Open Watcom Target: 16 bit DOS PC\n" + " PARADIGM_TERN_EE_LARGE - Compiler: Paradigm Target: Tern EE large\n" + " PARADIGM_TERN_EE_SMALL - Compiler: Paradigm Target: Tern EE small\n" + " RENESAS_RX100 - Compiler: Renesas Target: RX100\n" + " RENESAS_RX200 - Compiler: Renesas Target: RX200\n" + " RENESAS_RX600 - Compiler: Renesas Target: RX600\n" + " RENESAS_RX600_V2 - Compiler: Renesas Target: RX600 v2\n" + " RENESAS_RX700_V3_DPFPU - Compiler: Renesas Target: RX700 v3 with DPFPU\n" + " RENESAS_SH2A_FPU - Compiler: Renesas Target: SH2A with FPU\n" + " ROWLEY_MSP430F449 - Compiler: Rowley Target: MSP430F449\n" + " RVDS_ARM_CA9 - Compiler: RVDS Target: ARM Cortex-A9\n" + " RVDS_ARM_CM0 - Compiler: RVDS Target: ARM Cortex-M0\n" + " RVDS_ARM_CM3 - Compiler: RVDS Target: ARM Cortex-M3\n" + " RVDS_ARM_CM4_MPU - Compiler: RVDS Target: ARM Cortex-M4 with MPU\n" + " RVDS_ARM_CM4F - Compiler: RVDS Target: ARM Cortex-M4 with FPU\n" + " RVDS_ARM_CM7 - Compiler: RVDS Target: ARM Cortex-M7\n" + " RVDS_ARM7_LPC21XX - Compiler: RVDS Target: ARM7 LPC21xx\n" + " SDCC_CYGNAL - Compiler: SDCC Target: Cygnal\n" + " SOFTUNE_MB91460 - Compiler: Softune Target: MB91460\n" + " SOFTUNE_MB96340 - Compiler: Softune Target: MB96340\n" + " TASKING_ARM_CM4F - Compiler: Tasking Target: ARM Cortex-M4 with FPU\n" + " CDK_THEAD_CK802 - Compiler: CDK Target: T-head CK802\n" + " XCC_XTENSA - Compiler: XCC Target: Xtensa\n" + " WIZC_PIC18 - Compiler: WizC Target: PIC18") + # Native FREERTOS_PORT for Linux and Windows MINGW builds + if(UNIX) + message(STATUS " Auto-Detected Unix, setting FREERTOS_PORT=GCC_POSIX") + set(FREERTOS_PORT GCC_POSIX CACHE STRING "FreeRTOS port name") + elseif(MINGW) + message(STATUS " Auto-Detected MINGW, setting FREERTOS_PORT=MSVC_MINGW") + set(FREERTOS_PORT MSVC_MINGW CACHE STRING "FreeRTOS port name") + endif() +elseif((FREERTOS_PORT STREQUAL "A_CUSTOM_PORT") AND (NOT TARGET freertos_kernel_port) ) + message(FATAL_ERROR " FREERTOS_PORT is set to A_CUSTOM_PORT. Please specify the custom port target with all necessary files. 
For example:\n" + " Assuming a directory of:\n" + " FreeRTOSCustomPort/\n" + " CMakeLists.txt\n" + " port.c\n" + " portmacro.h\n" + " Where FreeRTOSCustomPort/CMakeLists.txt is a modified version of:\n" + " add_library(freertos_kernel_port STATIC)\n" + " target_sources(freertos_kernel_port\n" + " PRIVATE\n" + " port.c\n" + " portmacro.h)\n" + " target_include_directories(freertos_kernel_port\n" + " PUBLIC\n" + " .)\n" + " target_link_libraries(freertos_kernel_port\n" + " PRIVATE\n" + " freertos_kernel)") endif() ######################################################################## @@ -250,7 +280,8 @@ add_library(freertos_kernel STATIC target_include_directories(freertos_kernel PUBLIC include - ${FREERTOS_CONFIG_FILE_DIRECTORY} + # Note: DEPRECATED but still supported, may be removed in a future release. + $<$>:${FREERTOS_CONFIG_FILE_DIRECTORY}> ) target_link_libraries(freertos_kernel diff --git a/event_groups.c b/event_groups.c index 19c4b68f3f4..80e220d95f0 100644 --- a/event_groups.c +++ b/event_groups.c @@ -105,10 +105,15 @@ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits, /* A StaticEventGroup_t object must be provided. */ configASSERT( pxEventGroupBuffer ); - /* Sanity check that the size of the structure used to declare a - * variable of type StaticEventGroup_t equals the size of the real - * event group structure. */ - configASSERT( sizeof( StaticEventGroup_t ) == sizeof( EventGroup_t ) ); + #if ( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + * variable of type StaticEventGroup_t equals the size of the real + * event group structure. */ + volatile size_t xSize = sizeof( StaticEventGroup_t ); + configASSERT( xSize == sizeof( EventGroup_t ) ); + } /*lint !e529 xSize is referenced if configASSERT() is defined. */ + #endif /* configASSERT_DEFINED */ /* The user has provided a statically allocated event group - use it. */ pxEventBits = ( EventGroup_t * ) pxEventGroupBuffer; /*lint !e740 !e9087 EventGroup_t and StaticEventGroup_t are deliberately aliased for data hiding purposes and guaranteed to have the same size and alignment requirement - checked by configASSERT(). */ @@ -516,15 +521,15 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) { - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; EventGroup_t const * const pxEventBits = xEventGroup; EventBits_t uxReturn; - xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { uxReturn = pxEventBits->uxEventBits; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); return uxReturn; } /*lint !e818 EventGroupHandle_t is a typedef used in other functions to so can't be pointer to const. */ diff --git a/include/task.h b/include/task.h index 298c0d60781..08c14dd6ff1 100644 --- a/include/task.h +++ b/include/task.h @@ -1681,21 +1681,6 @@ configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) PRIVIL #endif -#if ( configUSE_IDLE_HOOK != 0 ) - -/** - * task.h - * @code{c} - * void vApplicationTickHook( void ); - * @endcode - * - * This hook function is called in the ilde task handler - * @note: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, CALL A FUNCTION THAT MIGHT BLOCK. 
- */ - void vApplicationIdleHook( void ); - -#endif - #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) /** diff --git a/portable/CMakeLists.txt b/portable/CMakeLists.txt index 9199ff7ed9a..ae9de9cfe1b 100644 --- a/portable/CMakeLists.txt +++ b/portable/CMakeLists.txt @@ -990,6 +990,10 @@ target_include_directories(freertos_kernel_port PUBLIC $<$:${CMAKE_CURRENT_LIST_DIR}/WizC/PIC18> ) +if(FREERTOS_PORT STREQUAL GCC_POSIX) + find_package(Threads REQUIRED) +endif() + target_link_libraries(freertos_kernel_port PUBLIC $<$:pico_base_headers> diff --git a/portable/GCC/RX100/portmacro.h b/portable/GCC/RX100/portmacro.h index 312764d9eb0..863596685aa 100644 --- a/portable/GCC/RX100/portmacro.h +++ b/portable/GCC/RX100/portmacro.h @@ -128,7 +128,7 @@ extern void vTaskExitCritical( void ); uint32_t ulPortGetIPL( void ) __attribute__((naked)); void vPortSetIPL( uint32_t ulNewIPL ) __attribute__((naked)); #define portSET_INTERRUPT_MASK_FROM_ISR() ulPortGetIPL(); portDISABLE_INTERRUPTS() -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) vPortSetIPL( (uint32_t) xSavedInterruptStatus ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) vPortSetIPL( uxSavedInterruptStatus ) /* Tickless idle/low power functionality. */ #if configUSE_TICKLESS_IDLE == 1 diff --git a/portable/GCC/RX200/portmacro.h b/portable/GCC/RX200/portmacro.h index 4f96afd82df..80fc8c8041d 100644 --- a/portable/GCC/RX200/portmacro.h +++ b/portable/GCC/RX200/portmacro.h @@ -131,7 +131,7 @@ extern void vTaskExitCritical( void ); uint32_t ulPortGetIPL( void ) __attribute__((naked)); void vPortSetIPL( uint32_t ulNewIPL ) __attribute__((naked)); #define portSET_INTERRUPT_MASK_FROM_ISR() ulPortGetIPL(); portDISABLE_INTERRUPTS() -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) vPortSetIPL( (uint32_t) xSavedInterruptStatus ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) vPortSetIPL( uxSavedInterruptStatus ) /*-----------------------------------------------------------*/ diff --git a/portable/GCC/RX600/portmacro.h b/portable/GCC/RX600/portmacro.h index dfd4cecc0c4..7e1fd1e0c92 100644 --- a/portable/GCC/RX600/portmacro.h +++ b/portable/GCC/RX600/portmacro.h @@ -131,7 +131,7 @@ extern void vTaskExitCritical( void ); uint32_t ulPortGetIPL( void ) __attribute__((naked)); void vPortSetIPL( uint32_t ulNewIPL ) __attribute__((naked)); #define portSET_INTERRUPT_MASK_FROM_ISR() ulPortGetIPL(); portDISABLE_INTERRUPTS() -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) vPortSetIPL( (uint32_t) xSavedInterruptStatus ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) vPortSetIPL( uxSavedInterruptStatus ) /*-----------------------------------------------------------*/ diff --git a/portable/GCC/RX600v2/portmacro.h b/portable/GCC/RX600v2/portmacro.h index dfd4cecc0c4..7e1fd1e0c92 100644 --- a/portable/GCC/RX600v2/portmacro.h +++ b/portable/GCC/RX600v2/portmacro.h @@ -131,7 +131,7 @@ extern void vTaskExitCritical( void ); uint32_t ulPortGetIPL( void ) __attribute__((naked)); void vPortSetIPL( uint32_t ulNewIPL ) __attribute__((naked)); #define portSET_INTERRUPT_MASK_FROM_ISR() ulPortGetIPL(); portDISABLE_INTERRUPTS() -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) vPortSetIPL( (uint32_t) xSavedInterruptStatus ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) vPortSetIPL( uxSavedInterruptStatus ) /*-----------------------------------------------------------*/ diff --git a/portable/GCC/RX700v3_DPFPU/portmacro.h 
b/portable/GCC/RX700v3_DPFPU/portmacro.h index ae23a710322..6b76374c882 100644 --- a/portable/GCC/RX700v3_DPFPU/portmacro.h +++ b/portable/GCC/RX700v3_DPFPU/portmacro.h @@ -155,7 +155,7 @@ uint32_t ulPortGetIPL( void ) __attribute__( ( naked ) ); void vPortSetIPL( uint32_t ulNewIPL ) __attribute__( ( naked ) ); #define portSET_INTERRUPT_MASK_FROM_ISR() ulPortGetIPL(); portDISABLE_INTERRUPTS() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) vPortSetIPL( (uint32_t) xSavedInterruptStatus ) + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) vPortSetIPL( uxSavedInterruptStatus ) /*-----------------------------------------------------------*/ diff --git a/portable/IAR/RX100/portmacro.h b/portable/IAR/RX100/portmacro.h index c6d78262d37..d408a1389a5 100644 --- a/portable/IAR/RX100/portmacro.h +++ b/portable/IAR/RX100/portmacro.h @@ -124,7 +124,7 @@ extern void vTaskExitCritical( void ); /* As this port allows interrupt nesting... */ #define portSET_INTERRUPT_MASK_FROM_ISR() __get_interrupt_level(); portDISABLE_INTERRUPTS() -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) __set_interrupt_level( ( uint8_t ) ( xSavedInterruptStatus ) ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) __set_interrupt_level( ( uint8_t ) ( uxSavedInterruptStatus ) ) /* Tickless idle/low power functionality. */ #if configUSE_TICKLESS_IDLE == 1 diff --git a/portable/IAR/RX600/portmacro.h b/portable/IAR/RX600/portmacro.h index 170aa55ffaf..e5decdaf34a 100644 --- a/portable/IAR/RX600/portmacro.h +++ b/portable/IAR/RX600/portmacro.h @@ -126,7 +126,7 @@ extern void vTaskExitCritical( void ); /* As this port allows interrupt nesting... */ #define portSET_INTERRUPT_MASK_FROM_ISR() __get_interrupt_level(); portDISABLE_INTERRUPTS() -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) __set_interrupt_level( ( uint8_t ) ( xSavedInterruptStatus ) ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) __set_interrupt_level( ( uint8_t ) ( uxSavedInterruptStatus ) ) /*-----------------------------------------------------------*/ diff --git a/portable/IAR/RX700v3_DPFPU/portmacro.h b/portable/IAR/RX700v3_DPFPU/portmacro.h index 1112dc00df5..e0204fa0c3c 100644 --- a/portable/IAR/RX700v3_DPFPU/portmacro.h +++ b/portable/IAR/RX700v3_DPFPU/portmacro.h @@ -159,7 +159,7 @@ /* As this port allows interrupt nesting... */ #define portSET_INTERRUPT_MASK_FROM_ISR() __get_interrupt_level(); portDISABLE_INTERRUPTS() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) __set_interrupt_level( ( uint8_t ) ( xSavedInterruptStatus ) ) + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) __set_interrupt_level( ( uint8_t ) ( uxSavedInterruptStatus ) ) /*-----------------------------------------------------------*/ diff --git a/portable/IAR/RXv2/portmacro.h b/portable/IAR/RXv2/portmacro.h index 248a467378a..984a4fb50e2 100644 --- a/portable/IAR/RXv2/portmacro.h +++ b/portable/IAR/RXv2/portmacro.h @@ -126,7 +126,7 @@ extern void vTaskExitCritical( void ); /* As this port allows interrupt nesting... 
*/ #define portSET_INTERRUPT_MASK_FROM_ISR() __get_interrupt_level(); portDISABLE_INTERRUPTS() -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) __set_interrupt_level( ( uint8_t ) ( xSavedInterruptStatus ) ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) __set_interrupt_level( ( uint8_t ) ( uxSavedInterruptStatus ) ) /*-----------------------------------------------------------*/ diff --git a/portable/MemMang/heap_4.c b/portable/MemMang/heap_4.c index 4ad3b17b07b..961f9961e20 100644 --- a/portable/MemMang/heap_4.c +++ b/portable/MemMang/heap_4.c @@ -390,7 +390,7 @@ static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */ { uxAddress += ( portBYTE_ALIGNMENT - 1 ); uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ); - xTotalHeapSize -= ( size_t ) ( uxAddress - ( portPOINTER_SIZE_TYPE ) ucHeap ); + xTotalHeapSize -= uxAddress - ( portPOINTER_SIZE_TYPE ) ucHeap; } pucAlignedHeap = ( uint8_t * ) uxAddress; @@ -402,7 +402,7 @@ static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */ /* pxEnd is used to mark the end of the list of free blocks and is inserted * at the end of the heap space. */ - uxAddress = ( portPOINTER_SIZE_TYPE ) ( pucAlignedHeap + xTotalHeapSize ); + uxAddress = ( ( portPOINTER_SIZE_TYPE ) pucAlignedHeap ) + xTotalHeapSize; uxAddress -= xHeapStructSize; uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ); pxEnd = ( BlockLink_t * ) uxAddress; diff --git a/portable/Renesas/RX100/portmacro.h b/portable/Renesas/RX100/portmacro.h index 2ed57df2bc7..9bcbd3c8c3d 100644 --- a/portable/Renesas/RX100/portmacro.h +++ b/portable/Renesas/RX100/portmacro.h @@ -128,7 +128,7 @@ extern void vTaskExitCritical( void ); /* As this port allows interrupt nesting... */ #define portSET_INTERRUPT_MASK_FROM_ISR() ( UBaseType_t ) get_ipl(); set_ipl( ( signed long ) configMAX_SYSCALL_INTERRUPT_PRIORITY ) -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) set_ipl( ( signed long ) xSavedInterruptStatus ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) set_ipl( ( signed long ) uxSavedInterruptStatus ) /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/RX200/portmacro.h b/portable/Renesas/RX200/portmacro.h index 59c4e87b80c..62a085023e2 100644 --- a/portable/Renesas/RX200/portmacro.h +++ b/portable/Renesas/RX200/portmacro.h @@ -128,7 +128,7 @@ extern void vTaskExitCritical( void ); /* As this port allows interrupt nesting... */ #define portSET_INTERRUPT_MASK_FROM_ISR() get_ipl(); set_ipl( ( long ) configMAX_SYSCALL_INTERRUPT_PRIORITY ) -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) set_ipl( ( long ) xSavedInterruptStatus ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) set_ipl( ( long ) uxSavedInterruptStatus ) /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/RX600/portmacro.h b/portable/Renesas/RX600/portmacro.h index 6bb7df05508..3b29cbddb50 100644 --- a/portable/Renesas/RX600/portmacro.h +++ b/portable/Renesas/RX600/portmacro.h @@ -129,7 +129,7 @@ extern void vTaskExitCritical( void ); /* As this port allows interrupt nesting... 
*/ #define portSET_INTERRUPT_MASK_FROM_ISR() get_ipl(); set_ipl( ( long ) configMAX_SYSCALL_INTERRUPT_PRIORITY ) -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) set_ipl( ( long ) xSavedInterruptStatus ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) set_ipl( ( long ) uxSavedInterruptStatus ) /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/RX600v2/portmacro.h b/portable/Renesas/RX600v2/portmacro.h index 3fd6476c4c2..d67a8f892aa 100644 --- a/portable/Renesas/RX600v2/portmacro.h +++ b/portable/Renesas/RX600v2/portmacro.h @@ -129,7 +129,7 @@ extern void vTaskExitCritical( void ); /* As this port allows interrupt nesting... */ #define portSET_INTERRUPT_MASK_FROM_ISR() ( UBaseType_t ) get_ipl(); set_ipl( ( long ) configMAX_SYSCALL_INTERRUPT_PRIORITY ) -#define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) set_ipl( ( long ) xSavedInterruptStatus ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) set_ipl( ( long ) uxSavedInterruptStatus ) /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/RX700v3_DPFPU/portmacro.h b/portable/Renesas/RX700v3_DPFPU/portmacro.h index c01b575cc8f..12657ee82bf 100644 --- a/portable/Renesas/RX700v3_DPFPU/portmacro.h +++ b/portable/Renesas/RX700v3_DPFPU/portmacro.h @@ -154,7 +154,7 @@ /* As this port allows interrupt nesting... */ #define portSET_INTERRUPT_MASK_FROM_ISR() ( UBaseType_t ) get_ipl(); set_ipl( ( long ) configMAX_SYSCALL_INTERRUPT_PRIORITY ) - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ) set_ipl( ( long ) xSavedInterruptStatus ) + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) set_ipl( ( long ) uxSavedInterruptStatus ) /*-----------------------------------------------------------*/ diff --git a/portable/ThirdParty/GCC/Posix/utils/wait_for_event.h b/portable/ThirdParty/GCC/Posix/utils/wait_for_event.h index 6f6efa3ea72..65e3e5481dd 100644 --- a/portable/ThirdParty/GCC/Posix/utils/wait_for_event.h +++ b/portable/ThirdParty/GCC/Posix/utils/wait_for_event.h @@ -26,8 +26,8 @@ * */ -#ifndef WAIT_FOR_EVENT_H_ -#define WAIT_FOR_EVENT_H_ +#ifndef _WAIT_FOR_EVENT_H_ +#define _WAIT_FOR_EVENT_H_ #include #include @@ -43,4 +43,4 @@ void event_signal( struct event * ev ); -#endif /* ifndef WAIT_FOR_EVENT_H_ */ +#endif /* ifndef _WAIT_FOR_EVENT_H_ */ diff --git a/stream_buffer.c b/stream_buffer.c index 5174d58c2b8..7956b1c2cbd 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -444,10 +444,13 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, * to hold at least one message. */ configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH ); - /* Sanity check that the size of the structure used to declare a - * variable of type StaticStreamBuffer_t equals the size of the real - * message buffer structure. */ - configASSERT( sizeof( StaticStreamBuffer_t ) == sizeof( StreamBuffer_t ) ); + #if ( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + * variable of type StaticStreamBuffer_t equals the size of the real + * message buffer structure. 
*/ + configASSERT( sizeof( StaticStreamBuffer_t ) == sizeof( StreamBuffer_t ) ); + #endif /* configASSERT_DEFINED */ if( ( pucStreamBufferStorageArea != NULL ) && ( pxStaticStreamBuffer != NULL ) ) { @@ -1371,12 +1374,15 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, /* Assert here is deliberately writing to the entire buffer to ensure it can * be written to without generating exceptions, and is setting the buffer to a * known value to assist in development/debugging. */ + #if ( configASSERT_DEFINED == 1 ) + { + /* The value written just has to be identifiable when looking at the + * memory. Don't use 0xA5 as that is the stack fill value and could + * result in confusion as to what is actually being observed. */ #define STREAM_BUFFER_BUFFER_WRITE_VALUE ( 0x55 ) - - /* The value written just has to be identifiable when looking at the - * memory. Don't use 0xA5 as that is the stack fill value and could - * result in confusion as to what is actually being observed. */ - configASSERT( memset( pucBuffer, ( int ) STREAM_BUFFER_BUFFER_WRITE_VALUE, xBufferSizeBytes ) == pucBuffer ); + configASSERT( memset( pucBuffer, ( int ) STREAM_BUFFER_BUFFER_WRITE_VALUE, xBufferSizeBytes ) == pucBuffer ); + } + #endif ( void ) memset( ( void * ) pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); /*lint !e9087 memset() requires void *. */ pxStreamBuffer->pucBuffer = pucBuffer; From c3a0d94f05e143da6c97318f8bdb38b1c7ed35ff Mon Sep 17 00:00:00 2001 From: Paul Helter Date: Thu, 9 Feb 2023 13:39:36 -0800 Subject: [PATCH 15/24] Fixing compiler errors due to merge issues and formatting. --- stream_buffer.c | 2 +- timers.c | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/stream_buffer.c b/stream_buffer.c index 7956b1c2cbd..eadc0782abe 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -1379,7 +1379,7 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, /* The value written just has to be identifiable when looking at the * memory. Don't use 0xA5 as that is the stack fill value and could * result in confusion as to what is actually being observed. */ -#define STREAM_BUFFER_BUFFER_WRITE_VALUE ( 0x55 ) + #define STREAM_BUFFER_BUFFER_WRITE_VALUE ( 0x55 ) configASSERT( memset( pucBuffer, ( int ) STREAM_BUFFER_BUFFER_WRITE_VALUE, xBufferSizeBytes ) == pucBuffer ); } #endif diff --git a/timers.c b/timers.c index 332384fdb48..324b95a7207 100644 --- a/timers.c +++ b/timers.c @@ -564,10 +564,8 @@ pxTimer->pxCallbackFunction( ( TimerHandle_t ) pxTimer ); } /*-----------------------------------------------------------*/ - static void prvTimerTask( void * pvParameters ) __attribute__( ( __noreturn__ ) ); - - portTASK_FUNCTION( prvTimerTask, pvParameters ) + static portTASK_FUNCTION( prvTimerTask, pvParameters ) __attribute__( ( __noreturn__ ) ); { TickType_t xNextExpireTime; BaseType_t xListWasEmpty; From 0f5b7cf5a1ddd269e96058dfa84837a76ae9351a Mon Sep 17 00:00:00 2001 From: Paul Helter Date: Thu, 9 Feb 2023 13:42:31 -0800 Subject: [PATCH 16/24] Fixing Line feeds. --- portable/MemMang/heap_4.c | 1090 ++++---- queue.c | 5592 ++++++++++++++++++------------------- 2 files changed, 3341 insertions(+), 3341 deletions(-) diff --git a/portable/MemMang/heap_4.c b/portable/MemMang/heap_4.c index 961f9961e20..a9cfa45531e 100644 --- a/portable/MemMang/heap_4.c +++ b/portable/MemMang/heap_4.c @@ -1,545 +1,545 @@ -/* - * FreeRTOS Kernel - * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy of - * this software and associated documentation files (the "Software"), to deal in - * the Software without restriction, including without limitation the rights to - * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of - * the Software, and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS - * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR - * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER - * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * https://www.FreeRTOS.org - * https://github.com/FreeRTOS - * - */ - -/* - * A sample implementation of pvPortMalloc() and vPortFree() that combines - * (coalescences) adjacent memory blocks as they are freed, and in so doing - * limits memory fragmentation. - * - * See heap_1.c, heap_2.c and heap_3.c for alternative implementations, and the - * memory management pages of https://www.FreeRTOS.org for more information. - */ -#include -#include - -/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining - * all the API functions to use the MPU wrappers. That should only be done when - * task.h is included from an application file. */ -#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE - -#include "FreeRTOS.h" -#include "task.h" - -#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE - -#if ( configSUPPORT_DYNAMIC_ALLOCATION == 0 ) - #error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0 -#endif - -#ifndef configHEAP_CLEAR_MEMORY_ON_FREE - #define configHEAP_CLEAR_MEMORY_ON_FREE 0 -#endif - -/* Block sizes must not get too small. */ -#define heapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) ) - -/* Assumes 8bit bytes! */ -#define heapBITS_PER_BYTE ( ( size_t ) 8 ) - -/* Max value that fits in a size_t type. */ -#define heapSIZE_MAX ( ~( ( size_t ) 0 ) ) - -/* Check if multiplying a and b will result in overflow. */ -#define heapMULTIPLY_WILL_OVERFLOW( a, b ) ( ( ( a ) > 0 ) && ( ( b ) > ( heapSIZE_MAX / ( a ) ) ) ) - -/* Check if adding a and b will result in overflow. */ -#define heapADD_WILL_OVERFLOW( a, b ) ( ( a ) > ( heapSIZE_MAX - ( b ) ) ) - -/* MSB of the xBlockSize member of an BlockLink_t structure is used to track - * the allocation status of a block. When MSB of the xBlockSize member of - * an BlockLink_t structure is set then the block belongs to the application. - * When the bit is free the block is still part of the free heap space. 
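/* Illustrative note, not part of the original source: on a port where size_t
 * is 32 bits wide, heapBLOCK_ALLOCATED_BITMASK below evaluates to 0x80000000,
 * so an allocated block of 40 bytes stores xBlockSize = 0x80000028 while a
 * free block of the same size stores plain 0x28; heapBLOCK_SIZE_IS_VALID()
 * simply rejects requested sizes large enough to collide with that status bit. */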
*/ -#define heapBLOCK_ALLOCATED_BITMASK ( ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * heapBITS_PER_BYTE ) - 1 ) ) -#define heapBLOCK_SIZE_IS_VALID( xBlockSize ) ( ( ( xBlockSize ) & heapBLOCK_ALLOCATED_BITMASK ) == 0 ) -#define heapBLOCK_IS_ALLOCATED( pxBlock ) ( ( ( pxBlock->xBlockSize ) & heapBLOCK_ALLOCATED_BITMASK ) != 0 ) -#define heapALLOCATE_BLOCK( pxBlock ) ( ( pxBlock->xBlockSize ) |= heapBLOCK_ALLOCATED_BITMASK ) -#define heapFREE_BLOCK( pxBlock ) ( ( pxBlock->xBlockSize ) &= ~heapBLOCK_ALLOCATED_BITMASK ) - -/*-----------------------------------------------------------*/ - -/* Allocate the memory for the heap. */ -#if ( configAPPLICATION_ALLOCATED_HEAP == 1 ) - -/* The application writer has already defined the array used for the RTOS -* heap - probably so it can be placed in a special segment or address. */ - extern uint8_t ucHeap[ configTOTAL_HEAP_SIZE ]; -#else - PRIVILEGED_DATA static uint8_t ucHeap[ configTOTAL_HEAP_SIZE ]; -#endif /* configAPPLICATION_ALLOCATED_HEAP */ - -/* Define the linked list structure. This is used to link free blocks in order - * of their memory address. */ -typedef struct A_BLOCK_LINK -{ - struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */ - size_t xBlockSize; /**< The size of the free block. */ -} BlockLink_t; - -/*-----------------------------------------------------------*/ - -/* - * Inserts a block of memory that is being freed into the correct position in - * the list of free memory blocks. The block being freed will be merged with - * the block in front it and/or the block behind it if the memory blocks are - * adjacent to each other. - */ -static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) PRIVILEGED_FUNCTION; - -/* - * Called automatically to setup the required heap structures the first time - * pvPortMalloc() is called. - */ -static void prvHeapInit( void ) PRIVILEGED_FUNCTION; - -/*-----------------------------------------------------------*/ - -/* The size of the structure placed at the beginning of each allocated memory - * block must by correctly byte aligned. */ -static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( portBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); - -/* Create a couple of list links to mark the start and end of the list. */ -PRIVILEGED_DATA static BlockLink_t xStart; -PRIVILEGED_DATA static BlockLink_t * pxEnd = NULL; - -/* Keeps track of the number of calls to allocate and free memory as well as the - * number of free bytes remaining, but says nothing about fragmentation. */ -PRIVILEGED_DATA static size_t xFreeBytesRemaining = 0U; -PRIVILEGED_DATA static size_t xMinimumEverFreeBytesRemaining = 0U; -PRIVILEGED_DATA static size_t xNumberOfSuccessfulAllocations = 0; -PRIVILEGED_DATA static size_t xNumberOfSuccessfulFrees = 0; - -/*-----------------------------------------------------------*/ - -void * pvPortMalloc( size_t xWantedSize ) -{ - BlockLink_t * pxBlock; - BlockLink_t * pxPreviousBlock; - BlockLink_t * pxNewBlockLink; - void * pvReturn = NULL; - size_t xAdditionalRequiredSize; - - vTaskSuspendAll(); - { - /* If this is the first call to malloc then the heap will require - * initialisation to setup the list of free blocks. */ - if( pxEnd == NULL ) - { - prvHeapInit(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - if( xWantedSize > 0 ) - { - /* The wanted size must be increased so it can contain a BlockLink_t - * structure in addition to the requested amount of bytes. 
Some - * additional increment may also be needed for alignment. */ - xAdditionalRequiredSize = xHeapStructSize + portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ); - - if( heapADD_WILL_OVERFLOW( xWantedSize, xAdditionalRequiredSize ) == 0 ) - { - xWantedSize += xAdditionalRequiredSize; - } - else - { - xWantedSize = 0; - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - /* Check the block size we are trying to allocate is not so large that the - * top bit is set. The top bit of the block size member of the BlockLink_t - * structure is used to determine who owns the block - the application or - * the kernel, so it must be free. */ - if( heapBLOCK_SIZE_IS_VALID( xWantedSize ) != 0 ) - { - if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) ) - { - /* Traverse the list from the start (lowest address) block until - * one of adequate size is found. */ - pxPreviousBlock = &xStart; - pxBlock = xStart.pxNextFreeBlock; - - while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) - { - pxPreviousBlock = pxBlock; - pxBlock = pxBlock->pxNextFreeBlock; - } - - /* If the end marker was reached then a block of adequate size - * was not found. */ - if( pxBlock != pxEnd ) - { - /* Return the memory space pointed to - jumping over the - * BlockLink_t structure at its start. */ - pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize ); - - /* This block is being returned for use so must be taken out - * of the list of free blocks. */ - pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; - - /* If the block is larger than required it can be split into - * two. */ - if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE ) - { - /* This block is to be split into two. Create a new - * block following the number of bytes requested. The void - * cast is used to prevent byte alignment warnings from the - * compiler. */ - pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); - configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); - - /* Calculate the sizes of two blocks split from the - * single block. */ - pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; - pxBlock->xBlockSize = xWantedSize; - - /* Insert the new block into the list of free blocks. */ - prvInsertBlockIntoFreeList( pxNewBlockLink ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - xFreeBytesRemaining -= pxBlock->xBlockSize; - - if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) - { - xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - /* The block is being returned - it is allocated and owned - * by the application and has no "next" block. 
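Once a block has been handed out it belongs to the application until vPortFree() returns it. A minimal application-side sketch of the allocate/free cycle and the bookkeeping queries defined in this file (assumes a project already configured with FreeRTOSConfig.h and heap_4.c; the function name is illustrative):

#include <stdint.h>
#include "FreeRTOS.h"
#include "task.h"

void vExampleAllocation( void )    /* Illustrative name - not part of the kernel. */
{
    /* heap_4 adds the BlockLink_t header and alignment padding internally,
     * so more than 100 bytes of heap are consumed by this request. */
    uint8_t * pucBuffer = pvPortMalloc( 100 );

    if( pucBuffer != NULL )
    {
        /* ... use the buffer ... */
        vPortFree( pucBuffer );
    }

    /* Current free space, and the lowest it has ever been. */
    ( void ) xPortGetFreeHeapSize();
    ( void ) xPortGetMinimumEverFreeHeapSize();
}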
*/ - heapALLOCATE_BLOCK( pxBlock ); - pxBlock->pxNextFreeBlock = NULL; - xNumberOfSuccessfulAllocations++; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - traceMALLOC( pvReturn, xWantedSize ); - } - ( void ) xTaskResumeAll(); - - #if ( configUSE_MALLOC_FAILED_HOOK == 1 ) - { - if( pvReturn == NULL ) - { - vApplicationMallocFailedHook(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #endif /* if ( configUSE_MALLOC_FAILED_HOOK == 1 ) */ - - configASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) portBYTE_ALIGNMENT_MASK ) == 0 ); - return pvReturn; -} -/*-----------------------------------------------------------*/ - -void vPortFree( void * pv ) -{ - uint8_t * puc = ( uint8_t * ) pv; - BlockLink_t * pxLink; - - if( pv != NULL ) - { - /* The memory being freed will have an BlockLink_t structure immediately - * before it. */ - puc -= xHeapStructSize; - - /* This casting is to keep the compiler from issuing warnings. */ - pxLink = ( void * ) puc; - - configASSERT( heapBLOCK_IS_ALLOCATED( pxLink ) != 0 ); - configASSERT( pxLink->pxNextFreeBlock == NULL ); - - if( heapBLOCK_IS_ALLOCATED( pxLink ) != 0 ) - { - if( pxLink->pxNextFreeBlock == NULL ) - { - /* The block is being returned to the heap - it is no longer - * allocated. */ - heapFREE_BLOCK( pxLink ); - #if ( configHEAP_CLEAR_MEMORY_ON_FREE == 1 ) - { - ( void ) memset( puc + xHeapStructSize, 0, pxLink->xBlockSize - xHeapStructSize ); - } - #endif - - vTaskSuspendAll(); - { - /* Add this block to the list of free blocks. */ - xFreeBytesRemaining += pxLink->xBlockSize; - traceFREE( pv, pxLink->xBlockSize ); - prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) ); - xNumberOfSuccessfulFrees++; - } - ( void ) xTaskResumeAll(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } -} -/*-----------------------------------------------------------*/ - -size_t xPortGetFreeHeapSize( void ) -{ - return xFreeBytesRemaining; -} -/*-----------------------------------------------------------*/ - -size_t xPortGetMinimumEverFreeHeapSize( void ) -{ - return xMinimumEverFreeBytesRemaining; -} -/*-----------------------------------------------------------*/ - -void vPortInitialiseBlocks( void ) -{ - /* This just exists to keep the linker quiet. */ -} -/*-----------------------------------------------------------*/ - -void * pvPortCalloc( size_t xNum, - size_t xSize ) -{ - void * pv = NULL; - - if( heapMULTIPLY_WILL_OVERFLOW( xNum, xSize ) == 0 ) - { - pv = pvPortMalloc( xNum * xSize ); - - if( pv != NULL ) - { - ( void ) memset( pv, 0, xNum * xSize ); - } - } - - return pv; -} -/*-----------------------------------------------------------*/ - -static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */ -{ - BlockLink_t * pxFirstFreeBlock; - uint8_t * pucAlignedHeap; - portPOINTER_SIZE_TYPE uxAddress; - size_t xTotalHeapSize = configTOTAL_HEAP_SIZE; - - /* Ensure the heap starts on a correctly aligned boundary. */ - uxAddress = ( portPOINTER_SIZE_TYPE ) ucHeap; - - if( ( uxAddress & portBYTE_ALIGNMENT_MASK ) != 0 ) - { - uxAddress += ( portBYTE_ALIGNMENT - 1 ); - uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ); - xTotalHeapSize -= uxAddress - ( portPOINTER_SIZE_TYPE ) ucHeap; - } - - pucAlignedHeap = ( uint8_t * ) uxAddress; - - /* xStart is used to hold a pointer to the first item in the list of free - * blocks. The void cast is used to prevent compiler warnings. 
*/ - xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap; - xStart.xBlockSize = ( size_t ) 0; - - /* pxEnd is used to mark the end of the list of free blocks and is inserted - * at the end of the heap space. */ - uxAddress = ( ( portPOINTER_SIZE_TYPE ) pucAlignedHeap ) + xTotalHeapSize; - uxAddress -= xHeapStructSize; - uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ); - pxEnd = ( BlockLink_t * ) uxAddress; - pxEnd->xBlockSize = 0; - pxEnd->pxNextFreeBlock = NULL; - - /* To start with there is a single free block that is sized to take up the - * entire heap space, minus the space taken by pxEnd. */ - #if defined( __clang__ ) - /* Alignment checked above with calculations on uxAddress */ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wcast-align" - #endif /* defined(__clang__) */ - pxFirstFreeBlock = ( BlockLink_t * ) pucAlignedHeap; - #if defined( __clang__ ) - #pragma clang diagnostic pop - #endif /* defined(__clang__) */ - pxFirstFreeBlock->xBlockSize = ( size_t ) ( uxAddress - ( portPOINTER_SIZE_TYPE ) pxFirstFreeBlock ); - pxFirstFreeBlock->pxNextFreeBlock = pxEnd; - - /* Only one block exists - and it covers the entire usable heap space. */ - xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; - xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; -} -/*-----------------------------------------------------------*/ - -static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) /* PRIVILEGED_FUNCTION */ -{ - BlockLink_t * pxIterator; - uint8_t * puc; - - /* Iterate through the list until a block is found that has a higher address - * than the block being inserted. */ - for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock ) - { - /* Nothing to do here, just iterate to the right position. */ - } - - /* Do the block being inserted, and the block it is being inserted after - * make a contiguous block of memory? */ - puc = ( uint8_t * ) pxIterator; - - if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert ) - { - pxIterator->xBlockSize += pxBlockToInsert->xBlockSize; - pxBlockToInsert = pxIterator; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - /* Do the block being inserted, and the block it is being inserted before - * make a contiguous block of memory? */ - puc = ( uint8_t * ) pxBlockToInsert; - - if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock ) - { - if( pxIterator->pxNextFreeBlock != pxEnd ) - { - /* Form one big block from the two blocks. */ - pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize; - pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock; - } - else - { - pxBlockToInsert->pxNextFreeBlock = pxEnd; - } - } - else - { - pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock; - } - - /* If the block being inserted plugged a gab, so was merged with the block - * before and the block after, then it's pxNextFreeBlock pointer will have - * already been set, and should not be set here as that would make it point - * to itself. */ - if( pxIterator != pxBlockToInsert ) - { - pxIterator->pxNextFreeBlock = pxBlockToInsert; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } -} -/*-----------------------------------------------------------*/ - -void vPortGetHeapStats( HeapStats_t * pxHeapStats ) -{ - BlockLink_t * pxBlock; - size_t xBlocks = 0, xMaxSize = 0, xMinSize = portMAX_DELAY; /* portMAX_DELAY used as a portable way of getting the maximum value. 
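vPortGetHeapStats() fills a HeapStats_t snapshot from a walk of the free list. A sketch of how an application might poll it for runtime monitoring (assumes a configured FreeRTOS project; the function name is illustrative):

#include "FreeRTOS.h"
#include "task.h"

void vLogHeapUsage( void )    /* Illustrative name. */
{
    HeapStats_t xStats;

    vPortGetHeapStats( &xStats );

    /* Largest single pvPortMalloc() that could currently succeed. */
    ( void ) xStats.xSizeOfLargestFreeBlockInBytes;

    /* Worst-case free space observed since the heap was initialised. */
    ( void ) xStats.xMinimumEverFreeBytesRemaining;

    /* Many small blocks with little contiguous space hints at fragmentation. */
    ( void ) xStats.xNumberOfFreeBlocks;
}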
*/ - - vTaskSuspendAll(); - { - pxBlock = xStart.pxNextFreeBlock; - - /* pxBlock will be NULL if the heap has not been initialised. The heap - * is initialised automatically when the first allocation is made. */ - if( pxBlock != NULL ) - { - while( pxBlock != pxEnd ) - { - /* Increment the number of blocks and record the largest block seen - * so far. */ - xBlocks++; - - if( pxBlock->xBlockSize > xMaxSize ) - { - xMaxSize = pxBlock->xBlockSize; - } - - if( pxBlock->xBlockSize < xMinSize ) - { - xMinSize = pxBlock->xBlockSize; - } - - /* Move to the next block in the chain until the last block is - * reached. */ - pxBlock = pxBlock->pxNextFreeBlock; - } - } - } - ( void ) xTaskResumeAll(); - - pxHeapStats->xSizeOfLargestFreeBlockInBytes = xMaxSize; - pxHeapStats->xSizeOfSmallestFreeBlockInBytes = xMinSize; - pxHeapStats->xNumberOfFreeBlocks = xBlocks; - - taskENTER_CRITICAL(); - { - pxHeapStats->xAvailableHeapSpaceInBytes = xFreeBytesRemaining; - pxHeapStats->xNumberOfSuccessfulAllocations = xNumberOfSuccessfulAllocations; - pxHeapStats->xNumberOfSuccessfulFrees = xNumberOfSuccessfulFrees; - pxHeapStats->xMinimumEverFreeBytesRemaining = xMinimumEverFreeBytesRemaining; - } - taskEXIT_CRITICAL(); -} -/*-----------------------------------------------------------*/ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* + * A sample implementation of pvPortMalloc() and vPortFree() that combines + * (coalescences) adjacent memory blocks as they are freed, and in so doing + * limits memory fragmentation. + * + * See heap_1.c, heap_2.c and heap_3.c for alternative implementations, and the + * memory management pages of https://www.FreeRTOS.org for more information. + */ +#include +#include + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. 
*/ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#include "FreeRTOS.h" +#include "task.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#if ( configSUPPORT_DYNAMIC_ALLOCATION == 0 ) + #error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0 +#endif + +#ifndef configHEAP_CLEAR_MEMORY_ON_FREE + #define configHEAP_CLEAR_MEMORY_ON_FREE 0 +#endif + +/* Block sizes must not get too small. */ +#define heapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) ) + +/* Assumes 8bit bytes! */ +#define heapBITS_PER_BYTE ( ( size_t ) 8 ) + +/* Max value that fits in a size_t type. */ +#define heapSIZE_MAX ( ~( ( size_t ) 0 ) ) + +/* Check if multiplying a and b will result in overflow. */ +#define heapMULTIPLY_WILL_OVERFLOW( a, b ) ( ( ( a ) > 0 ) && ( ( b ) > ( heapSIZE_MAX / ( a ) ) ) ) + +/* Check if adding a and b will result in overflow. */ +#define heapADD_WILL_OVERFLOW( a, b ) ( ( a ) > ( heapSIZE_MAX - ( b ) ) ) + +/* MSB of the xBlockSize member of an BlockLink_t structure is used to track + * the allocation status of a block. When MSB of the xBlockSize member of + * an BlockLink_t structure is set then the block belongs to the application. + * When the bit is free the block is still part of the free heap space. */ +#define heapBLOCK_ALLOCATED_BITMASK ( ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * heapBITS_PER_BYTE ) - 1 ) ) +#define heapBLOCK_SIZE_IS_VALID( xBlockSize ) ( ( ( xBlockSize ) & heapBLOCK_ALLOCATED_BITMASK ) == 0 ) +#define heapBLOCK_IS_ALLOCATED( pxBlock ) ( ( ( pxBlock->xBlockSize ) & heapBLOCK_ALLOCATED_BITMASK ) != 0 ) +#define heapALLOCATE_BLOCK( pxBlock ) ( ( pxBlock->xBlockSize ) |= heapBLOCK_ALLOCATED_BITMASK ) +#define heapFREE_BLOCK( pxBlock ) ( ( pxBlock->xBlockSize ) &= ~heapBLOCK_ALLOCATED_BITMASK ) + +/*-----------------------------------------------------------*/ + +/* Allocate the memory for the heap. */ +#if ( configAPPLICATION_ALLOCATED_HEAP == 1 ) + +/* The application writer has already defined the array used for the RTOS +* heap - probably so it can be placed in a special segment or address. */ + extern uint8_t ucHeap[ configTOTAL_HEAP_SIZE ]; +#else + PRIVILEGED_DATA static uint8_t ucHeap[ configTOTAL_HEAP_SIZE ]; +#endif /* configAPPLICATION_ALLOCATED_HEAP */ + +/* Define the linked list structure. This is used to link free blocks in order + * of their memory address. */ +typedef struct A_BLOCK_LINK +{ + struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */ + size_t xBlockSize; /**< The size of the free block. */ +} BlockLink_t; + +/*-----------------------------------------------------------*/ + +/* + * Inserts a block of memory that is being freed into the correct position in + * the list of free memory blocks. The block being freed will be merged with + * the block in front it and/or the block behind it if the memory blocks are + * adjacent to each other. + */ +static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) PRIVILEGED_FUNCTION; + +/* + * Called automatically to setup the required heap structures the first time + * pvPortMalloc() is called. + */ +static void prvHeapInit( void ) PRIVILEGED_FUNCTION; + +/*-----------------------------------------------------------*/ + +/* The size of the structure placed at the beginning of each allocated memory + * block must by correctly byte aligned. 
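When configAPPLICATION_ALLOCATED_HEAP is 1 the ucHeap array above is only declared extern, so the application must define it, typically to pin the heap to a particular RAM region. A GCC-style sketch under that assumption (the section name is only an example and must match the application's linker script):

#include <stdint.h>
#include "FreeRTOS.h"

/* Definition supplied by the application; the size must match
 * configTOTAL_HEAP_SIZE from FreeRTOSConfig.h. */
uint8_t ucHeap[ configTOTAL_HEAP_SIZE ] __attribute__( ( section( ".heap_ram" ) ) );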
*/ +static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( portBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); + +/* Create a couple of list links to mark the start and end of the list. */ +PRIVILEGED_DATA static BlockLink_t xStart; +PRIVILEGED_DATA static BlockLink_t * pxEnd = NULL; + +/* Keeps track of the number of calls to allocate and free memory as well as the + * number of free bytes remaining, but says nothing about fragmentation. */ +PRIVILEGED_DATA static size_t xFreeBytesRemaining = 0U; +PRIVILEGED_DATA static size_t xMinimumEverFreeBytesRemaining = 0U; +PRIVILEGED_DATA static size_t xNumberOfSuccessfulAllocations = 0; +PRIVILEGED_DATA static size_t xNumberOfSuccessfulFrees = 0; + +/*-----------------------------------------------------------*/ + +void * pvPortMalloc( size_t xWantedSize ) +{ + BlockLink_t * pxBlock; + BlockLink_t * pxPreviousBlock; + BlockLink_t * pxNewBlockLink; + void * pvReturn = NULL; + size_t xAdditionalRequiredSize; + + vTaskSuspendAll(); + { + /* If this is the first call to malloc then the heap will require + * initialisation to setup the list of free blocks. */ + if( pxEnd == NULL ) + { + prvHeapInit(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xWantedSize > 0 ) + { + /* The wanted size must be increased so it can contain a BlockLink_t + * structure in addition to the requested amount of bytes. Some + * additional increment may also be needed for alignment. */ + xAdditionalRequiredSize = xHeapStructSize + portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ); + + if( heapADD_WILL_OVERFLOW( xWantedSize, xAdditionalRequiredSize ) == 0 ) + { + xWantedSize += xAdditionalRequiredSize; + } + else + { + xWantedSize = 0; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Check the block size we are trying to allocate is not so large that the + * top bit is set. The top bit of the block size member of the BlockLink_t + * structure is used to determine who owns the block - the application or + * the kernel, so it must be free. */ + if( heapBLOCK_SIZE_IS_VALID( xWantedSize ) != 0 ) + { + if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) ) + { + /* Traverse the list from the start (lowest address) block until + * one of adequate size is found. */ + pxPreviousBlock = &xStart; + pxBlock = xStart.pxNextFreeBlock; + + while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) + { + pxPreviousBlock = pxBlock; + pxBlock = pxBlock->pxNextFreeBlock; + } + + /* If the end marker was reached then a block of adequate size + * was not found. */ + if( pxBlock != pxEnd ) + { + /* Return the memory space pointed to - jumping over the + * BlockLink_t structure at its start. */ + pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize ); + + /* This block is being returned for use so must be taken out + * of the list of free blocks. */ + pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; + + /* If the block is larger than required it can be split into + * two. */ + if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE ) + { + /* This block is to be split into two. Create a new + * block following the number of bytes requested. The void + * cast is used to prevent byte alignment warnings from the + * compiler. 
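To put numbers on the padding and splitting logic described above, a worked example; portBYTE_ALIGNMENT of 8 is typical, and xHeapStructSize is taken as 16 purely for illustration since its real value depends on the port:

/* Example, with portBYTE_ALIGNMENT = 8 and (for illustration) xHeapStructSize = 16:
 *
 *   pvPortMalloc( 100 ):
 *     xAdditionalRequiredSize = 16 + 8 - ( 100 & 0x07 ) = 20
 *     xWantedSize             = 100 + 20 = 120   (16-byte header + 104 usable bytes, 8-byte aligned)
 *
 *   A 512-byte free block is found:
 *     512 - 120 = 392, which is larger than heapMINIMUM_BLOCK_SIZE ( 2 * 16 = 32 ),
 *     so the block is split into a 120-byte allocated block and a 392-byte block
 *     that is returned to the free list. */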
*/ + pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); + configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + + /* Calculate the sizes of two blocks split from the + * single block. */ + pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; + pxBlock->xBlockSize = xWantedSize; + + /* Insert the new block into the list of free blocks. */ + prvInsertBlockIntoFreeList( pxNewBlockLink ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xFreeBytesRemaining -= pxBlock->xBlockSize; + + if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) + { + xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The block is being returned - it is allocated and owned + * by the application and has no "next" block. */ + heapALLOCATE_BLOCK( pxBlock ); + pxBlock->pxNextFreeBlock = NULL; + xNumberOfSuccessfulAllocations++; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceMALLOC( pvReturn, xWantedSize ); + } + ( void ) xTaskResumeAll(); + + #if ( configUSE_MALLOC_FAILED_HOOK == 1 ) + { + if( pvReturn == NULL ) + { + vApplicationMallocFailedHook(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* if ( configUSE_MALLOC_FAILED_HOOK == 1 ) */ + + configASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) portBYTE_ALIGNMENT_MASK ) == 0 ); + return pvReturn; +} +/*-----------------------------------------------------------*/ + +void vPortFree( void * pv ) +{ + uint8_t * puc = ( uint8_t * ) pv; + BlockLink_t * pxLink; + + if( pv != NULL ) + { + /* The memory being freed will have an BlockLink_t structure immediately + * before it. */ + puc -= xHeapStructSize; + + /* This casting is to keep the compiler from issuing warnings. */ + pxLink = ( void * ) puc; + + configASSERT( heapBLOCK_IS_ALLOCATED( pxLink ) != 0 ); + configASSERT( pxLink->pxNextFreeBlock == NULL ); + + if( heapBLOCK_IS_ALLOCATED( pxLink ) != 0 ) + { + if( pxLink->pxNextFreeBlock == NULL ) + { + /* The block is being returned to the heap - it is no longer + * allocated. */ + heapFREE_BLOCK( pxLink ); + #if ( configHEAP_CLEAR_MEMORY_ON_FREE == 1 ) + { + ( void ) memset( puc + xHeapStructSize, 0, pxLink->xBlockSize - xHeapStructSize ); + } + #endif + + vTaskSuspendAll(); + { + /* Add this block to the list of free blocks. */ + xFreeBytesRemaining += pxLink->xBlockSize; + traceFREE( pv, pxLink->xBlockSize ); + prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) ); + xNumberOfSuccessfulFrees++; + } + ( void ) xTaskResumeAll(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } +} +/*-----------------------------------------------------------*/ + +size_t xPortGetFreeHeapSize( void ) +{ + return xFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ + +size_t xPortGetMinimumEverFreeHeapSize( void ) +{ + return xMinimumEverFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ + +void vPortInitialiseBlocks( void ) +{ + /* This just exists to keep the linker quiet. 
*/ +} +/*-----------------------------------------------------------*/ + +void * pvPortCalloc( size_t xNum, + size_t xSize ) +{ + void * pv = NULL; + + if( heapMULTIPLY_WILL_OVERFLOW( xNum, xSize ) == 0 ) + { + pv = pvPortMalloc( xNum * xSize ); + + if( pv != NULL ) + { + ( void ) memset( pv, 0, xNum * xSize ); + } + } + + return pv; +} +/*-----------------------------------------------------------*/ + +static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */ +{ + BlockLink_t * pxFirstFreeBlock; + uint8_t * pucAlignedHeap; + portPOINTER_SIZE_TYPE uxAddress; + size_t xTotalHeapSize = configTOTAL_HEAP_SIZE; + + /* Ensure the heap starts on a correctly aligned boundary. */ + uxAddress = ( portPOINTER_SIZE_TYPE ) ucHeap; + + if( ( uxAddress & portBYTE_ALIGNMENT_MASK ) != 0 ) + { + uxAddress += ( portBYTE_ALIGNMENT - 1 ); + uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ); + xTotalHeapSize -= uxAddress - ( portPOINTER_SIZE_TYPE ) ucHeap; + } + + pucAlignedHeap = ( uint8_t * ) uxAddress; + + /* xStart is used to hold a pointer to the first item in the list of free + * blocks. The void cast is used to prevent compiler warnings. */ + xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap; + xStart.xBlockSize = ( size_t ) 0; + + /* pxEnd is used to mark the end of the list of free blocks and is inserted + * at the end of the heap space. */ + uxAddress = ( ( portPOINTER_SIZE_TYPE ) pucAlignedHeap ) + xTotalHeapSize; + uxAddress -= xHeapStructSize; + uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ); + pxEnd = ( BlockLink_t * ) uxAddress; + pxEnd->xBlockSize = 0; + pxEnd->pxNextFreeBlock = NULL; + + /* To start with there is a single free block that is sized to take up the + * entire heap space, minus the space taken by pxEnd. */ + #if defined( __clang__ ) + /* Alignment checked above with calculations on uxAddress */ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wcast-align" + #endif /* defined(__clang__) */ + pxFirstFreeBlock = ( BlockLink_t * ) pucAlignedHeap; + #if defined( __clang__ ) + #pragma clang diagnostic pop + #endif /* defined(__clang__) */ + pxFirstFreeBlock->xBlockSize = ( size_t ) ( uxAddress - ( portPOINTER_SIZE_TYPE ) pxFirstFreeBlock ); + pxFirstFreeBlock->pxNextFreeBlock = pxEnd; + + /* Only one block exists - and it covers the entire usable heap space. */ + xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; +} +/*-----------------------------------------------------------*/ + +static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) /* PRIVILEGED_FUNCTION */ +{ + BlockLink_t * pxIterator; + uint8_t * puc; + + /* Iterate through the list until a block is found that has a higher address + * than the block being inserted. */ + for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock ) + { + /* Nothing to do here, just iterate to the right position. */ + } + + /* Do the block being inserted, and the block it is being inserted after + * make a contiguous block of memory? */ + puc = ( uint8_t * ) pxIterator; + + if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert ) + { + pxIterator->xBlockSize += pxBlockToInsert->xBlockSize; + pxBlockToInsert = pxIterator; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Do the block being inserted, and the block it is being inserted before + * make a contiguous block of memory? 
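Both adjacency tests in prvInsertBlockIntoFreeList() rely on the free list being kept in address order; a concrete illustration with hypothetical addresses:

/* Illustration (addresses and sizes are hypothetical):
 *   Free block A at 0x20001000 with xBlockSize 0x40 ends at 0x20001040.
 *   A 0x40-byte block being freed at 0x20001040 is contiguous with A, so the
 *   two are merged into a single 0x80-byte free block.
 *   If the freed block instead started at 0x20001050, the 0x10-byte gap
 *   (normally part of a still-allocated block) keeps the two list entries
 *   separate. */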
*/ + puc = ( uint8_t * ) pxBlockToInsert; + + if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock ) + { + if( pxIterator->pxNextFreeBlock != pxEnd ) + { + /* Form one big block from the two blocks. */ + pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize; + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock; + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxEnd; + } + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock; + } + + /* If the block being inserted plugged a gab, so was merged with the block + * before and the block after, then it's pxNextFreeBlock pointer will have + * already been set, and should not be set here as that would make it point + * to itself. */ + if( pxIterator != pxBlockToInsert ) + { + pxIterator->pxNextFreeBlock = pxBlockToInsert; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*-----------------------------------------------------------*/ + +void vPortGetHeapStats( HeapStats_t * pxHeapStats ) +{ + BlockLink_t * pxBlock; + size_t xBlocks = 0, xMaxSize = 0, xMinSize = portMAX_DELAY; /* portMAX_DELAY used as a portable way of getting the maximum value. */ + + vTaskSuspendAll(); + { + pxBlock = xStart.pxNextFreeBlock; + + /* pxBlock will be NULL if the heap has not been initialised. The heap + * is initialised automatically when the first allocation is made. */ + if( pxBlock != NULL ) + { + while( pxBlock != pxEnd ) + { + /* Increment the number of blocks and record the largest block seen + * so far. */ + xBlocks++; + + if( pxBlock->xBlockSize > xMaxSize ) + { + xMaxSize = pxBlock->xBlockSize; + } + + if( pxBlock->xBlockSize < xMinSize ) + { + xMinSize = pxBlock->xBlockSize; + } + + /* Move to the next block in the chain until the last block is + * reached. */ + pxBlock = pxBlock->pxNextFreeBlock; + } + } + } + ( void ) xTaskResumeAll(); + + pxHeapStats->xSizeOfLargestFreeBlockInBytes = xMaxSize; + pxHeapStats->xSizeOfSmallestFreeBlockInBytes = xMinSize; + pxHeapStats->xNumberOfFreeBlocks = xBlocks; + + taskENTER_CRITICAL(); + { + pxHeapStats->xAvailableHeapSpaceInBytes = xFreeBytesRemaining; + pxHeapStats->xNumberOfSuccessfulAllocations = xNumberOfSuccessfulAllocations; + pxHeapStats->xNumberOfSuccessfulFrees = xNumberOfSuccessfulFrees; + pxHeapStats->xMinimumEverFreeBytesRemaining = xMinimumEverFreeBytesRemaining; + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ diff --git a/queue.c b/queue.c index 7c14e3c8501..ca6bfbc60dc 100644 --- a/queue.c +++ b/queue.c @@ -1,2796 +1,2796 @@ -/* - * FreeRTOS Kernel - * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy of - * this software and associated documentation files (the "Software"), to deal in - * the Software without restriction, including without limitation the rights to - * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of - * the Software, and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS - * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR - * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER - * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * https://www.FreeRTOS.org - * https://github.com/FreeRTOS - * - */ - -#include -#include - -/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining - * all the API functions to use the MPU wrappers. That should only be done when - * task.h is included from an application file. */ -#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE - -#include "FreeRTOS.h" -#include "task.h" -#include "queue.h" - -/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified - * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined - * for the header files above, but not in this file, in order to generate the - * correct privileged Vs unprivileged linkage and placement. */ -#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */ - - -/* Constants used with the cRxLock and cTxLock structure members. */ -#define queueUNLOCKED ( ( int8_t ) -1 ) -#define queueLOCKED_UNMODIFIED ( ( int8_t ) 0 ) -#define queueINT8_MAX ( ( int8_t ) 127 ) - -/* When the Queue_t structure is used to represent a base queue its pcHead and - * pcTail members are used as pointers into the queue storage area. When the - * Queue_t structure is used to represent a mutex pcHead and pcTail pointers are - * not necessary, and the pcHead pointer is set to NULL to indicate that the - * structure instead holds a pointer to the mutex holder (if any). Map alternative - * names to the pcHead and structure member to ensure the readability of the code - * is maintained. The QueuePointers_t and SemaphoreData_t types are used to form - * a union as their usage is mutually exclusive dependent on what the queue is - * being used for. */ -#define uxQueueType pcHead -#define queueQUEUE_IS_MUTEX NULL - -typedef struct QueuePointers -{ - int8_t * pcTail; /**< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */ - int8_t * pcReadFrom; /**< Points to the last place that a queued item was read from when the structure is used as a queue. */ -} QueuePointers_t; - -typedef struct SemaphoreData -{ - TaskHandle_t xMutexHolder; /**< The handle of the task that holds the mutex. */ - UBaseType_t uxRecursiveCallCount; /**< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */ -} SemaphoreData_t; - -/* Semaphores do not actually store or copy data, so have an item size of - * zero. */ -#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 ) -#define queueMUTEX_GIVE_BLOCK_TIME ( ( TickType_t ) 0U ) - -#if ( configUSE_PREEMPTION == 0 ) - -/* If the cooperative scheduler is being used then a yield should not be - * performed just because a higher priority task has been woken. */ - #define queueYIELD_IF_USING_PREEMPTION() -#else - #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() -#endif - -/* - * Definition of the queue used by the scheduler. - * Items are queued by copy, not reference. 
See the following link for the - * rationale: https://www.FreeRTOS.org/Embedded-RTOS-Queues.html - */ -typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */ -{ - int8_t * pcHead; /**< Points to the beginning of the queue storage area. */ - int8_t * pcWriteTo; /**< Points to the free next place in the storage area. */ - - union - { - QueuePointers_t xQueue; /**< Data required exclusively when this structure is used as a queue. */ - SemaphoreData_t xSemaphore; /**< Data required exclusively when this structure is used as a semaphore. */ - } u; - - List_t xTasksWaitingToSend; /**< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */ - List_t xTasksWaitingToReceive; /**< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */ - - volatile UBaseType_t uxMessagesWaiting; /**< The number of items currently in the queue. */ - UBaseType_t uxLength; /**< The length of the queue defined as the number of items it will hold, not the number of bytes. */ - UBaseType_t uxItemSize; /**< The size of each items that the queue will hold. */ - - volatile int8_t cRxLock; /**< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ - volatile int8_t cTxLock; /**< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ - - #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) - uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */ - #endif - - #if ( configUSE_QUEUE_SETS == 1 ) - struct QueueDefinition * pxQueueSetContainer; - #endif - - #if ( configUSE_TRACE_FACILITY == 1 ) - UBaseType_t uxQueueNumber; - uint8_t ucQueueType; - #endif -} xQUEUE; - -/* The old xQUEUE name is maintained above then typedefed to the new Queue_t - * name below to enable the use of older kernel aware debuggers. */ -typedef xQUEUE Queue_t; - -/*-----------------------------------------------------------*/ - -/* - * The queue registry is just a means for kernel aware debuggers to locate - * queue structures. It has no other purpose so is an optional component. - */ -#if ( configQUEUE_REGISTRY_SIZE > 0 ) - -/* The type stored within the queue registry array. This allows a name - * to be assigned to each queue making kernel aware debugging a little - * more user friendly. */ - typedef struct QUEUE_REGISTRY_ITEM - { - const char * pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ - QueueHandle_t xHandle; - } xQueueRegistryItem; - -/* The old xQueueRegistryItem name is maintained above then typedefed to the - * new xQueueRegistryItem name below to enable the use of older kernel aware - * debuggers. */ - typedef xQueueRegistryItem QueueRegistryItem_t; - -/* The queue registry is simply an array of QueueRegistryItem_t structures. - * The pcQueueName member of a structure being NULL is indicative of the - * array position being vacant. */ - PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ]; - -#endif /* configQUEUE_REGISTRY_SIZE */ - -/* - * Unlocks a queue locked by a call to prvLockQueue. 
Locking a queue does not - * prevent an ISR from adding or removing items to the queue, but does prevent - * an ISR from removing tasks from the queue event lists. If an ISR finds a - * queue is locked it will instead increment the appropriate queue lock count - * to indicate that a task may require unblocking. When the queue in unlocked - * these lock counts are inspected, and the appropriate action taken. - */ -static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; - -/* - * Uses a critical section to determine if there is any data in a queue. - * - * @return pdTRUE if the queue contains no items, otherwise pdFALSE. - */ -static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; - -/* - * Uses a critical section to determine if there is any space in a queue. - * - * @return pdTRUE if there is no space, otherwise pdFALSE; - */ -static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; - -/* - * Copies an item into the queue, either at the front of the queue or the - * back of the queue. - */ -static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, - const void * pvItemToQueue, - const BaseType_t xPosition ) PRIVILEGED_FUNCTION; - -/* - * Copies an item out of a queue. - */ -static void prvCopyDataFromQueue( Queue_t * const pxQueue, - void * const pvBuffer ) PRIVILEGED_FUNCTION; - -#if ( configUSE_QUEUE_SETS == 1 ) - -/* - * Checks to see if a queue is a member of a queue set, and if so, notifies - * the queue set that the queue contains data. - */ - static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; -#endif - -/* - * Called after a Queue_t structure has been allocated either statically or - * dynamically to fill in the structure's members. - */ -static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, - const UBaseType_t uxItemSize, - uint8_t * pucQueueStorage, - const uint8_t ucQueueType, - Queue_t * pxNewQueue ) PRIVILEGED_FUNCTION; - -/* - * Mutexes are a special type of queue. When a mutex is created, first the - * queue is created, then prvInitialiseMutex() is called to configure the queue - * as a mutex. - */ -#if ( configUSE_MUTEXES == 1 ) - static void prvInitialiseMutex( Queue_t * pxNewQueue ) PRIVILEGED_FUNCTION; -#endif - -#if ( configUSE_MUTEXES == 1 ) - -/* - * If a task waiting for a mutex causes the mutex holder to inherit a - * priority, but the waiting task times out, then the holder should - * disinherit the priority - but only down to the highest priority of any - * other tasks that are waiting for the same mutex. This function returns - * that priority. - */ - static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; -#endif -/*-----------------------------------------------------------*/ - -/* - * Macro to mark a queue as locked. Locking a queue prevents an ISR from - * accessing the queue event lists. - */ -#define prvLockQueue( pxQueue ) \ - taskENTER_CRITICAL(); \ - { \ - if( ( pxQueue )->cRxLock == queueUNLOCKED ) \ - { \ - ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \ - } \ - if( ( pxQueue )->cTxLock == queueUNLOCKED ) \ - { \ - ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \ - } \ - } \ - taskEXIT_CRITICAL() - -/* - * Macro to increment cTxLock member of the queue data structure. It is - * capped at the number of tasks in the system as we cannot unblock more - * tasks than the number of tasks in the system. 
- */ -#define prvIncrementQueueTxLock( pxQueue, cTxLock ) \ - do { \ - const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks(); \ - if( ( UBaseType_t ) ( cTxLock ) < uxNumberOfTasks ) \ - { \ - configASSERT( ( cTxLock ) != queueINT8_MAX ); \ - ( pxQueue )->cTxLock = ( int8_t ) ( ( cTxLock ) + ( int8_t ) 1 ); \ - } \ - } while( 0 ) - -/* - * Macro to increment cRxLock member of the queue data structure. It is - * capped at the number of tasks in the system as we cannot unblock more - * tasks than the number of tasks in the system. - */ -#define prvIncrementQueueRxLock( pxQueue, cRxLock ) \ - do { \ - const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks(); \ - if( ( UBaseType_t ) ( cRxLock ) < uxNumberOfTasks ) \ - { \ - configASSERT( ( cRxLock ) != queueINT8_MAX ); \ - ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \ - } \ - } while( 0 ) -/*-----------------------------------------------------------*/ - -BaseType_t xQueueGenericReset( QueueHandle_t xQueue, - BaseType_t xNewQueue ) -{ - BaseType_t xReturn = pdPASS; - Queue_t * const pxQueue = xQueue; - - configASSERT( pxQueue ); - - if( ( pxQueue != NULL ) && - ( pxQueue->uxLength >= 1U ) && - /* Check for multiplication overflow. */ - ( ( SIZE_MAX / pxQueue->uxLength ) >= pxQueue->uxItemSize ) ) - { - taskENTER_CRITICAL(); - { - pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ - pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U; - pxQueue->pcWriteTo = pxQueue->pcHead; - pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ - pxQueue->cRxLock = queueUNLOCKED; - pxQueue->cTxLock = queueUNLOCKED; - - if( xNewQueue == pdFALSE ) - { - /* If there are tasks blocked waiting to read from the queue, then - * the tasks will remain blocked as after this function exits the queue - * will still be empty. If there are tasks blocked waiting to write to - * the queue, then one should be unblocked as after this function exits - * it will be possible to write to it. */ - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) - { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) - { - queueYIELD_IF_USING_PREEMPTION(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - /* Ensure the event queues start in the correct state. */ - vListInitialise( &( pxQueue->xTasksWaitingToSend ) ); - vListInitialise( &( pxQueue->xTasksWaitingToReceive ) ); - } - } - taskEXIT_CRITICAL(); - } - else - { - xReturn = pdFAIL; - } - - configASSERT( xReturn != pdFAIL ); - - /* A value is returned for calling semantic consistency with previous - * versions. */ - return xReturn; -} -/*-----------------------------------------------------------*/ - -#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) - - QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, - const UBaseType_t uxItemSize, - uint8_t * pucQueueStorage, - StaticQueue_t * pxStaticQueue, - const uint8_t ucQueueType ) - { - Queue_t * pxNewQueue = NULL; - - /* The StaticQueue_t structure and the queue storage area must be - * supplied. 
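At the application level these requirements are normally met through the xQueueCreateStatic() wrapper; a minimal sketch under the assumption that configSUPPORT_STATIC_ALLOCATION is 1 (all names are illustrative):

#include <stdint.h>
#include "FreeRTOS.h"
#include "queue.h"

#define EXAMPLE_QUEUE_LENGTH    8
#define EXAMPLE_ITEM_SIZE       sizeof( uint32_t )

/* Both the control block and the storage must outlive the queue, so they are
 * given static storage duration here. */
static StaticQueue_t xQueueBuffer;
static uint8_t ucQueueStorage[ EXAMPLE_QUEUE_LENGTH * EXAMPLE_ITEM_SIZE ];

void vCreateExampleQueue( void )
{
    QueueHandle_t xQueue = xQueueCreateStatic( EXAMPLE_QUEUE_LENGTH,
                                               EXAMPLE_ITEM_SIZE,
                                               ucQueueStorage,
                                               &xQueueBuffer );

    configASSERT( xQueue != NULL );
    ( void ) xQueue;
}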
*/ - configASSERT( pxStaticQueue ); - - if( ( uxQueueLength > ( UBaseType_t ) 0 ) && - ( pxStaticQueue != NULL ) && - - /* A queue storage area should be provided if the item size is not 0, and - * should not be provided if the item size is 0. */ - ( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) ) && - ( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) ) ) - { - #if ( configASSERT_DEFINED == 1 ) - { - /* Sanity check that the size of the structure used to declare a - * variable of type StaticQueue_t or StaticSemaphore_t equals the size of - * the real queue and semaphore structures. */ - volatile size_t xSize = sizeof( StaticQueue_t ); - - /* This assertion cannot be branch covered in unit tests */ - configASSERT( xSize == sizeof( Queue_t ) ); /* LCOV_EXCL_BR_LINE */ - ( void ) xSize; /* Keeps lint quiet when configASSERT() is not defined. */ - } - #endif /* configASSERT_DEFINED */ - - /* The address of a statically allocated queue was passed in, use it. - * The address of a statically allocated storage area was also passed in - * but is already set. */ - pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */ - - #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) - { - /* Queues can be allocated wither statically or dynamically, so - * note this queue was allocated statically in case the queue is - * later deleted. */ - pxNewQueue->ucStaticallyAllocated = pdTRUE; - } - #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ - - prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue ); - } - else - { - configASSERT( pxNewQueue ); - mtCOVERAGE_TEST_MARKER(); - } - - return pxNewQueue; - } - -#endif /* configSUPPORT_STATIC_ALLOCATION */ -/*-----------------------------------------------------------*/ - -#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) - - QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, - const UBaseType_t uxItemSize, - const uint8_t ucQueueType ) - { - Queue_t * pxNewQueue = NULL; - size_t xQueueSizeInBytes; - uint8_t * pucQueueStorage; - - if( ( uxQueueLength > ( UBaseType_t ) 0 ) && - /* Check for multiplication overflow. */ - ( ( SIZE_MAX / uxQueueLength ) >= uxItemSize ) && - /* Check for addition overflow. */ - ( ( SIZE_MAX - sizeof( Queue_t ) ) >= ( uxQueueLength * uxItemSize ) ) ) - { - /* Allocate enough space to hold the maximum number of items that - * can be in the queue at any time. It is valid for uxItemSize to be - * zero in the case the queue is used as a semaphore. */ - xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ - - /* Allocate the queue and storage area. Justification for MISRA - * deviation as follows: pvPortMalloc() always ensures returned memory - * blocks are aligned per the requirements of the MCU stack. In this case - * pvPortMalloc() must return a pointer that is guaranteed to meet the - * alignment requirements of the Queue_t structure - which in this case - * is an int8_t *. Therefore, whenever the stack alignment requirements - * are greater than or equal to the pointer to char requirements the cast - * is safe. In other cases alignment requirements are not strict (one or - * two bytes). */ - pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); /*lint !e9087 !e9079 see comment above. 
*/ - - if( pxNewQueue != NULL ) - { - /* Jump past the queue structure to find the location of the queue - * storage area. */ - pucQueueStorage = ( uint8_t * ) pxNewQueue; - pucQueueStorage += sizeof( Queue_t ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ - - #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) - { - /* Queues can be created either statically or dynamically, so - * note this task was created dynamically in case it is later - * deleted. */ - pxNewQueue->ucStaticallyAllocated = pdFALSE; - } - #endif /* configSUPPORT_STATIC_ALLOCATION */ - - prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue ); - } - else - { - traceQUEUE_CREATE_FAILED( ucQueueType ); - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - configASSERT( pxNewQueue ); - mtCOVERAGE_TEST_MARKER(); - } - - return pxNewQueue; - } - -#endif /* configSUPPORT_STATIC_ALLOCATION */ -/*-----------------------------------------------------------*/ - -static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, - const UBaseType_t uxItemSize, - uint8_t * pucQueueStorage, - const uint8_t ucQueueType, - Queue_t * pxNewQueue ) -{ - /* Remove compiler warnings about unused parameters should - * configUSE_TRACE_FACILITY not be set to 1. */ - ( void ) ucQueueType; - - if( uxItemSize == ( UBaseType_t ) 0 ) - { - /* No RAM was allocated for the queue storage area, but PC head cannot - * be set to NULL because NULL is used as a key to say the queue is used as - * a mutex. Therefore just set pcHead to point to the queue as a benign - * value that is known to be within the memory map. */ - pxNewQueue->pcHead = ( int8_t * ) pxNewQueue; - } - else - { - /* Set the head to the start of the queue storage area. */ - pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage; - } - - /* Initialise the queue members as described where the queue type is - * defined. */ - pxNewQueue->uxLength = uxQueueLength; - pxNewQueue->uxItemSize = uxItemSize; - ( void ) xQueueGenericReset( pxNewQueue, pdTRUE ); - - #if ( configUSE_TRACE_FACILITY == 1 ) - { - pxNewQueue->ucQueueType = ucQueueType; - } - #endif /* configUSE_TRACE_FACILITY */ - - #if ( configUSE_QUEUE_SETS == 1 ) - { - pxNewQueue->pxQueueSetContainer = NULL; - } - #endif /* configUSE_QUEUE_SETS */ - - traceQUEUE_CREATE( pxNewQueue ); -} -/*-----------------------------------------------------------*/ - -#if ( configUSE_MUTEXES == 1 ) - - static void prvInitialiseMutex( Queue_t * pxNewQueue ) - { - if( pxNewQueue != NULL ) - { - /* The queue create function will set all the queue structure members - * correctly for a generic queue, but this function is creating a - * mutex. Overwrite those members that need to be set differently - - * in particular the information required for priority inheritance. */ - pxNewQueue->u.xSemaphore.xMutexHolder = NULL; - pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX; - - /* In case this is a recursive mutex. */ - pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0; - - traceCREATE_MUTEX( pxNewQueue ); - - /* Start with the semaphore in the expected state. 
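From application code this machinery is reached through the semaphore API rather than the queue functions directly; a short sketch of creating and using a mutex, assuming configUSE_MUTEXES and configSUPPORT_DYNAMIC_ALLOCATION are both 1 (names are illustrative):

#include "FreeRTOS.h"
#include "semphr.h"

static SemaphoreHandle_t xResourceMutex = NULL;

void vSetupMutex( void )
{
    /* Normally called once during start-up, before the tasks that use it run.
     * The mutex is created already 'available', as the initialisation above
     * gives it once. */
    xResourceMutex = xSemaphoreCreateMutex();
    configASSERT( xResourceMutex != NULL );
}

void vAccessSharedResource( void )
{
    if( xSemaphoreTake( xResourceMutex, pdMS_TO_TICKS( 10 ) ) == pdTRUE )
    {
        /* ... the protected resource is safe to use here ... */
        ( void ) xSemaphoreGive( xResourceMutex );
    }
}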
*/ - ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK ); - } - else - { - traceCREATE_MUTEX_FAILED(); - } - } - -#endif /* configUSE_MUTEXES */ -/*-----------------------------------------------------------*/ - -#if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) - - QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType ) - { - QueueHandle_t xNewQueue; - const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0; - - xNewQueue = xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType ); - prvInitialiseMutex( ( Queue_t * ) xNewQueue ); - - return xNewQueue; - } - -#endif /* configUSE_MUTEXES */ -/*-----------------------------------------------------------*/ - -#if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) - - QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, - StaticQueue_t * pxStaticQueue ) - { - QueueHandle_t xNewQueue; - const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0; - - /* Prevent compiler warnings about unused parameters if - * configUSE_TRACE_FACILITY does not equal 1. */ - ( void ) ucQueueType; - - xNewQueue = xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType ); - prvInitialiseMutex( ( Queue_t * ) xNewQueue ); - - return xNewQueue; - } - -#endif /* configUSE_MUTEXES */ -/*-----------------------------------------------------------*/ - -#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) - - TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore ) - { - TaskHandle_t pxReturn; - Queue_t * const pxSemaphore = ( Queue_t * ) xSemaphore; - - configASSERT( xSemaphore ); - - /* This function is called by xSemaphoreGetMutexHolder(), and should not - * be called directly. Note: This is a good way of determining if the - * calling task is the mutex holder, but not a good way of determining the - * identity of the mutex holder, as the holder may change between the - * following critical section exiting and the function returning. */ - taskENTER_CRITICAL(); - { - if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX ) - { - pxReturn = pxSemaphore->u.xSemaphore.xMutexHolder; - } - else - { - pxReturn = NULL; - } - } - taskEXIT_CRITICAL(); - - return pxReturn; - } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */ - -#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ -/*-----------------------------------------------------------*/ - -#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) - - TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore ) - { - TaskHandle_t pxReturn; - - configASSERT( xSemaphore ); - - /* Mutexes cannot be used in interrupt service routines, so the mutex - * holder should not change in an ISR, and therefore a critical section is - * not required here. */ - if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX ) - { - pxReturn = ( ( Queue_t * ) xSemaphore )->u.xSemaphore.xMutexHolder; - } - else - { - pxReturn = NULL; - } - - return pxReturn; - } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. 
*/ - -#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ -/*-----------------------------------------------------------*/ - -#if ( configUSE_RECURSIVE_MUTEXES == 1 ) - - BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex ) - { - BaseType_t xReturn; - Queue_t * const pxMutex = ( Queue_t * ) xMutex; - - configASSERT( pxMutex ); - - /* If this is the task that holds the mutex then xMutexHolder will not - * change outside of this task. If this task does not hold the mutex then - * pxMutexHolder can never coincidentally equal the tasks handle, and as - * this is the only condition we are interested in it does not matter if - * pxMutexHolder is accessed simultaneously by another task. Therefore no - * mutual exclusion is required to test the pxMutexHolder variable. */ - if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() ) - { - traceGIVE_MUTEX_RECURSIVE( pxMutex ); - - /* uxRecursiveCallCount cannot be zero if xMutexHolder is equal to - * the task handle, therefore no underflow check is required. Also, - * uxRecursiveCallCount is only modified by the mutex holder, and as - * there can only be one, no mutual exclusion is required to modify the - * uxRecursiveCallCount member. */ - ( pxMutex->u.xSemaphore.uxRecursiveCallCount )--; - - /* Has the recursive call count unwound to 0? */ - if( pxMutex->u.xSemaphore.uxRecursiveCallCount == ( UBaseType_t ) 0 ) - { - /* Return the mutex. This will automatically unblock any other - * task that might be waiting to access the mutex. */ - ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - xReturn = pdPASS; - } - else - { - /* The mutex cannot be given because the calling task is not the - * holder. */ - xReturn = pdFAIL; - - traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex ); - } - - return xReturn; - } - -#endif /* configUSE_RECURSIVE_MUTEXES */ -/*-----------------------------------------------------------*/ - -#if ( configUSE_RECURSIVE_MUTEXES == 1 ) - - BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, - TickType_t xTicksToWait ) - { - BaseType_t xReturn; - Queue_t * const pxMutex = ( Queue_t * ) xMutex; - - configASSERT( pxMutex ); - - /* Comments regarding mutual exclusion as per those within - * xQueueGiveMutexRecursive(). */ - - traceTAKE_MUTEX_RECURSIVE( pxMutex ); - - if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() ) - { - ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++; - xReturn = pdPASS; - } - else - { - xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait ); - - /* pdPASS will only be returned if the mutex was successfully - * obtained. The calling task may have entered the Blocked state - * before reaching here. 
*/ - if( xReturn != pdFAIL ) - { - ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++; - } - else - { - traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex ); - } - } - - return xReturn; - } - -#endif /* configUSE_RECURSIVE_MUTEXES */ -/*-----------------------------------------------------------*/ - -#if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) - - QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, - const UBaseType_t uxInitialCount, - StaticQueue_t * pxStaticQueue ) - { - QueueHandle_t xHandle = NULL; - - if( ( uxMaxCount != 0 ) && - ( uxInitialCount <= uxMaxCount ) ) - { - xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE ); - - if( xHandle != NULL ) - { - ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount; - - traceCREATE_COUNTING_SEMAPHORE(); - } - else - { - traceCREATE_COUNTING_SEMAPHORE_FAILED(); - } - } - else - { - configASSERT( xHandle ); - mtCOVERAGE_TEST_MARKER(); - } - - return xHandle; - } - -#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ -/*-----------------------------------------------------------*/ - -#if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) - - QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, - const UBaseType_t uxInitialCount ) - { - QueueHandle_t xHandle = NULL; - - if( ( uxMaxCount != 0 ) && - ( uxInitialCount <= uxMaxCount ) ) - { - xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE ); - - if( xHandle != NULL ) - { - ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount; - - traceCREATE_COUNTING_SEMAPHORE(); - } - else - { - traceCREATE_COUNTING_SEMAPHORE_FAILED(); - } - } - else - { - configASSERT( xHandle ); - mtCOVERAGE_TEST_MARKER(); - } - - return xHandle; - } - -#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ -/*-----------------------------------------------------------*/ - -BaseType_t xQueueGenericSend( QueueHandle_t xQueue, - const void * const pvItemToQueue, - TickType_t xTicksToWait, - const BaseType_t xCopyPosition ) -{ - BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired; - TimeOut_t xTimeOut; - Queue_t * const pxQueue = xQueue; - - configASSERT( pxQueue ); - configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); - configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) ); - #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) - { - configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); - } - #endif - - /*lint -save -e904 This function relaxes the coding standard somewhat to - * allow return statements within the function itself. This is done in the - * interest of execution time efficiency. */ - for( ; ; ) - { - taskENTER_CRITICAL(); - { - /* Is there room on the queue now? The running task must be the - * highest priority task wanting to access the queue. If the head item - * in the queue is to be overwritten then it does not matter if the - * queue is full. 
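A usage sketch for the recursive give/take pair and the counting-semaphore creators above, assuming configUSE_RECURSIVE_MUTEXES and configUSE_COUNTING_SEMAPHORES are both 1; the handle names are hypothetical:

#include "FreeRTOS.h"
#include "semphr.h"

void vRecursiveAndCountingExample( void )
{
    SemaphoreHandle_t xRecursive = xSemaphoreCreateRecursiveMutex();
    SemaphoreHandle_t xCounting  = xSemaphoreCreateCounting( 5, 0 ); /* max count 5, start empty */

    if( xRecursive != NULL )
    {
        /* Each successful take increments uxRecursiveCallCount; the mutex is
         * only returned once the gives unwind the count back to zero. */
        ( void ) xSemaphoreTakeRecursive( xRecursive, portMAX_DELAY );
        ( void ) xSemaphoreTakeRecursive( xRecursive, 0 );
        ( void ) xSemaphoreGiveRecursive( xRecursive );
        ( void ) xSemaphoreGiveRecursive( xRecursive );
    }

    if( xCounting != NULL )
    {
        ( void ) xSemaphoreGive( xCounting ); /* count goes from 0 to 1 */
    }
}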
*/ - if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) - { - traceQUEUE_SEND( pxQueue ); - - #if ( configUSE_QUEUE_SETS == 1 ) - { - const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting; - - xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); - - if( pxQueue->pxQueueSetContainer != NULL ) - { - if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) ) - { - /* Do not notify the queue set as an existing item - * was overwritten in the queue so the number of items - * in the queue has not changed. */ - mtCOVERAGE_TEST_MARKER(); - } - else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) - { - /* The queue is a member of a queue set, and posting - * to the queue set caused a higher priority task to - * unblock. A context switch is required. */ - queueYIELD_IF_USING_PREEMPTION(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - /* If there was a task waiting for data to arrive on the - * queue then unblock it now. */ - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) - { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) - { - /* The unblocked task has a priority higher than - * our own so yield immediately. Yes it is ok to - * do this from within the critical section - the - * kernel takes care of that. */ - queueYIELD_IF_USING_PREEMPTION(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else if( xYieldRequired != pdFALSE ) - { - /* This path is a special case that will only get - * executed if the task was holding multiple mutexes - * and the mutexes were given back in an order that is - * different to that in which they were taken. */ - queueYIELD_IF_USING_PREEMPTION(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - } - #else /* configUSE_QUEUE_SETS */ - { - xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); - - /* If there was a task waiting for data to arrive on the - * queue then unblock it now. */ - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) - { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) - { - /* The unblocked task has a priority higher than - * our own so yield immediately. Yes it is ok to do - * this from within the critical section - the kernel - * takes care of that. */ - queueYIELD_IF_USING_PREEMPTION(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else if( xYieldRequired != pdFALSE ) - { - /* This path is a special case that will only get - * executed if the task was holding multiple mutexes and - * the mutexes were given back in an order that is - * different to that in which they were taken. */ - queueYIELD_IF_USING_PREEMPTION(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #endif /* configUSE_QUEUE_SETS */ - - taskEXIT_CRITICAL(); - return pdPASS; - } - else - { - if( xTicksToWait == ( TickType_t ) 0 ) - { - /* The queue was full and no block time is specified (or - * the block time has expired) so leave now. */ - taskEXIT_CRITICAL(); - - /* Return to the original privilege level before exiting - * the function. */ - traceQUEUE_SEND_FAILED( pxQueue ); - return errQUEUE_FULL; - } - else if( xEntryTimeSet == pdFALSE ) - { - /* The queue was full and a block time was specified so - * configure the timeout structure. */ - vTaskInternalSetTimeOutState( &xTimeOut ); - xEntryTimeSet = pdTRUE; - } - else - { - /* Entry time was already set. 
*/ - mtCOVERAGE_TEST_MARKER(); - } - } - } - taskEXIT_CRITICAL(); - - /* Interrupts and other tasks can send to and receive from the queue - * now the critical section has been exited. */ - - vTaskSuspendAll(); - prvLockQueue( pxQueue ); - - /* Update the timeout state to see if it has expired yet. */ - if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) - { - if( prvIsQueueFull( pxQueue ) != pdFALSE ) - { - traceBLOCKING_ON_QUEUE_SEND( pxQueue ); - vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait ); - - /* Unlocking the queue means queue events can effect the - * event list. It is possible that interrupts occurring now - * remove this task from the event list again - but as the - * scheduler is suspended the task will go onto the pending - * ready list instead of the actual ready list. */ - prvUnlockQueue( pxQueue ); - - /* Resuming the scheduler will move tasks from the pending - * ready list into the ready list - so it is feasible that this - * task is already in the ready list before it yields - in which - * case the yield will not cause a context switch unless there - * is also a higher priority task in the pending ready list. */ - if( xTaskResumeAll() == pdFALSE ) - { - portYIELD_WITHIN_API(); - } - } - else - { - /* Try again. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - } - } - else - { - /* The timeout has expired. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - - traceQUEUE_SEND_FAILED( pxQueue ); - return errQUEUE_FULL; - } - } /*lint -restore */ -} -/*-----------------------------------------------------------*/ - -BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, - const void * const pvItemToQueue, - BaseType_t * const pxHigherPriorityTaskWoken, - const BaseType_t xCopyPosition ) -{ - BaseType_t xReturn; - portBASE_TYPE xSavedInterruptStatus; - Queue_t * const pxQueue = xQueue; - - configASSERT( pxQueue ); - configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); - configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) ); - - /* RTOS ports that support interrupt nesting have the concept of a maximum - * system call (or maximum API call) interrupt priority. Interrupts that are - * above the maximum system call priority are kept permanently enabled, even - * when the RTOS kernel is in a critical section, but cannot make any calls to - * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h - * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion - * failure if a FreeRTOS API function is called from an interrupt that has been - * assigned a priority above the configured maximum system call priority. - * Only FreeRTOS functions that end in FromISR can be called from interrupts - * that have been assigned a priority at or (logically) below the maximum - * system call interrupt priority. FreeRTOS maintains a separate interrupt - * safe API to ensure interrupt entry is as fast and as simple as possible. - * More information (albeit Cortex-M specific) is provided on the following - * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - - /* Similar to xQueueGenericSend, except without blocking if there is no room - * in the queue. Also don't directly wake a task that was blocked on a queue - * read, instead return a flag to say whether a context switch is required or - * not (i.e. 
has a task with a higher priority than us been woken by this - * post). */ - xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); - { - if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) - { - const int8_t cTxLock = pxQueue->cTxLock; - const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting; - - traceQUEUE_SEND_FROM_ISR( pxQueue ); - - /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a - * semaphore or mutex. That means prvCopyDataToQueue() cannot result - * in a task disinheriting a priority and prvCopyDataToQueue() can be - * called here even though the disinherit function does not check if - * the scheduler is suspended before accessing the ready lists. */ - ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); - - /* The event list is not altered if the queue is locked. This will - * be done when the queue is unlocked later. */ - if( cTxLock == queueUNLOCKED ) - { - #if ( configUSE_QUEUE_SETS == 1 ) - { - if( pxQueue->pxQueueSetContainer != NULL ) - { - if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) ) - { - /* Do not notify the queue set as an existing item - * was overwritten in the queue so the number of items - * in the queue has not changed. */ - mtCOVERAGE_TEST_MARKER(); - } - else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) - { - /* The queue is a member of a queue set, and posting - * to the queue set caused a higher priority task to - * unblock. A context switch is required. */ - if( pxHigherPriorityTaskWoken != NULL ) - { - *pxHigherPriorityTaskWoken = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) - { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) - { - /* The task waiting has a higher priority so - * record that a context switch is required. */ - if( pxHigherPriorityTaskWoken != NULL ) - { - *pxHigherPriorityTaskWoken = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - } - #else /* configUSE_QUEUE_SETS */ - { - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) - { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) - { - /* The task waiting has a higher priority so record that a - * context switch is required. */ - if( pxHigherPriorityTaskWoken != NULL ) - { - *pxHigherPriorityTaskWoken = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - /* Not used in this path. */ - ( void ) uxPreviousMessagesWaiting; - } - #endif /* configUSE_QUEUE_SETS */ - } - else - { - /* Increment the lock count so the task that unlocks the queue - * knows that data was posted while it was locked. 
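As a task-side usage sketch: xQueueSend() and xQueueSendToBack() are thin macros over the xQueueGenericSend() implementation above, so a full queue combined with a non-zero block time takes the blocking path, and errQUEUE_FULL is returned once the timeout expires. The queue handle here is hypothetical and assumed to be created elsewhere.

#include <stdint.h>
#include "FreeRTOS.h"
#include "queue.h"

extern QueueHandle_t xTxQueue; /* assumed to exist */

BaseType_t xSendReading( uint32_t ulReading )
{
    /* Wait up to 50 ms for space; this maps onto
     * xQueueGenericSend( ..., queueSEND_TO_BACK ). */
    if( xQueueSend( xTxQueue, &ulReading, pdMS_TO_TICKS( 50 ) ) != pdPASS )
    {
        /* The queue stayed full for the whole block time. */
        return errQUEUE_FULL;
    }

    return pdPASS;
}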
*/ - prvIncrementQueueTxLock( pxQueue, cTxLock ); - } - - xReturn = pdPASS; - } - else - { - traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ); - xReturn = errQUEUE_FULL; - } - } - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); - - return xReturn; -} -/*-----------------------------------------------------------*/ - -BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, - BaseType_t * const pxHigherPriorityTaskWoken ) -{ - BaseType_t xReturn; - portBASE_TYPE xSavedInterruptStatus; - Queue_t * const pxQueue = xQueue; - - /* Similar to xQueueGenericSendFromISR() but used with semaphores where the - * item size is 0. Don't directly wake a task that was blocked on a queue - * read, instead return a flag to say whether a context switch is required or - * not (i.e. has a task with a higher priority than us been woken by this - * post). */ - - configASSERT( pxQueue ); - - /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR() - * if the item size is not 0. */ - configASSERT( pxQueue->uxItemSize == 0 ); - - /* Normally a mutex would not be given from an interrupt, especially if - * there is a mutex holder, as priority inheritance makes no sense for an - * interrupts, only tasks. */ - configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->u.xSemaphore.xMutexHolder != NULL ) ) ); - - /* RTOS ports that support interrupt nesting have the concept of a maximum - * system call (or maximum API call) interrupt priority. Interrupts that are - * above the maximum system call priority are kept permanently enabled, even - * when the RTOS kernel is in a critical section, but cannot make any calls to - * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h - * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion - * failure if a FreeRTOS API function is called from an interrupt that has been - * assigned a priority above the configured maximum system call priority. - * Only FreeRTOS functions that end in FromISR can be called from interrupts - * that have been assigned a priority at or (logically) below the maximum - * system call interrupt priority. FreeRTOS maintains a separate interrupt - * safe API to ensure interrupt entry is as fast and as simple as possible. - * More information (albeit Cortex-M specific) is provided on the following - * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - - xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); - { - const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; - - /* When the queue is used to implement a semaphore no data is ever - * moved through the queue but it is still valid to see if the queue 'has - * space'. */ - if( uxMessagesWaiting < pxQueue->uxLength ) - { - const int8_t cTxLock = pxQueue->cTxLock; - - traceQUEUE_SEND_FROM_ISR( pxQueue ); - - /* A task can only have an inherited priority if it is a mutex - * holder - and if there is a mutex holder then the mutex cannot be - * given from an ISR. As this is the ISR version of the function it - * can be assumed there is no mutex holder and no need to determine if - * priority disinheritance is needed. Simply increase the count of - * messages (semaphores) available. */ - pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1; - - /* The event list is not altered if the queue is locked. This will - * be done when the queue is unlocked later. 
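Illustrative only: the FromISR variant above never blocks and instead reports, through pxHigherPriorityTaskWoken, whether the post unblocked a higher-priority task so the ISR can request a context switch on exit. A sketch with a hypothetical handler name, on a port that provides portYIELD_FROM_ISR():

#include <stdint.h>
#include "FreeRTOS.h"
#include "queue.h"

extern QueueHandle_t xRxQueue; /* assumed to exist */

void vExampleIsrHandler( void )
{
    uint32_t ulEvent = 0x01;
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Never blocks; fails with errQUEUE_FULL if there is no space. */
    ( void ) xQueueSendFromISR( xRxQueue, &ulEvent, &xHigherPriorityTaskWoken );

    /* Perform the context switch, if one was requested, as the ISR exits. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}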
*/ - if( cTxLock == queueUNLOCKED ) - { - #if ( configUSE_QUEUE_SETS == 1 ) - { - if( pxQueue->pxQueueSetContainer != NULL ) - { - if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) - { - /* The semaphore is a member of a queue set, and - * posting to the queue set caused a higher priority - * task to unblock. A context switch is required. */ - if( pxHigherPriorityTaskWoken != NULL ) - { - *pxHigherPriorityTaskWoken = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) - { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) - { - /* The task waiting has a higher priority so - * record that a context switch is required. */ - if( pxHigherPriorityTaskWoken != NULL ) - { - *pxHigherPriorityTaskWoken = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - } - #else /* configUSE_QUEUE_SETS */ - { - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) - { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) - { - /* The task waiting has a higher priority so record that a - * context switch is required. */ - if( pxHigherPriorityTaskWoken != NULL ) - { - *pxHigherPriorityTaskWoken = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #endif /* configUSE_QUEUE_SETS */ - } - else - { - /* Increment the lock count so the task that unlocks the queue - * knows that data was posted while it was locked. */ - prvIncrementQueueTxLock( pxQueue, cTxLock ); - } - - xReturn = pdPASS; - } - else - { - traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ); - xReturn = errQUEUE_FULL; - } - } - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); - - return xReturn; -} -/*-----------------------------------------------------------*/ - -BaseType_t xQueueReceive( QueueHandle_t xQueue, - void * const pvBuffer, - TickType_t xTicksToWait ) -{ - BaseType_t xEntryTimeSet = pdFALSE; - TimeOut_t xTimeOut; - Queue_t * const pxQueue = xQueue; - - /* Check the pointer is not NULL. */ - configASSERT( ( pxQueue ) ); - - /* The buffer into which data is received can only be NULL if the data size - * is zero (so no data is copied into the buffer). */ - configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) ); - - /* Cannot block if the scheduler is suspended. */ - #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) - { - configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); - } - #endif - - /*lint -save -e904 This function relaxes the coding standard somewhat to - * allow return statements within the function itself. This is done in the - * interest of execution time efficiency. */ - for( ; ; ) - { - taskENTER_CRITICAL(); - { - const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; - - /* Is there data in the queue now? To be running the calling task - * must be the highest priority task wanting to access the queue. */ - if( uxMessagesWaiting > ( UBaseType_t ) 0 ) - { - /* Data available, remove one item. 
*/ - prvCopyDataFromQueue( pxQueue, pvBuffer ); - traceQUEUE_RECEIVE( pxQueue ); - pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1; - - /* There is now space in the queue, were any tasks waiting to - * post to the queue? If so, unblock the highest priority waiting - * task. */ - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) - { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) - { - queueYIELD_IF_USING_PREEMPTION(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - taskEXIT_CRITICAL(); - return pdPASS; - } - else - { - if( xTicksToWait == ( TickType_t ) 0 ) - { - /* The queue was empty and no block time is specified (or - * the block time has expired) so leave now. */ - taskEXIT_CRITICAL(); - traceQUEUE_RECEIVE_FAILED( pxQueue ); - return errQUEUE_EMPTY; - } - else if( xEntryTimeSet == pdFALSE ) - { - /* The queue was empty and a block time was specified so - * configure the timeout structure. */ - vTaskInternalSetTimeOutState( &xTimeOut ); - xEntryTimeSet = pdTRUE; - } - else - { - /* Entry time was already set. */ - mtCOVERAGE_TEST_MARKER(); - } - } - } - taskEXIT_CRITICAL(); - - /* Interrupts and other tasks can send to and receive from the queue - * now the critical section has been exited. */ - - vTaskSuspendAll(); - prvLockQueue( pxQueue ); - - /* Update the timeout state to see if it has expired yet. */ - if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) - { - /* The timeout has not expired. If the queue is still empty place - * the task on the list of tasks waiting to receive from the queue. */ - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) - { - traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); - vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); - prvUnlockQueue( pxQueue ); - - if( xTaskResumeAll() == pdFALSE ) - { - portYIELD_WITHIN_API(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - /* The queue contains data again. Loop back to try and read the - * data. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - } - } - else - { - /* Timed out. If there is no data in the queue exit, otherwise loop - * back and attempt to read the data. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) - { - traceQUEUE_RECEIVE_FAILED( pxQueue ); - return errQUEUE_EMPTY; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - } /*lint -restore */ -} -/*-----------------------------------------------------------*/ - -BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, - TickType_t xTicksToWait ) -{ - BaseType_t xEntryTimeSet = pdFALSE; - TimeOut_t xTimeOut; - Queue_t * const pxQueue = xQueue; - - #if ( configUSE_MUTEXES == 1 ) - BaseType_t xInheritanceOccurred = pdFALSE; - #endif - - /* Check the queue pointer is not NULL. */ - configASSERT( ( pxQueue ) ); - - /* Check this really is a semaphore, in which case the item size will be - * 0. */ - configASSERT( pxQueue->uxItemSize == 0 ); - - /* Cannot block if the scheduler is suspended. */ - #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) - { - configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); - } - #endif - - /*lint -save -e904 This function relaxes the coding standard somewhat to allow return - * statements within the function itself. This is done in the interest - * of execution time efficiency. 
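A consumer-side sketch pairing with the send example earlier; the task and queue names are hypothetical:

#include <stdint.h>
#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

extern QueueHandle_t xRxQueue; /* assumed to exist */

void vConsumerTask( void * pvParameters )
{
    uint32_t ulEvent;

    ( void ) pvParameters;

    for( ; ; )
    {
        /* Block indefinitely until an item arrives; the item is copied out
         * and removed, making space for a blocked sender if there is one. */
        if( xQueueReceive( xRxQueue, &ulEvent, portMAX_DELAY ) == pdPASS )
        {
            /* ... process ulEvent ... */
        }
    }
}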
*/ - for( ; ; ) - { - taskENTER_CRITICAL(); - { - /* Semaphores are queues with an item size of 0, and where the - * number of messages in the queue is the semaphore's count value. */ - const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting; - - /* Is there data in the queue now? To be running the calling task - * must be the highest priority task wanting to access the queue. */ - if( uxSemaphoreCount > ( UBaseType_t ) 0 ) - { - traceQUEUE_RECEIVE( pxQueue ); - - /* Semaphores are queues with a data size of zero and where the - * messages waiting is the semaphore's count. Reduce the count. */ - pxQueue->uxMessagesWaiting = uxSemaphoreCount - ( UBaseType_t ) 1; - - #if ( configUSE_MUTEXES == 1 ) - { - if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) - { - /* Record the information required to implement - * priority inheritance should it become necessary. */ - pxQueue->u.xSemaphore.xMutexHolder = pvTaskIncrementMutexHeldCount(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #endif /* configUSE_MUTEXES */ - - /* Check to see if other tasks are blocked waiting to give the - * semaphore, and if so, unblock the highest priority such task. */ - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) - { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) - { - queueYIELD_IF_USING_PREEMPTION(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - taskEXIT_CRITICAL(); - return pdPASS; - } - else - { - if( xTicksToWait == ( TickType_t ) 0 ) - { - /* The semaphore count was 0 and no block time is specified - * (or the block time has expired) so exit now. */ - taskEXIT_CRITICAL(); - traceQUEUE_RECEIVE_FAILED( pxQueue ); - return errQUEUE_EMPTY; - } - else if( xEntryTimeSet == pdFALSE ) - { - /* The semaphore count was 0 and a block time was specified - * so configure the timeout structure ready to block. */ - vTaskInternalSetTimeOutState( &xTimeOut ); - xEntryTimeSet = pdTRUE; - } - else - { - /* Entry time was already set. */ - mtCOVERAGE_TEST_MARKER(); - } - } - } - taskEXIT_CRITICAL(); - - /* Interrupts and other tasks can give to and take from the semaphore - * now the critical section has been exited. */ - - vTaskSuspendAll(); - prvLockQueue( pxQueue ); - - /* Update the timeout state to see if it has expired yet. */ - if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) - { - /* A block time is specified and not expired. If the semaphore - * count is 0 then enter the Blocked state to wait for a semaphore to - * become available. As semaphores are implemented with queues the - * queue being empty is equivalent to the semaphore count being 0. */ - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) - { - traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); - - #if ( configUSE_MUTEXES == 1 ) - { - if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) - { - taskENTER_CRITICAL(); - { - xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder ); - } - taskEXIT_CRITICAL(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #endif /* if ( configUSE_MUTEXES == 1 ) */ - - vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); - prvUnlockQueue( pxQueue ); - - if( xTaskResumeAll() == pdFALSE ) - { - portYIELD_WITHIN_API(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - /* There was no timeout and the semaphore count was not 0, so - * attempt to take the semaphore again. 
*/ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - } - } - else - { - /* Timed out. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - - /* If the semaphore count is 0 exit now as the timeout has - * expired. Otherwise return to attempt to take the semaphore that is - * known to be available. As semaphores are implemented by queues the - * queue being empty is equivalent to the semaphore count being 0. */ - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) - { - #if ( configUSE_MUTEXES == 1 ) - { - /* xInheritanceOccurred could only have be set if - * pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to - * test the mutex type again to check it is actually a mutex. */ - if( xInheritanceOccurred != pdFALSE ) - { - taskENTER_CRITICAL(); - { - UBaseType_t uxHighestWaitingPriority; - - /* This task blocking on the mutex caused another - * task to inherit this task's priority. Now this task - * has timed out the priority should be disinherited - * again, but only as low as the next highest priority - * task that is waiting for the same mutex. */ - uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue ); - vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority ); - } - taskEXIT_CRITICAL(); - } - } - #endif /* configUSE_MUTEXES */ - - traceQUEUE_RECEIVE_FAILED( pxQueue ); - return errQUEUE_EMPTY; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - } /*lint -restore */ -} -/*-----------------------------------------------------------*/ - -BaseType_t xQueuePeek( QueueHandle_t xQueue, - void * const pvBuffer, - TickType_t xTicksToWait ) -{ - BaseType_t xEntryTimeSet = pdFALSE; - TimeOut_t xTimeOut; - int8_t * pcOriginalReadPosition; - Queue_t * const pxQueue = xQueue; - - /* Check the pointer is not NULL. */ - configASSERT( ( pxQueue ) ); - - /* The buffer into which data is received can only be NULL if the data size - * is zero (so no data is copied into the buffer. */ - configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) ); - - /* Cannot block if the scheduler is suspended. */ - #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) - { - configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); - } - #endif - - /*lint -save -e904 This function relaxes the coding standard somewhat to - * allow return statements within the function itself. This is done in the - * interest of execution time efficiency. */ - for( ; ; ) - { - taskENTER_CRITICAL(); - { - const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; - - /* Is there data in the queue now? To be running the calling task - * must be the highest priority task wanting to access the queue. */ - if( uxMessagesWaiting > ( UBaseType_t ) 0 ) - { - /* Remember the read position so it can be reset after the data - * is read from the queue as this function is only peeking the - * data, not removing it. */ - pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom; - - prvCopyDataFromQueue( pxQueue, pvBuffer ); - traceQUEUE_PEEK( pxQueue ); - - /* The data is not being removed, so reset the read pointer. */ - pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition; - - /* The data is being left in the queue, so see if there are - * any other tasks waiting for the data. 
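For orientation: a call to xSemaphoreTake() on a mutex lands in the xQueueSemaphoreTake() implementation above, which is where priority inheritance is applied while the caller blocks and undone again if the wait times out. A minimal sketch with hypothetical names, assuming configUSE_MUTEXES is 1:

#include "FreeRTOS.h"
#include "semphr.h"

extern SemaphoreHandle_t xResourceMutex; /* assumed to be a mutex created elsewhere */

void vUseSharedResource( void )
{
    /* If the current holder has a lower priority than this task it
     * temporarily inherits this task's priority for the duration of the wait. */
    if( xSemaphoreTake( xResourceMutex, pdMS_TO_TICKS( 100 ) ) == pdPASS )
    {
        /* ... access the shared resource ... */
        ( void ) xSemaphoreGive( xResourceMutex );
    }
}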
*/ - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) - { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) - { - /* The task waiting has a higher priority than this task. */ - queueYIELD_IF_USING_PREEMPTION(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - taskEXIT_CRITICAL(); - return pdPASS; - } - else - { - if( xTicksToWait == ( TickType_t ) 0 ) - { - /* The queue was empty and no block time is specified (or - * the block time has expired) so leave now. */ - taskEXIT_CRITICAL(); - traceQUEUE_PEEK_FAILED( pxQueue ); - return errQUEUE_EMPTY; - } - else if( xEntryTimeSet == pdFALSE ) - { - /* The queue was empty and a block time was specified so - * configure the timeout structure ready to enter the blocked - * state. */ - vTaskInternalSetTimeOutState( &xTimeOut ); - xEntryTimeSet = pdTRUE; - } - else - { - /* Entry time was already set. */ - mtCOVERAGE_TEST_MARKER(); - } - } - } - taskEXIT_CRITICAL(); - - /* Interrupts and other tasks can send to and receive from the queue - * now that the critical section has been exited. */ - - vTaskSuspendAll(); - prvLockQueue( pxQueue ); - - /* Update the timeout state to see if it has expired yet. */ - if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) - { - /* Timeout has not expired yet, check to see if there is data in the - * queue now, and if not enter the Blocked state to wait for data. */ - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) - { - traceBLOCKING_ON_QUEUE_PEEK( pxQueue ); - vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); - prvUnlockQueue( pxQueue ); - - if( xTaskResumeAll() == pdFALSE ) - { - portYIELD_WITHIN_API(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - /* There is data in the queue now, so don't enter the blocked - * state, instead return to try and obtain the data. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - } - } - else - { - /* The timeout has expired. If there is still no data in the queue - * exit, otherwise go back and try to read the data again. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) - { - traceQUEUE_PEEK_FAILED( pxQueue ); - return errQUEUE_EMPTY; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - } /*lint -restore */ -} -/*-----------------------------------------------------------*/ - -BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, - void * const pvBuffer, - BaseType_t * const pxHigherPriorityTaskWoken ) -{ - BaseType_t xReturn; - portBASE_TYPE xSavedInterruptStatus; - Queue_t * const pxQueue = xQueue; - - configASSERT( pxQueue ); - configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); - - /* RTOS ports that support interrupt nesting have the concept of a maximum - * system call (or maximum API call) interrupt priority. Interrupts that are - * above the maximum system call priority are kept permanently enabled, even - * when the RTOS kernel is in a critical section, but cannot make any calls to - * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h - * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion - * failure if a FreeRTOS API function is called from an interrupt that has been - * assigned a priority above the configured maximum system call priority. 
- * Only FreeRTOS functions that end in FromISR can be called from interrupts - * that have been assigned a priority at or (logically) below the maximum - * system call interrupt priority. FreeRTOS maintains a separate interrupt - * safe API to ensure interrupt entry is as fast and as simple as possible. - * More information (albeit Cortex-M specific) is provided on the following - * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - - xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); - { - const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; - - /* Cannot block in an ISR, so check there is data available. */ - if( uxMessagesWaiting > ( UBaseType_t ) 0 ) - { - const int8_t cRxLock = pxQueue->cRxLock; - - traceQUEUE_RECEIVE_FROM_ISR( pxQueue ); - - prvCopyDataFromQueue( pxQueue, pvBuffer ); - pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1; - - /* If the queue is locked the event list will not be modified. - * Instead update the lock count so the task that unlocks the queue - * will know that an ISR has removed data while the queue was - * locked. */ - if( cRxLock == queueUNLOCKED ) - { - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) - { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) - { - /* The task waiting has a higher priority than us so - * force a context switch. */ - if( pxHigherPriorityTaskWoken != NULL ) - { - *pxHigherPriorityTaskWoken = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - /* Increment the lock count so the task that unlocks the queue - * knows that data was removed while it was locked. */ - prvIncrementQueueRxLock( pxQueue, cRxLock ); - } - - xReturn = pdPASS; - } - else - { - xReturn = pdFAIL; - traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ); - } - } - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); - - return xReturn; -} -/*-----------------------------------------------------------*/ - -BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, - void * const pvBuffer ) -{ - BaseType_t xReturn; - portBASE_TYPE xSavedInterruptStatus; - int8_t * pcOriginalReadPosition; - Queue_t * const pxQueue = xQueue; - - configASSERT( pxQueue ); - configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); - configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */ - - /* RTOS ports that support interrupt nesting have the concept of a maximum - * system call (or maximum API call) interrupt priority. Interrupts that are - * above the maximum system call priority are kept permanently enabled, even - * when the RTOS kernel is in a critical section, but cannot make any calls to - * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h - * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion - * failure if a FreeRTOS API function is called from an interrupt that has been - * assigned a priority above the configured maximum system call priority. - * Only FreeRTOS functions that end in FromISR can be called from interrupts - * that have been assigned a priority at or (logically) below the maximum - * system call interrupt priority. FreeRTOS maintains a separate interrupt - * safe API to ensure interrupt entry is as fast and as simple as possible. 
- * More information (albeit Cortex-M specific) is provided on the following - * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - - xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); - { - /* Cannot block in an ISR, so check there is data available. */ - if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) - { - traceQUEUE_PEEK_FROM_ISR( pxQueue ); - - /* Remember the read position so it can be reset as nothing is - * actually being removed from the queue. */ - pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom; - prvCopyDataFromQueue( pxQueue, pvBuffer ); - pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition; - - xReturn = pdPASS; - } - else - { - xReturn = pdFAIL; - traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); - } - } - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); - - return xReturn; -} -/*-----------------------------------------------------------*/ - -UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) -{ - UBaseType_t uxReturn; - - configASSERT( xQueue ); - - taskENTER_CRITICAL(); - { - uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting; - } - taskEXIT_CRITICAL(); - - return uxReturn; -} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ -/*-----------------------------------------------------------*/ - -UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) -{ - UBaseType_t uxReturn; - Queue_t * const pxQueue = xQueue; - - configASSERT( pxQueue ); - - taskENTER_CRITICAL(); - { - uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting; - } - taskEXIT_CRITICAL(); - - return uxReturn; -} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ -/*-----------------------------------------------------------*/ - -UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) -{ - UBaseType_t uxReturn; - Queue_t * const pxQueue = xQueue; - - configASSERT( pxQueue ); - uxReturn = pxQueue->uxMessagesWaiting; - - return uxReturn; -} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ -/*-----------------------------------------------------------*/ - -void vQueueDelete( QueueHandle_t xQueue ) -{ - Queue_t * const pxQueue = xQueue; - - configASSERT( pxQueue ); - traceQUEUE_DELETE( pxQueue ); - - #if ( configQUEUE_REGISTRY_SIZE > 0 ) - { - vQueueUnregisterQueue( pxQueue ); - } - #endif - - #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) - { - /* The queue can only have been allocated dynamically - free it - * again. */ - vPortFree( pxQueue ); - } - #elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) - { - /* The queue could have been allocated statically or dynamically, so - * check before attempting to free the memory. */ - if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE ) - { - vPortFree( pxQueue ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #else /* if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) */ - { - /* The queue must have been statically allocated, so is not going to be - * deleted. Avoid compiler warnings about the unused parameter. 
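An illustrative aside on the functions above: xQueuePeek() copies out the head item without removing it, and uxQueueMessagesWaiting()/uxQueueSpacesAvailable() report the current occupancy. The queue handle below is hypothetical:

#include <stdint.h>
#include "FreeRTOS.h"
#include "queue.h"

extern QueueHandle_t xRxQueue; /* assumed to exist */

void vInspectQueue( void )
{
    uint32_t ulNextItem;
    UBaseType_t uxWaiting;

    if( xQueuePeek( xRxQueue, &ulNextItem, 0 ) == pdPASS )
    {
        /* ulNextItem holds a copy of the head item; the item itself is
         * still stored because peeking does not remove it. */
    }

    uxWaiting = uxQueueMessagesWaiting( xRxQueue );  /* items currently stored   */
    ( void ) uxQueueSpacesAvailable( xRxQueue );     /* free slots remaining     */
    ( void ) uxWaiting;
}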
*/ - ( void ) pxQueue; - } - #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ -} -/*-----------------------------------------------------------*/ - -#if ( configUSE_TRACE_FACILITY == 1 ) - - UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue ) - { - return ( ( Queue_t * ) xQueue )->uxQueueNumber; - } - -#endif /* configUSE_TRACE_FACILITY */ -/*-----------------------------------------------------------*/ - -#if ( configUSE_TRACE_FACILITY == 1 ) - - void vQueueSetQueueNumber( QueueHandle_t xQueue, - UBaseType_t uxQueueNumber ) - { - ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber; - } - -#endif /* configUSE_TRACE_FACILITY */ -/*-----------------------------------------------------------*/ - -#if ( configUSE_TRACE_FACILITY == 1 ) - - uint8_t ucQueueGetQueueType( QueueHandle_t xQueue ) - { - return ( ( Queue_t * ) xQueue )->ucQueueType; - } - -#endif /* configUSE_TRACE_FACILITY */ -/*-----------------------------------------------------------*/ - -#if ( configUSE_MUTEXES == 1 ) - - static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) - { - UBaseType_t uxHighestPriorityOfWaitingTasks; - - /* If a task waiting for a mutex causes the mutex holder to inherit a - * priority, but the waiting task times out, then the holder should - * disinherit the priority - but only down to the highest priority of any - * other tasks that are waiting for the same mutex. For this purpose, - * return the priority of the highest priority task that is waiting for the - * mutex. */ - if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0U ) - { - uxHighestPriorityOfWaitingTasks = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) ); - } - else - { - uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY; - } - - return uxHighestPriorityOfWaitingTasks; - } - -#endif /* configUSE_MUTEXES */ -/*-----------------------------------------------------------*/ - -static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, - const void * pvItemToQueue, - const BaseType_t xPosition ) -{ - BaseType_t xReturn = pdFALSE; - UBaseType_t uxMessagesWaiting; - - /* This function is called from a critical section. */ - - uxMessagesWaiting = pxQueue->uxMessagesWaiting; - - if( pxQueue->uxItemSize == ( UBaseType_t ) 0 ) - { - #if ( configUSE_MUTEXES == 1 ) - { - if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) - { - /* The mutex is no longer being held. */ - xReturn = xTaskPriorityDisinherit( pxQueue->u.xSemaphore.xMutexHolder ); - pxQueue->u.xSemaphore.xMutexHolder = NULL; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #endif /* configUSE_MUTEXES */ - } - else if( xPosition == queueSEND_TO_BACK ) - { - ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */ - pxQueue->pcWriteTo += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */ - - if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. 
*/ - { - pxQueue->pcWriteTo = pxQueue->pcHead; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - ( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e9087 !e418 MISRA exception as the casts are only redundant for some ports. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. Assert checks null pointer only used when length is 0. */ - pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize; - - if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */ - { - pxQueue->u.xQueue.pcReadFrom = ( pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - if( xPosition == queueOVERWRITE ) - { - if( uxMessagesWaiting > ( UBaseType_t ) 0 ) - { - /* An item is not being added but overwritten, so subtract - * one from the recorded number of items in the queue so when - * one is added again below the number of recorded items remains - * correct. */ - --uxMessagesWaiting; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - - pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1; - - return xReturn; -} -/*-----------------------------------------------------------*/ - -static void prvCopyDataFromQueue( Queue_t * const pxQueue, - void * const pvBuffer ) -{ - if( pxQueue->uxItemSize != ( UBaseType_t ) 0 ) - { - pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */ - - if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */ - { - pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */ - } -} -/*-----------------------------------------------------------*/ - -static void prvUnlockQueue( Queue_t * const pxQueue ) -{ - /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */ - - /* The lock counts contains the number of extra data items placed or - * removed from the queue while the queue was locked. When a queue is - * locked items can be added or removed, but the event lists cannot be - * updated. */ - taskENTER_CRITICAL(); - { - int8_t cTxLock = pxQueue->cTxLock; - - /* See if data was added to the queue while it was locked. */ - while( cTxLock > queueLOCKED_UNMODIFIED ) - { - /* Data was posted while the queue was locked. Are any tasks - * blocked waiting for data to become available? */ - #if ( configUSE_QUEUE_SETS == 1 ) - { - if( pxQueue->pxQueueSetContainer != NULL ) - { - if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) - { - /* The queue is a member of a queue set, and posting to - * the queue set caused a higher priority task to unblock. - * A context switch is required. 
*/ - vTaskMissedYield(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - /* Tasks that are removed from the event list will get - * added to the pending ready list as the scheduler is still - * suspended. */ - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) - { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) - { - /* The task waiting has a higher priority so record that a - * context switch is required. */ - vTaskMissedYield(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - break; - } - } - } - #else /* configUSE_QUEUE_SETS */ - { - /* Tasks that are removed from the event list will get added to - * the pending ready list as the scheduler is still suspended. */ - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) - { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) - { - /* The task waiting has a higher priority so record that - * a context switch is required. */ - vTaskMissedYield(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - break; - } - } - #endif /* configUSE_QUEUE_SETS */ - - --cTxLock; - } - - pxQueue->cTxLock = queueUNLOCKED; - } - taskEXIT_CRITICAL(); - - /* Do the same for the Rx lock. */ - taskENTER_CRITICAL(); - { - int8_t cRxLock = pxQueue->cRxLock; - - while( cRxLock > queueLOCKED_UNMODIFIED ) - { - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) - { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) - { - vTaskMissedYield(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - --cRxLock; - } - else - { - break; - } - } - - pxQueue->cRxLock = queueUNLOCKED; - } - taskEXIT_CRITICAL(); -} -/*-----------------------------------------------------------*/ - -static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) -{ - BaseType_t xReturn; - - taskENTER_CRITICAL(); - { - if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) - { - xReturn = pdTRUE; - } - else - { - xReturn = pdFALSE; - } - } - taskEXIT_CRITICAL(); - - return xReturn; -} -/*-----------------------------------------------------------*/ - -BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) -{ - BaseType_t xReturn; - Queue_t * const pxQueue = xQueue; - - configASSERT( pxQueue ); - - if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) - { - xReturn = pdTRUE; - } - else - { - xReturn = pdFALSE; - } - - return xReturn; -} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */ -/*-----------------------------------------------------------*/ - -static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) -{ - BaseType_t xReturn; - - taskENTER_CRITICAL(); - { - if( pxQueue->uxMessagesWaiting == pxQueue->uxLength ) - { - xReturn = pdTRUE; - } - else - { - xReturn = pdFALSE; - } - } - taskEXIT_CRITICAL(); - - return xReturn; -} -/*-----------------------------------------------------------*/ - -BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) -{ - BaseType_t xReturn; - Queue_t * const pxQueue = xQueue; - - configASSERT( pxQueue ); - - if( pxQueue->uxMessagesWaiting == pxQueue->uxLength ) - { - xReturn = pdTRUE; - } - else - { - xReturn = pdFALSE; - } - - return xReturn; -} /*lint !e818 xQueue could not be pointer to const because it is a typedef. 
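Illustrative only: the ...FromISR() query functions above read uxMessagesWaiting without a critical section, so the answer is a snapshot that may change as soon as it is returned. A sketch with a hypothetical handler, on a port that provides portYIELD_FROM_ISR():

#include <stdint.h>
#include "FreeRTOS.h"
#include "queue.h"

extern QueueHandle_t xTxQueue; /* assumed to exist */

void vAnotherIsrHandler( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    uint32_t ulData = 0;

    /* A cheap pre-check only; xQueueSendFromISR() would in any case just
     * fail with errQUEUE_FULL if the queue filled up in the meantime. */
    if( xQueueIsQueueFullFromISR( xTxQueue ) == pdFALSE )
    {
        ( void ) xQueueSendFromISR( xTxQueue, &ulData, &xHigherPriorityTaskWoken );
    }

    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}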
*/ -/*-----------------------------------------------------------*/ - -#if ( configQUEUE_REGISTRY_SIZE > 0 ) - - void vQueueAddToRegistry( QueueHandle_t xQueue, - const char * pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ - { - UBaseType_t ux; - QueueRegistryItem_t * pxEntryToWrite = NULL; - - configASSERT( xQueue ); - - if( pcQueueName != NULL ) - { - /* See if there is an empty space in the registry. A NULL name denotes - * a free slot. */ - for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) - { - /* Replace an existing entry if the queue is already in the registry. */ - if( xQueue == xQueueRegistry[ ux ].xHandle ) - { - pxEntryToWrite = &( xQueueRegistry[ ux ] ); - break; - } - /* Otherwise, store in the next empty location */ - else if( ( pxEntryToWrite == NULL ) && ( xQueueRegistry[ ux ].pcQueueName == NULL ) ) - { - pxEntryToWrite = &( xQueueRegistry[ ux ] ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - } - - if( pxEntryToWrite != NULL ) - { - /* Store the information on this queue. */ - pxEntryToWrite->pcQueueName = pcQueueName; - pxEntryToWrite->xHandle = xQueue; - - traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName ); - } - } - -#endif /* configQUEUE_REGISTRY_SIZE */ -/*-----------------------------------------------------------*/ - -#if ( configQUEUE_REGISTRY_SIZE > 0 ) - - const char * pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ - { - UBaseType_t ux; - const char * pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ - - configASSERT( xQueue ); - - /* Note there is nothing here to protect against another task adding or - * removing entries from the registry while it is being searched. */ - - for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) - { - if( xQueueRegistry[ ux ].xHandle == xQueue ) - { - pcReturn = xQueueRegistry[ ux ].pcQueueName; - break; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - - return pcReturn; - } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */ - -#endif /* configQUEUE_REGISTRY_SIZE */ -/*-----------------------------------------------------------*/ - -#if ( configQUEUE_REGISTRY_SIZE > 0 ) - - void vQueueUnregisterQueue( QueueHandle_t xQueue ) - { - UBaseType_t ux; - - configASSERT( xQueue ); - - /* See if the handle of the queue being unregistered in actually in the - * registry. */ - for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) - { - if( xQueueRegistry[ ux ].xHandle == xQueue ) - { - /* Set the name to NULL to show that this slot if free again. */ - xQueueRegistry[ ux ].pcQueueName = NULL; - - /* Set the handle to NULL to ensure the same queue handle cannot - * appear in the registry twice if it is added, removed, then - * added again. */ - xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0; - break; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - } /*lint !e818 xQueue could not be pointer to const because it is a typedef. 
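A sketch of the registry API above; it only does anything when configQUEUE_REGISTRY_SIZE is greater than 0, and the name string must remain valid for as long as the registry entry does:

#include "FreeRTOS.h"
#include "queue.h"

void vRegistryExample( QueueHandle_t xQueue )
{
    /* Make the queue visible by name in kernel-aware debuggers. */
    vQueueAddToRegistry( xQueue, "RxQueue" );

    configASSERT( pcQueueGetName( xQueue ) != NULL );

    /* Remove the entry again before, for example, deleting the queue. */
    vQueueUnregisterQueue( xQueue );
}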
*/ - -#endif /* configQUEUE_REGISTRY_SIZE */ -/*-----------------------------------------------------------*/ - -#if ( configUSE_TIMERS == 1 ) - - void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, - TickType_t xTicksToWait, - const BaseType_t xWaitIndefinitely ) - { - Queue_t * const pxQueue = xQueue; - - /* This function should not be called by application code hence the - * 'Restricted' in its name. It is not part of the public API. It is - * designed for use by kernel code, and has special calling requirements. - * It can result in vListInsert() being called on a list that can only - * possibly ever have one item in it, so the list will be fast, but even - * so it should be called with the scheduler locked and not from a critical - * section. */ - - /* Only do anything if there are no messages in the queue. This function - * will not actually cause the task to block, just place it on a blocked - * list. It will not block until the scheduler is unlocked - at which - * time a yield will be performed. If an item is added to the queue while - * the queue is locked, and the calling task blocks on the queue, then the - * calling task will be immediately unblocked when the queue is unlocked. */ - prvLockQueue( pxQueue ); - - if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U ) - { - /* There is nothing in the queue, block for the specified period. */ - vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - prvUnlockQueue( pxQueue ); - } - -#endif /* configUSE_TIMERS */ -/*-----------------------------------------------------------*/ - -#if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) - - QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength ) - { - QueueSetHandle_t pxQueue; - - pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET ); - - return pxQueue; - } - -#endif /* configUSE_QUEUE_SETS */ -/*-----------------------------------------------------------*/ - -#if ( configUSE_QUEUE_SETS == 1 ) - - BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, - QueueSetHandle_t xQueueSet ) - { - BaseType_t xReturn; - - taskENTER_CRITICAL(); - { - if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL ) - { - /* Cannot add a queue/semaphore to more than one queue set. */ - xReturn = pdFAIL; - } - else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 ) - { - /* Cannot add a queue/semaphore to a queue set if there are already - * items in the queue/semaphore. */ - xReturn = pdFAIL; - } - else - { - ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet; - xReturn = pdPASS; - } - } - taskEXIT_CRITICAL(); - - return xReturn; - } - -#endif /* configUSE_QUEUE_SETS */ -/*-----------------------------------------------------------*/ - -#if ( configUSE_QUEUE_SETS == 1 ) - - BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, - QueueSetHandle_t xQueueSet ) - { - BaseType_t xReturn; - Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore; - - if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet ) - { - /* The queue was not a member of the set. 
*/ - xReturn = pdFAIL; - } - else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 ) - { - /* It is dangerous to remove a queue from a set when the queue is - * not empty because the queue set will still hold pending events for - * the queue. */ - xReturn = pdFAIL; - } - else - { - taskENTER_CRITICAL(); - { - /* The queue is no longer contained in the set. */ - pxQueueOrSemaphore->pxQueueSetContainer = NULL; - } - taskEXIT_CRITICAL(); - xReturn = pdPASS; - } - - return xReturn; - } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */ - -#endif /* configUSE_QUEUE_SETS */ -/*-----------------------------------------------------------*/ - -#if ( configUSE_QUEUE_SETS == 1 ) - - QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, - TickType_t const xTicksToWait ) - { - QueueSetMemberHandle_t xReturn = NULL; - - ( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait ); /*lint !e961 Casting from one typedef to another is not redundant. */ - return xReturn; - } - -#endif /* configUSE_QUEUE_SETS */ -/*-----------------------------------------------------------*/ - -#if ( configUSE_QUEUE_SETS == 1 ) - - QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) - { - QueueSetMemberHandle_t xReturn = NULL; - - ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */ - return xReturn; - } - -#endif /* configUSE_QUEUE_SETS */ -/*-----------------------------------------------------------*/ - -#if ( configUSE_QUEUE_SETS == 1 ) - - static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) - { - Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer; - BaseType_t xReturn = pdFALSE; - - /* This function must be called form a critical section. */ - - /* The following line is not reachable in unit tests because every call - * to prvNotifyQueueSetContainer is preceded by a check that - * pxQueueSetContainer != NULL */ - configASSERT( pxQueueSetContainer ); /* LCOV_EXCL_BR_LINE */ - configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ); - - if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ) - { - const int8_t cTxLock = pxQueueSetContainer->cTxLock; - - traceQUEUE_SET_SEND( pxQueueSetContainer ); - - /* The data copied is the handle of the queue that contains data. */ - xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK ); - - if( cTxLock == queueUNLOCKED ) - { - if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE ) - { - if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) - { - /* The task waiting has a higher priority. */ - xReturn = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - prvIncrementQueueTxLock( pxQueueSetContainer, cTxLock ); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - return xReturn; - } - -#endif /* configUSE_QUEUE_SETS */ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#include +#include + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" + +/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified + * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined + * for the header files above, but not in this file, in order to generate the + * correct privileged Vs unprivileged linkage and placement. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */ + + +/* Constants used with the cRxLock and cTxLock structure members. */ +#define queueUNLOCKED ( ( int8_t ) -1 ) +#define queueLOCKED_UNMODIFIED ( ( int8_t ) 0 ) +#define queueINT8_MAX ( ( int8_t ) 127 ) + +/* When the Queue_t structure is used to represent a base queue its pcHead and + * pcTail members are used as pointers into the queue storage area. When the + * Queue_t structure is used to represent a mutex pcHead and pcTail pointers are + * not necessary, and the pcHead pointer is set to NULL to indicate that the + * structure instead holds a pointer to the mutex holder (if any). Map alternative + * names to the pcHead and structure member to ensure the readability of the code + * is maintained. The QueuePointers_t and SemaphoreData_t types are used to form + * a union as their usage is mutually exclusive dependent on what the queue is + * being used for. */ +#define uxQueueType pcHead +#define queueQUEUE_IS_MUTEX NULL + +typedef struct QueuePointers +{ + int8_t * pcTail; /**< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */ + int8_t * pcReadFrom; /**< Points to the last place that a queued item was read from when the structure is used as a queue. */ +} QueuePointers_t; + +typedef struct SemaphoreData +{ + TaskHandle_t xMutexHolder; /**< The handle of the task that holds the mutex. */ + UBaseType_t uxRecursiveCallCount; /**< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. 
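+                                       * The mutex only becomes available to other tasks again
+                                       * once it has been 'given' the same number of times that
+                                       * it was 'taken'.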
*/ +} SemaphoreData_t; + +/* Semaphores do not actually store or copy data, so have an item size of + * zero. */ +#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 ) +#define queueMUTEX_GIVE_BLOCK_TIME ( ( TickType_t ) 0U ) + +#if ( configUSE_PREEMPTION == 0 ) + +/* If the cooperative scheduler is being used then a yield should not be + * performed just because a higher priority task has been woken. */ + #define queueYIELD_IF_USING_PREEMPTION() +#else + #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() +#endif + +/* + * Definition of the queue used by the scheduler. + * Items are queued by copy, not reference. See the following link for the + * rationale: https://www.FreeRTOS.org/Embedded-RTOS-Queues.html + */ +typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */ +{ + int8_t * pcHead; /**< Points to the beginning of the queue storage area. */ + int8_t * pcWriteTo; /**< Points to the free next place in the storage area. */ + + union + { + QueuePointers_t xQueue; /**< Data required exclusively when this structure is used as a queue. */ + SemaphoreData_t xSemaphore; /**< Data required exclusively when this structure is used as a semaphore. */ + } u; + + List_t xTasksWaitingToSend; /**< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */ + List_t xTasksWaitingToReceive; /**< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */ + + volatile UBaseType_t uxMessagesWaiting; /**< The number of items currently in the queue. */ + UBaseType_t uxLength; /**< The length of the queue defined as the number of items it will hold, not the number of bytes. */ + UBaseType_t uxItemSize; /**< The size of each items that the queue will hold. */ + + volatile int8_t cRxLock; /**< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + volatile int8_t cTxLock; /**< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + + #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */ + #endif + + #if ( configUSE_QUEUE_SETS == 1 ) + struct QueueDefinition * pxQueueSetContainer; + #endif + + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxQueueNumber; + uint8_t ucQueueType; + #endif +} xQUEUE; + +/* The old xQUEUE name is maintained above then typedefed to the new Queue_t + * name below to enable the use of older kernel aware debuggers. */ +typedef xQUEUE Queue_t; + +/*-----------------------------------------------------------*/ + +/* + * The queue registry is just a means for kernel aware debuggers to locate + * queue structures. It has no other purpose so is an optional component. + */ +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +/* The type stored within the queue registry array. This allows a name + * to be assigned to each queue making kernel aware debugging a little + * more user friendly. */ + typedef struct QUEUE_REGISTRY_ITEM + { + const char * pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. 
*/ + QueueHandle_t xHandle; + } xQueueRegistryItem; + +/* The old xQueueRegistryItem name is maintained above then typedefed to the + * new xQueueRegistryItem name below to enable the use of older kernel aware + * debuggers. */ + typedef xQueueRegistryItem QueueRegistryItem_t; + +/* The queue registry is simply an array of QueueRegistryItem_t structures. + * The pcQueueName member of a structure being NULL is indicative of the + * array position being vacant. */ + PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ]; + +#endif /* configQUEUE_REGISTRY_SIZE */ + +/* + * Unlocks a queue locked by a call to prvLockQueue. Locking a queue does not + * prevent an ISR from adding or removing items to the queue, but does prevent + * an ISR from removing tasks from the queue event lists. If an ISR finds a + * queue is locked it will instead increment the appropriate queue lock count + * to indicate that a task may require unblocking. When the queue in unlocked + * these lock counts are inspected, and the appropriate action taken. + */ +static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; + +/* + * Uses a critical section to determine if there is any data in a queue. + * + * @return pdTRUE if the queue contains no items, otherwise pdFALSE. + */ +static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; + +/* + * Uses a critical section to determine if there is any space in a queue. + * + * @return pdTRUE if there is no space, otherwise pdFALSE; + */ +static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; + +/* + * Copies an item into the queue, either at the front of the queue or the + * back of the queue. + */ +static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, + const void * pvItemToQueue, + const BaseType_t xPosition ) PRIVILEGED_FUNCTION; + +/* + * Copies an item out of a queue. + */ +static void prvCopyDataFromQueue( Queue_t * const pxQueue, + void * const pvBuffer ) PRIVILEGED_FUNCTION; + +#if ( configUSE_QUEUE_SETS == 1 ) + +/* + * Checks to see if a queue is a member of a queue set, and if so, notifies + * the queue set that the queue contains data. + */ + static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; +#endif + +/* + * Called after a Queue_t structure has been allocated either statically or + * dynamically to fill in the structure's members. + */ +static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, + const UBaseType_t uxItemSize, + uint8_t * pucQueueStorage, + const uint8_t ucQueueType, + Queue_t * pxNewQueue ) PRIVILEGED_FUNCTION; + +/* + * Mutexes are a special type of queue. When a mutex is created, first the + * queue is created, then prvInitialiseMutex() is called to configure the queue + * as a mutex. + */ +#if ( configUSE_MUTEXES == 1 ) + static void prvInitialiseMutex( Queue_t * pxNewQueue ) PRIVILEGED_FUNCTION; +#endif + +#if ( configUSE_MUTEXES == 1 ) + +/* + * If a task waiting for a mutex causes the mutex holder to inherit a + * priority, but the waiting task times out, then the holder should + * disinherit the priority - but only down to the highest priority of any + * other tasks that are waiting for the same mutex. This function returns + * that priority. + */ + static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; +#endif +/*-----------------------------------------------------------*/ + +/* + * Macro to mark a queue as locked. 
Locking a queue prevents an ISR from + * accessing the queue event lists. + */ +#define prvLockQueue( pxQueue ) \ + taskENTER_CRITICAL(); \ + { \ + if( ( pxQueue )->cRxLock == queueUNLOCKED ) \ + { \ + ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \ + } \ + if( ( pxQueue )->cTxLock == queueUNLOCKED ) \ + { \ + ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \ + } \ + } \ + taskEXIT_CRITICAL() + +/* + * Macro to increment cTxLock member of the queue data structure. It is + * capped at the number of tasks in the system as we cannot unblock more + * tasks than the number of tasks in the system. + */ +#define prvIncrementQueueTxLock( pxQueue, cTxLock ) \ + do { \ + const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks(); \ + if( ( UBaseType_t ) ( cTxLock ) < uxNumberOfTasks ) \ + { \ + configASSERT( ( cTxLock ) != queueINT8_MAX ); \ + ( pxQueue )->cTxLock = ( int8_t ) ( ( cTxLock ) + ( int8_t ) 1 ); \ + } \ + } while( 0 ) + +/* + * Macro to increment cRxLock member of the queue data structure. It is + * capped at the number of tasks in the system as we cannot unblock more + * tasks than the number of tasks in the system. + */ +#define prvIncrementQueueRxLock( pxQueue, cRxLock ) \ + do { \ + const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks(); \ + if( ( UBaseType_t ) ( cRxLock ) < uxNumberOfTasks ) \ + { \ + configASSERT( ( cRxLock ) != queueINT8_MAX ); \ + ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \ + } \ + } while( 0 ) +/*-----------------------------------------------------------*/ + +BaseType_t xQueueGenericReset( QueueHandle_t xQueue, + BaseType_t xNewQueue ) +{ + BaseType_t xReturn = pdPASS; + Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + + if( ( pxQueue != NULL ) && + ( pxQueue->uxLength >= 1U ) && + /* Check for multiplication overflow. */ + ( ( SIZE_MAX / pxQueue->uxLength ) >= pxQueue->uxItemSize ) ) + { + taskENTER_CRITICAL(); + { + pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ + pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U; + pxQueue->pcWriteTo = pxQueue->pcHead; + pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ + pxQueue->cRxLock = queueUNLOCKED; + pxQueue->cTxLock = queueUNLOCKED; + + if( xNewQueue == pdFALSE ) + { + /* If there are tasks blocked waiting to read from the queue, then + * the tasks will remain blocked as after this function exits the queue + * will still be empty. If there are tasks blocked waiting to write to + * the queue, then one should be unblocked as after this function exits + * it will be possible to write to it. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* Ensure the event queues start in the correct state. */ + vListInitialise( &( pxQueue->xTasksWaitingToSend ) ); + vListInitialise( &( pxQueue->xTasksWaitingToReceive ) ); + } + } + taskEXIT_CRITICAL(); + } + else + { + xReturn = pdFAIL; + } + + configASSERT( xReturn != pdFAIL ); + + /* A value is returned for calling semantic consistency with previous + * versions. 
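+     * Application code normally reaches this function through the xQueueReset()
+     * macro, which passes pdFALSE for xNewQueue so that a task blocked waiting
+     * to send to the queue is unblocked by the reset.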
*/ + return xReturn; +} +/*-----------------------------------------------------------*/ + +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, + const UBaseType_t uxItemSize, + uint8_t * pucQueueStorage, + StaticQueue_t * pxStaticQueue, + const uint8_t ucQueueType ) + { + Queue_t * pxNewQueue = NULL; + + /* The StaticQueue_t structure and the queue storage area must be + * supplied. */ + configASSERT( pxStaticQueue ); + + if( ( uxQueueLength > ( UBaseType_t ) 0 ) && + ( pxStaticQueue != NULL ) && + + /* A queue storage area should be provided if the item size is not 0, and + * should not be provided if the item size is 0. */ + ( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) ) && + ( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) ) ) + { + #if ( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + * variable of type StaticQueue_t or StaticSemaphore_t equals the size of + * the real queue and semaphore structures. */ + volatile size_t xSize = sizeof( StaticQueue_t ); + + /* This assertion cannot be branch covered in unit tests */ + configASSERT( xSize == sizeof( Queue_t ) ); /* LCOV_EXCL_BR_LINE */ + ( void ) xSize; /* Keeps lint quiet when configASSERT() is not defined. */ + } + #endif /* configASSERT_DEFINED */ + + /* The address of a statically allocated queue was passed in, use it. + * The address of a statically allocated storage area was also passed in + * but is already set. */ + pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */ + + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + { + /* Queues can be allocated wither statically or dynamically, so + * note this queue was allocated statically in case the queue is + * later deleted. */ + pxNewQueue->ucStaticallyAllocated = pdTRUE; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + + prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue ); + } + else + { + configASSERT( pxNewQueue ); + mtCOVERAGE_TEST_MARKER(); + } + + return pxNewQueue; + } + +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, + const UBaseType_t uxItemSize, + const uint8_t ucQueueType ) + { + Queue_t * pxNewQueue = NULL; + size_t xQueueSizeInBytes; + uint8_t * pucQueueStorage; + + if( ( uxQueueLength > ( UBaseType_t ) 0 ) && + /* Check for multiplication overflow. */ + ( ( SIZE_MAX / uxQueueLength ) >= uxItemSize ) && + /* Check for addition overflow. */ + ( ( SIZE_MAX - sizeof( Queue_t ) ) >= ( uxQueueLength * uxItemSize ) ) ) + { + /* Allocate enough space to hold the maximum number of items that + * can be in the queue at any time. It is valid for uxItemSize to be + * zero in the case the queue is used as a semaphore. */ + xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + + /* Allocate the queue and storage area. Justification for MISRA + * deviation as follows: pvPortMalloc() always ensures returned memory + * blocks are aligned per the requirements of the MCU stack. 
In this case + * pvPortMalloc() must return a pointer that is guaranteed to meet the + * alignment requirements of the Queue_t structure - which in this case + * is an int8_t *. Therefore, whenever the stack alignment requirements + * are greater than or equal to the pointer to char requirements the cast + * is safe. In other cases alignment requirements are not strict (one or + * two bytes). */ + pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); /*lint !e9087 !e9079 see comment above. */ + + if( pxNewQueue != NULL ) + { + /* Jump past the queue structure to find the location of the queue + * storage area. */ + pucQueueStorage = ( uint8_t * ) pxNewQueue; + pucQueueStorage += sizeof( Queue_t ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + /* Queues can be created either statically or dynamically, so + * note this task was created dynamically in case it is later + * deleted. */ + pxNewQueue->ucStaticallyAllocated = pdFALSE; + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + + prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue ); + } + else + { + traceQUEUE_CREATE_FAILED( ucQueueType ); + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + configASSERT( pxNewQueue ); + mtCOVERAGE_TEST_MARKER(); + } + + return pxNewQueue; + } + +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, + const UBaseType_t uxItemSize, + uint8_t * pucQueueStorage, + const uint8_t ucQueueType, + Queue_t * pxNewQueue ) +{ + /* Remove compiler warnings about unused parameters should + * configUSE_TRACE_FACILITY not be set to 1. */ + ( void ) ucQueueType; + + if( uxItemSize == ( UBaseType_t ) 0 ) + { + /* No RAM was allocated for the queue storage area, but PC head cannot + * be set to NULL because NULL is used as a key to say the queue is used as + * a mutex. Therefore just set pcHead to point to the queue as a benign + * value that is known to be within the memory map. */ + pxNewQueue->pcHead = ( int8_t * ) pxNewQueue; + } + else + { + /* Set the head to the start of the queue storage area. */ + pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage; + } + + /* Initialise the queue members as described where the queue type is + * defined. */ + pxNewQueue->uxLength = uxQueueLength; + pxNewQueue->uxItemSize = uxItemSize; + ( void ) xQueueGenericReset( pxNewQueue, pdTRUE ); + + #if ( configUSE_TRACE_FACILITY == 1 ) + { + pxNewQueue->ucQueueType = ucQueueType; + } + #endif /* configUSE_TRACE_FACILITY */ + + #if ( configUSE_QUEUE_SETS == 1 ) + { + pxNewQueue->pxQueueSetContainer = NULL; + } + #endif /* configUSE_QUEUE_SETS */ + + traceQUEUE_CREATE( pxNewQueue ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + static void prvInitialiseMutex( Queue_t * pxNewQueue ) + { + if( pxNewQueue != NULL ) + { + /* The queue create function will set all the queue structure members + * correctly for a generic queue, but this function is creating a + * mutex. Overwrite those members that need to be set differently - + * in particular the information required for priority inheritance. */ + pxNewQueue->u.xSemaphore.xMutexHolder = NULL; + pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX; + + /* In case this is a recursive mutex. 
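+             * (uxRecursiveCallCount is only advanced by xQueueTakeMutexRecursive()
+             * and unwound again by xQueueGiveMutexRecursive(), both defined later
+             * in this file.)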
*/ + pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0; + + traceCREATE_MUTEX( pxNewQueue ); + + /* Start with the semaphore in the expected state. */ + ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK ); + } + else + { + traceCREATE_MUTEX_FAILED(); + } + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType ) + { + QueueHandle_t xNewQueue; + const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0; + + xNewQueue = xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType ); + prvInitialiseMutex( ( Queue_t * ) xNewQueue ); + + return xNewQueue; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, + StaticQueue_t * pxStaticQueue ) + { + QueueHandle_t xNewQueue; + const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0; + + /* Prevent compiler warnings about unused parameters if + * configUSE_TRACE_FACILITY does not equal 1. */ + ( void ) ucQueueType; + + xNewQueue = xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType ); + prvInitialiseMutex( ( Queue_t * ) xNewQueue ); + + return xNewQueue; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + + TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore ) + { + TaskHandle_t pxReturn; + Queue_t * const pxSemaphore = ( Queue_t * ) xSemaphore; + + configASSERT( xSemaphore ); + + /* This function is called by xSemaphoreGetMutexHolder(), and should not + * be called directly. Note: This is a good way of determining if the + * calling task is the mutex holder, but not a good way of determining the + * identity of the mutex holder, as the holder may change between the + * following critical section exiting and the function returning. */ + taskENTER_CRITICAL(); + { + if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX ) + { + pxReturn = pxSemaphore->u.xSemaphore.xMutexHolder; + } + else + { + pxReturn = NULL; + } + } + taskEXIT_CRITICAL(); + + return pxReturn; + } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */ + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + + TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore ) + { + TaskHandle_t pxReturn; + + configASSERT( xSemaphore ); + + /* Mutexes cannot be used in interrupt service routines, so the mutex + * holder should not change in an ISR, and therefore a critical section is + * not required here. */ + if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX ) + { + pxReturn = ( ( Queue_t * ) xSemaphore )->u.xSemaphore.xMutexHolder; + } + else + { + pxReturn = NULL; + } + + return pxReturn; + } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. 
*/ + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + + BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex ) + { + BaseType_t xReturn; + Queue_t * const pxMutex = ( Queue_t * ) xMutex; + + configASSERT( pxMutex ); + + /* If this is the task that holds the mutex then xMutexHolder will not + * change outside of this task. If this task does not hold the mutex then + * pxMutexHolder can never coincidentally equal the tasks handle, and as + * this is the only condition we are interested in it does not matter if + * pxMutexHolder is accessed simultaneously by another task. Therefore no + * mutual exclusion is required to test the pxMutexHolder variable. */ + if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() ) + { + traceGIVE_MUTEX_RECURSIVE( pxMutex ); + + /* uxRecursiveCallCount cannot be zero if xMutexHolder is equal to + * the task handle, therefore no underflow check is required. Also, + * uxRecursiveCallCount is only modified by the mutex holder, and as + * there can only be one, no mutual exclusion is required to modify the + * uxRecursiveCallCount member. */ + ( pxMutex->u.xSemaphore.uxRecursiveCallCount )--; + + /* Has the recursive call count unwound to 0? */ + if( pxMutex->u.xSemaphore.uxRecursiveCallCount == ( UBaseType_t ) 0 ) + { + /* Return the mutex. This will automatically unblock any other + * task that might be waiting to access the mutex. */ + ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xReturn = pdPASS; + } + else + { + /* The mutex cannot be given because the calling task is not the + * holder. */ + xReturn = pdFAIL; + + traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex ); + } + + return xReturn; + } + +#endif /* configUSE_RECURSIVE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + + BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) + { + BaseType_t xReturn; + Queue_t * const pxMutex = ( Queue_t * ) xMutex; + + configASSERT( pxMutex ); + + /* Comments regarding mutual exclusion as per those within + * xQueueGiveMutexRecursive(). */ + + traceTAKE_MUTEX_RECURSIVE( pxMutex ); + + if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() ) + { + ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++; + xReturn = pdPASS; + } + else + { + xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait ); + + /* pdPASS will only be returned if the mutex was successfully + * obtained. The calling task may have entered the Blocked state + * before reaching here. 
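+             *
+             * As a rough caller-side sketch (using the semaphore API, with
+             * xRecursiveMutex and xBlockTime standing in for application-supplied
+             * values):
+             *
+             *     if( xSemaphoreTakeRecursive( xRecursiveMutex, xBlockTime ) == pdPASS )
+             *     {
+             *         // ... access the resource guarded by the mutex ...
+             *         xSemaphoreGiveRecursive( xRecursiveMutex );
+             *     }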
*/ + if( xReturn != pdFAIL ) + { + ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++; + } + else + { + traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex ); + } + } + + return xReturn; + } + +#endif /* configUSE_RECURSIVE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, + const UBaseType_t uxInitialCount, + StaticQueue_t * pxStaticQueue ) + { + QueueHandle_t xHandle = NULL; + + if( ( uxMaxCount != 0 ) && + ( uxInitialCount <= uxMaxCount ) ) + { + xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE ); + + if( xHandle != NULL ) + { + ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount; + + traceCREATE_COUNTING_SEMAPHORE(); + } + else + { + traceCREATE_COUNTING_SEMAPHORE_FAILED(); + } + } + else + { + configASSERT( xHandle ); + mtCOVERAGE_TEST_MARKER(); + } + + return xHandle; + } + +#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, + const UBaseType_t uxInitialCount ) + { + QueueHandle_t xHandle = NULL; + + if( ( uxMaxCount != 0 ) && + ( uxInitialCount <= uxMaxCount ) ) + { + xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE ); + + if( xHandle != NULL ) + { + ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount; + + traceCREATE_COUNTING_SEMAPHORE(); + } + else + { + traceCREATE_COUNTING_SEMAPHORE_FAILED(); + } + } + else + { + configASSERT( xHandle ); + mtCOVERAGE_TEST_MARKER(); + } + + return xHandle; + } + +#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + +BaseType_t xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) +{ + BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired; + TimeOut_t xTimeOut; + Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); + configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) ); + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + /*lint -save -e904 This function relaxes the coding standard somewhat to + * allow return statements within the function itself. This is done in the + * interest of execution time efficiency. */ + for( ; ; ) + { + taskENTER_CRITICAL(); + { + /* Is there room on the queue now? The running task must be the + * highest priority task wanting to access the queue. If the head item + * in the queue is to be overwritten then it does not matter if the + * queue is full. 
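+             * (queueOVERWRITE is only ever used with queues of length one - the
+             * configASSERT() at the top of this function checks this - so an
+             * overwrite replaces the existing item rather than adding a new one.)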
*/ + if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) + { + traceQUEUE_SEND( pxQueue ); + + #if ( configUSE_QUEUE_SETS == 1 ) + { + const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting; + + xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); + + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) ) + { + /* Do not notify the queue set as an existing item + * was overwritten in the queue so the number of items + * in the queue has not changed. */ + mtCOVERAGE_TEST_MARKER(); + } + else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) + { + /* The queue is a member of a queue set, and posting + * to the queue set caused a higher priority task to + * unblock. A context switch is required. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* If there was a task waiting for data to arrive on the + * queue then unblock it now. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The unblocked task has a priority higher than + * our own so yield immediately. Yes it is ok to + * do this from within the critical section - the + * kernel takes care of that. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else if( xYieldRequired != pdFALSE ) + { + /* This path is a special case that will only get + * executed if the task was holding multiple mutexes + * and the mutexes were given back in an order that is + * different to that in which they were taken. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); + + /* If there was a task waiting for data to arrive on the + * queue then unblock it now. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The unblocked task has a priority higher than + * our own so yield immediately. Yes it is ok to do + * this from within the critical section - the kernel + * takes care of that. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else if( xYieldRequired != pdFALSE ) + { + /* This path is a special case that will only get + * executed if the task was holding multiple mutexes and + * the mutexes were given back in an order that is + * different to that in which they were taken. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_QUEUE_SETS */ + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* The queue was full and no block time is specified (or + * the block time has expired) so leave now. */ + taskEXIT_CRITICAL(); + + /* Return to the original privilege level before exiting + * the function. */ + traceQUEUE_SEND_FAILED( pxQueue ); + return errQUEUE_FULL; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The queue was full and a block time was specified so + * configure the timeout structure. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. 
*/ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can send to and receive from the queue + * now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + if( prvIsQueueFull( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_SEND( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait ); + + /* Unlocking the queue means queue events can effect the + * event list. It is possible that interrupts occurring now + * remove this task from the event list again - but as the + * scheduler is suspended the task will go onto the pending + * ready list instead of the actual ready list. */ + prvUnlockQueue( pxQueue ); + + /* Resuming the scheduler will move tasks from the pending + * ready list into the ready list - so it is feasible that this + * task is already in the ready list before it yields - in which + * case the yield will not cause a context switch unless there + * is also a higher priority task in the pending ready list. */ + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + } + else + { + /* Try again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* The timeout has expired. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + traceQUEUE_SEND_FAILED( pxQueue ); + return errQUEUE_FULL; + } + } /*lint -restore */ +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, + const void * const pvItemToQueue, + BaseType_t * const pxHigherPriorityTaskWoken, + const BaseType_t xCopyPosition ) +{ + BaseType_t xReturn; + portBASE_TYPE xSavedInterruptStatus; + Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); + configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) ); + + /* RTOS ports that support interrupt nesting have the concept of a maximum + * system call (or maximum API call) interrupt priority. Interrupts that are + * above the maximum system call priority are kept permanently enabled, even + * when the RTOS kernel is in a critical section, but cannot make any calls to + * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + * failure if a FreeRTOS API function is called from an interrupt that has been + * assigned a priority above the configured maximum system call priority. + * Only FreeRTOS functions that end in FromISR can be called from interrupts + * that have been assigned a priority at or (logically) below the maximum + * system call interrupt priority. FreeRTOS maintains a separate interrupt + * safe API to ensure interrupt entry is as fast and as simple as possible. + * More information (albeit Cortex-M specific) is provided on the following + * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + /* Similar to xQueueGenericSend, except without blocking if there is no room + * in the queue. Also don't directly wake a task that was blocked on a queue + * read, instead return a flag to say whether a context switch is required or + * not (i.e. 
has a task with a higher priority than us been woken by this + * post). */ + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) + { + const int8_t cTxLock = pxQueue->cTxLock; + const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting; + + traceQUEUE_SEND_FROM_ISR( pxQueue ); + + /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a + * semaphore or mutex. That means prvCopyDataToQueue() cannot result + * in a task disinheriting a priority and prvCopyDataToQueue() can be + * called here even though the disinherit function does not check if + * the scheduler is suspended before accessing the ready lists. */ + ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); + + /* The event list is not altered if the queue is locked. This will + * be done when the queue is unlocked later. */ + if( cTxLock == queueUNLOCKED ) + { + #if ( configUSE_QUEUE_SETS == 1 ) + { + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) ) + { + /* Do not notify the queue set as an existing item + * was overwritten in the queue so the number of items + * in the queue has not changed. */ + mtCOVERAGE_TEST_MARKER(); + } + else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) + { + /* The queue is a member of a queue set, and posting + * to the queue set caused a higher priority task to + * unblock. A context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so + * record that a context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that a + * context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Not used in this path. */ + ( void ) uxPreviousMessagesWaiting; + } + #endif /* configUSE_QUEUE_SETS */ + } + else + { + /* Increment the lock count so the task that unlocks the queue + * knows that data was posted while it was locked. 
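+                 * prvUnlockQueue() later inspects this count and wakes at most one
+                 * waiting task (or notifies the owning queue set) for each increment
+                 * recorded here.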
*/ + prvIncrementQueueTxLock( pxQueue, cTxLock ); + } + + xReturn = pdPASS; + } + else + { + traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ); + xReturn = errQUEUE_FULL; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, + BaseType_t * const pxHigherPriorityTaskWoken ) +{ + BaseType_t xReturn; + portBASE_TYPE xSavedInterruptStatus; + Queue_t * const pxQueue = xQueue; + + /* Similar to xQueueGenericSendFromISR() but used with semaphores where the + * item size is 0. Don't directly wake a task that was blocked on a queue + * read, instead return a flag to say whether a context switch is required or + * not (i.e. has a task with a higher priority than us been woken by this + * post). */ + + configASSERT( pxQueue ); + + /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR() + * if the item size is not 0. */ + configASSERT( pxQueue->uxItemSize == 0 ); + + /* Normally a mutex would not be given from an interrupt, especially if + * there is a mutex holder, as priority inheritance makes no sense for an + * interrupts, only tasks. */ + configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->u.xSemaphore.xMutexHolder != NULL ) ) ); + + /* RTOS ports that support interrupt nesting have the concept of a maximum + * system call (or maximum API call) interrupt priority. Interrupts that are + * above the maximum system call priority are kept permanently enabled, even + * when the RTOS kernel is in a critical section, but cannot make any calls to + * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + * failure if a FreeRTOS API function is called from an interrupt that has been + * assigned a priority above the configured maximum system call priority. + * Only FreeRTOS functions that end in FromISR can be called from interrupts + * that have been assigned a priority at or (logically) below the maximum + * system call interrupt priority. FreeRTOS maintains a separate interrupt + * safe API to ensure interrupt entry is as fast and as simple as possible. + * More information (albeit Cortex-M specific) is provided on the following + * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* When the queue is used to implement a semaphore no data is ever + * moved through the queue but it is still valid to see if the queue 'has + * space'. */ + if( uxMessagesWaiting < pxQueue->uxLength ) + { + const int8_t cTxLock = pxQueue->cTxLock; + + traceQUEUE_SEND_FROM_ISR( pxQueue ); + + /* A task can only have an inherited priority if it is a mutex + * holder - and if there is a mutex holder then the mutex cannot be + * given from an ISR. As this is the ISR version of the function it + * can be assumed there is no mutex holder and no need to determine if + * priority disinheritance is needed. Simply increase the count of + * messages (semaphores) available. */ + pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1; + + /* The event list is not altered if the queue is locked. This will + * be done when the queue is unlocked later. 
*/ + if( cTxLock == queueUNLOCKED ) + { + #if ( configUSE_QUEUE_SETS == 1 ) + { + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) + { + /* The semaphore is a member of a queue set, and + * posting to the queue set caused a higher priority + * task to unblock. A context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so + * record that a context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that a + * context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_QUEUE_SETS */ + } + else + { + /* Increment the lock count so the task that unlocks the queue + * knows that data was posted while it was locked. */ + prvIncrementQueueTxLock( pxQueue, cTxLock ); + } + + xReturn = pdPASS; + } + else + { + traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ); + xReturn = errQUEUE_FULL; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) +{ + BaseType_t xEntryTimeSet = pdFALSE; + TimeOut_t xTimeOut; + Queue_t * const pxQueue = xQueue; + + /* Check the pointer is not NULL. */ + configASSERT( ( pxQueue ) ); + + /* The buffer into which data is received can only be NULL if the data size + * is zero (so no data is copied into the buffer). */ + configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) ); + + /* Cannot block if the scheduler is suspended. */ + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + /*lint -save -e904 This function relaxes the coding standard somewhat to + * allow return statements within the function itself. This is done in the + * interest of execution time efficiency. */ + for( ; ; ) + { + taskENTER_CRITICAL(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* Is there data in the queue now? To be running the calling task + * must be the highest priority task wanting to access the queue. */ + if( uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Data available, remove one item. 
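+                 * (Caller-side sketch - xPointerQueue and pcRxedMessage are
+                 * illustrative names, assuming a queue created to hold char pointers:)
+                 *
+                 *     char * pcRxedMessage;
+                 *     if( xQueueReceive( xPointerQueue, &pcRxedMessage, pdMS_TO_TICKS( 100 ) ) == pdPASS )
+                 *     {
+                 *         // pcRxedMessage now holds the value copied out of the queue.
+                 *     }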
*/ + prvCopyDataFromQueue( pxQueue, pvBuffer ); + traceQUEUE_RECEIVE( pxQueue ); + pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1; + + /* There is now space in the queue, were any tasks waiting to + * post to the queue? If so, unblock the highest priority waiting + * task. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* The queue was empty and no block time is specified (or + * the block time has expired) so leave now. */ + taskEXIT_CRITICAL(); + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The queue was empty and a block time was specified so + * configure the timeout structure. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. */ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can send to and receive from the queue + * now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* The timeout has not expired. If the queue is still empty place + * the task on the list of tasks waiting to receive from the queue. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* The queue contains data again. Loop back to try and read the + * data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* Timed out. If there is no data in the queue exit, otherwise loop + * back and attempt to read the data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } /*lint -restore */ +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) +{ + BaseType_t xEntryTimeSet = pdFALSE; + TimeOut_t xTimeOut; + Queue_t * const pxQueue = xQueue; + + #if ( configUSE_MUTEXES == 1 ) + BaseType_t xInheritanceOccurred = pdFALSE; + #endif + + /* Check the queue pointer is not NULL. */ + configASSERT( ( pxQueue ) ); + + /* Check this really is a semaphore, in which case the item size will be + * 0. */ + configASSERT( pxQueue->uxItemSize == 0 ); + + /* Cannot block if the scheduler is suspended. */ + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + /*lint -save -e904 This function relaxes the coding standard somewhat to allow return + * statements within the function itself. This is done in the interest + * of execution time efficiency. 
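+     *
+     * xSemaphoreTake() is implemented in terms of this function, and
+     * xQueueTakeMutexRecursive() also routes its first (non-recursive) take
+     * through here, so the code below handles binary semaphores, counting
+     * semaphores and mutexes alike.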
*/ + for( ; ; ) + { + taskENTER_CRITICAL(); + { + /* Semaphores are queues with an item size of 0, and where the + * number of messages in the queue is the semaphore's count value. */ + const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting; + + /* Is there data in the queue now? To be running the calling task + * must be the highest priority task wanting to access the queue. */ + if( uxSemaphoreCount > ( UBaseType_t ) 0 ) + { + traceQUEUE_RECEIVE( pxQueue ); + + /* Semaphores are queues with a data size of zero and where the + * messages waiting is the semaphore's count. Reduce the count. */ + pxQueue->uxMessagesWaiting = uxSemaphoreCount - ( UBaseType_t ) 1; + + #if ( configUSE_MUTEXES == 1 ) + { + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + { + /* Record the information required to implement + * priority inheritance should it become necessary. */ + pxQueue->u.xSemaphore.xMutexHolder = pvTaskIncrementMutexHeldCount(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_MUTEXES */ + + /* Check to see if other tasks are blocked waiting to give the + * semaphore, and if so, unblock the highest priority such task. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* The semaphore count was 0 and no block time is specified + * (or the block time has expired) so exit now. */ + taskEXIT_CRITICAL(); + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The semaphore count was 0 and a block time was specified + * so configure the timeout structure ready to block. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. */ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can give to and take from the semaphore + * now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* A block time is specified and not expired. If the semaphore + * count is 0 then enter the Blocked state to wait for a semaphore to + * become available. As semaphores are implemented with queues the + * queue being empty is equivalent to the semaphore count being 0. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + + #if ( configUSE_MUTEXES == 1 ) + { + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + { + taskENTER_CRITICAL(); + { + xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder ); + } + taskEXIT_CRITICAL(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* if ( configUSE_MUTEXES == 1 ) */ + + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* There was no timeout and the semaphore count was not 0, so + * attempt to take the semaphore again. 
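+                 *
+                 * Illustrative aside (not part of the kernel source; xMutex is an assumed
+                 * application handle): the priority inheritance handled in this function is
+                 * transparent to application code. A hypothetical caller using a mutex
+                 * created with xSemaphoreCreateMutex() simply writes, for example:
+                 *
+                 *     if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 10 ) ) == pdTRUE )
+                 *     {
+                 *         ...access the resource guarded by the mutex...
+                 *         ( void ) xSemaphoreGive( xMutex );
+                 *     }
+                 *
+                 * The inherit/disinherit logic seen in this file runs inside those semphr.h
+                 * macros: xSemaphoreTake() wraps this function, and xSemaphoreGive() wraps
+                 * the queue send path.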
*/ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* Timed out. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + /* If the semaphore count is 0 exit now as the timeout has + * expired. Otherwise return to attempt to take the semaphore that is + * known to be available. As semaphores are implemented by queues the + * queue being empty is equivalent to the semaphore count being 0. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + #if ( configUSE_MUTEXES == 1 ) + { + /* xInheritanceOccurred could only have be set if + * pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to + * test the mutex type again to check it is actually a mutex. */ + if( xInheritanceOccurred != pdFALSE ) + { + taskENTER_CRITICAL(); + { + UBaseType_t uxHighestWaitingPriority; + + /* This task blocking on the mutex caused another + * task to inherit this task's priority. Now this task + * has timed out the priority should be disinherited + * again, but only as low as the next highest priority + * task that is waiting for the same mutex. */ + uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue ); + vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority ); + } + taskEXIT_CRITICAL(); + } + } + #endif /* configUSE_MUTEXES */ + + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } /*lint -restore */ +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) +{ + BaseType_t xEntryTimeSet = pdFALSE; + TimeOut_t xTimeOut; + int8_t * pcOriginalReadPosition; + Queue_t * const pxQueue = xQueue; + + /* Check the pointer is not NULL. */ + configASSERT( ( pxQueue ) ); + + /* The buffer into which data is received can only be NULL if the data size + * is zero (so no data is copied into the buffer. */ + configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) ); + + /* Cannot block if the scheduler is suspended. */ + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + /*lint -save -e904 This function relaxes the coding standard somewhat to + * allow return statements within the function itself. This is done in the + * interest of execution time efficiency. */ + for( ; ; ) + { + taskENTER_CRITICAL(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* Is there data in the queue now? To be running the calling task + * must be the highest priority task wanting to access the queue. */ + if( uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Remember the read position so it can be reset after the data + * is read from the queue as this function is only peeking the + * data, not removing it. */ + pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom; + + prvCopyDataFromQueue( pxQueue, pvBuffer ); + traceQUEUE_PEEK( pxQueue ); + + /* The data is not being removed, so reset the read pointer. */ + pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition; + + /* The data is being left in the queue, so see if there are + * any other tasks waiting for the data. 
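+              *
+              * Illustrative aside (application level, not kernel code; xMsgQueue, Message_t
+              * and prvWantsMessage() are assumed application names): because a peek leaves
+              * the item in place, a hypothetical reader can inspect a message and only
+              * remove it once it has decided to consume it:
+              *
+              *     Message_t xMsg;
+              *
+              *     if( xQueuePeek( xMsgQueue, &xMsg, pdMS_TO_TICKS( 100 ) ) == pdPASS )
+              *     {
+              *         if( prvWantsMessage( &xMsg ) == pdTRUE )
+              *         {
+              *             ( void ) xQueueReceive( xMsgQueue, &xMsg, 0 );
+              *         }
+              *     }
+              *
+              * As the return paths in this function show, xQueuePeek() returns pdPASS on
+              * success and errQUEUE_EMPTY if nothing arrives before the block time expires.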
*/ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority than this task. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* The queue was empty and no block time is specified (or + * the block time has expired) so leave now. */ + taskEXIT_CRITICAL(); + traceQUEUE_PEEK_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The queue was empty and a block time was specified so + * configure the timeout structure ready to enter the blocked + * state. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. */ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can send to and receive from the queue + * now that the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* Timeout has not expired yet, check to see if there is data in the + * queue now, and if not enter the Blocked state to wait for data. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_PEEK( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* There is data in the queue now, so don't enter the blocked + * state, instead return to try and obtain the data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* The timeout has expired. If there is still no data in the queue + * exit, otherwise go back and try to read the data again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceQUEUE_PEEK_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } /*lint -restore */ +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, + void * const pvBuffer, + BaseType_t * const pxHigherPriorityTaskWoken ) +{ + BaseType_t xReturn; + portBASE_TYPE xSavedInterruptStatus; + Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); + + /* RTOS ports that support interrupt nesting have the concept of a maximum + * system call (or maximum API call) interrupt priority. Interrupts that are + * above the maximum system call priority are kept permanently enabled, even + * when the RTOS kernel is in a critical section, but cannot make any calls to + * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + * failure if a FreeRTOS API function is called from an interrupt that has been + * assigned a priority above the configured maximum system call priority. 
+ * Only FreeRTOS functions that end in FromISR can be called from interrupts + * that have been assigned a priority at or (logically) below the maximum + * system call interrupt priority. FreeRTOS maintains a separate interrupt + * safe API to ensure interrupt entry is as fast and as simple as possible. + * More information (albeit Cortex-M specific) is provided on the following + * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* Cannot block in an ISR, so check there is data available. */ + if( uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + const int8_t cRxLock = pxQueue->cRxLock; + + traceQUEUE_RECEIVE_FROM_ISR( pxQueue ); + + prvCopyDataFromQueue( pxQueue, pvBuffer ); + pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1; + + /* If the queue is locked the event list will not be modified. + * Instead update the lock count so the task that unlocks the queue + * will know that an ISR has removed data while the queue was + * locked. */ + if( cRxLock == queueUNLOCKED ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + /* The task waiting has a higher priority than us so + * force a context switch. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* Increment the lock count so the task that unlocks the queue + * knows that data was removed while it was locked. */ + prvIncrementQueueRxLock( pxQueue, cRxLock ); + } + + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ); + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, + void * const pvBuffer ) +{ + BaseType_t xReturn; + portBASE_TYPE xSavedInterruptStatus; + int8_t * pcOriginalReadPosition; + Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); + configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */ + + /* RTOS ports that support interrupt nesting have the concept of a maximum + * system call (or maximum API call) interrupt priority. Interrupts that are + * above the maximum system call priority are kept permanently enabled, even + * when the RTOS kernel is in a critical section, but cannot make any calls to + * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + * failure if a FreeRTOS API function is called from an interrupt that has been + * assigned a priority above the configured maximum system call priority. + * Only FreeRTOS functions that end in FromISR can be called from interrupts + * that have been assigned a priority at or (logically) below the maximum + * system call interrupt priority. FreeRTOS maintains a separate interrupt + * safe API to ensure interrupt entry is as fast and as simple as possible. 
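+ *
+ * Illustrative aside (application code; vExampleISR and xRxQueue are assumed names):
+ * a deferred-processing interrupt handler typically pairs a FromISR call with
+ * portYIELD_FROM_ISR() so that any task woken by the call runs as soon as the
+ * interrupt completes:
+ *
+ *     void vExampleISR( void )
+ *     {
+ *         BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+ *         uint8_t ucByte;
+ *
+ *         while( xQueueReceiveFromISR( xRxQueue, &ucByte, &xHigherPriorityTaskWoken ) == pdPASS )
+ *         {
+ *             ...process ucByte...
+ *         }
+ *
+ *         portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+ *     }
+ *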
+ * More information (albeit Cortex-M specific) is provided on the following + * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Cannot block in an ISR, so check there is data available. */ + if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + traceQUEUE_PEEK_FROM_ISR( pxQueue ); + + /* Remember the read position so it can be reset as nothing is + * actually being removed from the queue. */ + pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom; + prvCopyDataFromQueue( pxQueue, pvBuffer ); + pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition; + + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) +{ + UBaseType_t uxReturn; + + configASSERT( xQueue ); + + taskENTER_CRITICAL(); + { + uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting; + } + taskEXIT_CRITICAL(); + + return uxReturn; +} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ +/*-----------------------------------------------------------*/ + +UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) +{ + UBaseType_t uxReturn; + Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + + taskENTER_CRITICAL(); + { + uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting; + } + taskEXIT_CRITICAL(); + + return uxReturn; +} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ +/*-----------------------------------------------------------*/ + +UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) +{ + UBaseType_t uxReturn; + Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + uxReturn = pxQueue->uxMessagesWaiting; + + return uxReturn; +} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ +/*-----------------------------------------------------------*/ + +void vQueueDelete( QueueHandle_t xQueue ) +{ + Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + traceQUEUE_DELETE( pxQueue ); + + #if ( configQUEUE_REGISTRY_SIZE > 0 ) + { + vQueueUnregisterQueue( pxQueue ); + } + #endif + + #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) + { + /* The queue can only have been allocated dynamically - free it + * again. */ + vPortFree( pxQueue ); + } + #elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + { + /* The queue could have been allocated statically or dynamically, so + * check before attempting to free the memory. */ + if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE ) + { + vPortFree( pxQueue ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #else /* if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) */ + { + /* The queue must have been statically allocated, so is not going to be + * deleted. Avoid compiler warnings about the unused parameter. 
*/ + ( void ) pxQueue; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue ) + { + return ( ( Queue_t * ) xQueue )->uxQueueNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vQueueSetQueueNumber( QueueHandle_t xQueue, + UBaseType_t uxQueueNumber ) + { + ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + uint8_t ucQueueGetQueueType( QueueHandle_t xQueue ) + { + return ( ( Queue_t * ) xQueue )->ucQueueType; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) + { + UBaseType_t uxHighestPriorityOfWaitingTasks; + + /* If a task waiting for a mutex causes the mutex holder to inherit a + * priority, but the waiting task times out, then the holder should + * disinherit the priority - but only down to the highest priority of any + * other tasks that are waiting for the same mutex. For this purpose, + * return the priority of the highest priority task that is waiting for the + * mutex. */ + if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0U ) + { + uxHighestPriorityOfWaitingTasks = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) ); + } + else + { + uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY; + } + + return uxHighestPriorityOfWaitingTasks; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, + const void * pvItemToQueue, + const BaseType_t xPosition ) +{ + BaseType_t xReturn = pdFALSE; + UBaseType_t uxMessagesWaiting; + + /* This function is called from a critical section. */ + + uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + if( pxQueue->uxItemSize == ( UBaseType_t ) 0 ) + { + #if ( configUSE_MUTEXES == 1 ) + { + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + { + /* The mutex is no longer being held. */ + xReturn = xTaskPriorityDisinherit( pxQueue->u.xSemaphore.xMutexHolder ); + pxQueue->u.xSemaphore.xMutexHolder = NULL; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_MUTEXES */ + } + else if( xPosition == queueSEND_TO_BACK ) + { + ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */ + pxQueue->pcWriteTo += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */ + + if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. 
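+                                                           *
+                                                           * Worked illustration (an aside with assumed numbers, not normative): for a
+                                                           * queue created with uxLength = 3 and uxItemSize = 4, pcHead points to the
+                                                           * start of the 12 byte storage area and pcTail points just past its end
+                                                           * (pcHead + 12). After the third item has been copied in, pcWriteTo has
+                                                           * advanced to pcHead + 12 == pcTail, so the comparison in the if() statement
+                                                           * above is true and pcWriteTo is wrapped back to pcHead ready for the next
+                                                           * item.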
*/ + { + pxQueue->pcWriteTo = pxQueue->pcHead; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + ( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e9087 !e418 MISRA exception as the casts are only redundant for some ports. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. Assert checks null pointer only used when length is 0. */ + pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize; + + if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */ + { + pxQueue->u.xQueue.pcReadFrom = ( pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xPosition == queueOVERWRITE ) + { + if( uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* An item is not being added but overwritten, so subtract + * one from the recorded number of items in the queue so when + * one is added again below the number of recorded items remains + * correct. */ + --uxMessagesWaiting; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1; + + return xReturn; +} +/*-----------------------------------------------------------*/ + +static void prvCopyDataFromQueue( Queue_t * const pxQueue, + void * const pvBuffer ) +{ + if( pxQueue->uxItemSize != ( UBaseType_t ) 0 ) + { + pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */ + + if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */ + { + pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */ + } +} +/*-----------------------------------------------------------*/ + +static void prvUnlockQueue( Queue_t * const pxQueue ) +{ + /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */ + + /* The lock counts contains the number of extra data items placed or + * removed from the queue while the queue was locked. When a queue is + * locked items can be added or removed, but the event lists cannot be + * updated. */ + taskENTER_CRITICAL(); + { + int8_t cTxLock = pxQueue->cTxLock; + + /* See if data was added to the queue while it was locked. */ + while( cTxLock > queueLOCKED_UNMODIFIED ) + { + /* Data was posted while the queue was locked. Are any tasks + * blocked waiting for data to become available? */ + #if ( configUSE_QUEUE_SETS == 1 ) + { + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) + { + /* The queue is a member of a queue set, and posting to + * the queue set caused a higher priority task to unblock. + * A context switch is required. 
*/ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* Tasks that are removed from the event list will get + * added to the pending ready list as the scheduler is still + * suspended. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that a + * context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + break; + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + /* Tasks that are removed from the event list will get added to + * the pending ready list as the scheduler is still suspended. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that + * a context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + break; + } + } + #endif /* configUSE_QUEUE_SETS */ + + --cTxLock; + } + + pxQueue->cTxLock = queueUNLOCKED; + } + taskEXIT_CRITICAL(); + + /* Do the same for the Rx lock. */ + taskENTER_CRITICAL(); + { + int8_t cRxLock = pxQueue->cRxLock; + + while( cRxLock > queueLOCKED_UNMODIFIED ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + --cRxLock; + } + else + { + break; + } + } + + pxQueue->cRxLock = queueUNLOCKED; + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) +{ + BaseType_t xReturn; + + taskENTER_CRITICAL(); + { + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) +{ + BaseType_t xReturn; + Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */ +/*-----------------------------------------------------------*/ + +static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) +{ + BaseType_t xReturn; + + taskENTER_CRITICAL(); + { + if( pxQueue->uxMessagesWaiting == pxQueue->uxLength ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) +{ + BaseType_t xReturn; + Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + + if( pxQueue->uxMessagesWaiting == pxQueue->uxLength ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} /*lint !e818 xQueue could not be pointer to const because it is a typedef. 
*/ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + + void vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + { + UBaseType_t ux; + QueueRegistryItem_t * pxEntryToWrite = NULL; + + configASSERT( xQueue ); + + if( pcQueueName != NULL ) + { + /* See if there is an empty space in the registry. A NULL name denotes + * a free slot. */ + for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) + { + /* Replace an existing entry if the queue is already in the registry. */ + if( xQueue == xQueueRegistry[ ux ].xHandle ) + { + pxEntryToWrite = &( xQueueRegistry[ ux ] ); + break; + } + /* Otherwise, store in the next empty location */ + else if( ( pxEntryToWrite == NULL ) && ( xQueueRegistry[ ux ].pcQueueName == NULL ) ) + { + pxEntryToWrite = &( xQueueRegistry[ ux ] ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + + if( pxEntryToWrite != NULL ) + { + /* Store the information on this queue. */ + pxEntryToWrite->pcQueueName = pcQueueName; + pxEntryToWrite->xHandle = xQueue; + + traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName ); + } + } + +#endif /* configQUEUE_REGISTRY_SIZE */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + + const char * pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + { + UBaseType_t ux; + const char * pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + + configASSERT( xQueue ); + + /* Note there is nothing here to protect against another task adding or + * removing entries from the registry while it is being searched. */ + + for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) + { + if( xQueueRegistry[ ux ].xHandle == xQueue ) + { + pcReturn = xQueueRegistry[ ux ].pcQueueName; + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + return pcReturn; + } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */ + +#endif /* configQUEUE_REGISTRY_SIZE */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + + void vQueueUnregisterQueue( QueueHandle_t xQueue ) + { + UBaseType_t ux; + + configASSERT( xQueue ); + + /* See if the handle of the queue being unregistered in actually in the + * registry. */ + for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) + { + if( xQueueRegistry[ ux ].xHandle == xQueue ) + { + /* Set the name to NULL to show that this slot if free again. */ + xQueueRegistry[ ux ].pcQueueName = NULL; + + /* Set the handle to NULL to ensure the same queue handle cannot + * appear in the registry twice if it is added, removed, then + * added again. */ + xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0; + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } /*lint !e818 xQueue could not be pointer to const because it is a typedef. 
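+     *
+     * Illustrative aside (application code; xRxQueue is an assumed handle): the registry
+     * only exists when configQUEUE_REGISTRY_SIZE is greater than 0, and is purely a debug
+     * aid that lets kernel aware debuggers show queues by name. A typical usage sketch:
+     *
+     *     vQueueAddToRegistry( xRxQueue, "RxQueue" );
+     *     ...
+     *     vQueueUnregisterQueue( xRxQueue );
+     *
+     * As vQueueDelete() earlier in this file shows, deleting a queue already removes its
+     * registry entry, so the explicit unregister call is mainly useful when a queue should
+     * drop out of the registry without being deleted.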
*/ + +#endif /* configQUEUE_REGISTRY_SIZE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + + void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, + TickType_t xTicksToWait, + const BaseType_t xWaitIndefinitely ) + { + Queue_t * const pxQueue = xQueue; + + /* This function should not be called by application code hence the + * 'Restricted' in its name. It is not part of the public API. It is + * designed for use by kernel code, and has special calling requirements. + * It can result in vListInsert() being called on a list that can only + * possibly ever have one item in it, so the list will be fast, but even + * so it should be called with the scheduler locked and not from a critical + * section. */ + + /* Only do anything if there are no messages in the queue. This function + * will not actually cause the task to block, just place it on a blocked + * list. It will not block until the scheduler is unlocked - at which + * time a yield will be performed. If an item is added to the queue while + * the queue is locked, and the calling task blocks on the queue, then the + * calling task will be immediately unblocked when the queue is unlocked. */ + prvLockQueue( pxQueue ); + + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U ) + { + /* There is nothing in the queue, block for the specified period. */ + vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + prvUnlockQueue( pxQueue ); + } + +#endif /* configUSE_TIMERS */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength ) + { + QueueSetHandle_t pxQueue; + + pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET ); + + return pxQueue; + } + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) + { + BaseType_t xReturn; + + taskENTER_CRITICAL(); + { + if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL ) + { + /* Cannot add a queue/semaphore to more than one queue set. */ + xReturn = pdFAIL; + } + else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 ) + { + /* Cannot add a queue/semaphore to a queue set if there are already + * items in the queue/semaphore. */ + xReturn = pdFAIL; + } + else + { + ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet; + xReturn = pdPASS; + } + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) + { + BaseType_t xReturn; + Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore; + + if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet ) + { + /* The queue was not a member of the set. 
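+             *
+             * Illustrative aside (application code; xSet, xDataQueue, xBinarySemaphore, xData
+             * and DATA_QUEUE_LENGTH are assumed names): a queue set lets one task block on
+             * several queues and semaphores at once. Members must be empty when they are
+             * added, as enforced by xQueueAddToSet() above, and the set should be created
+             * large enough to hold the combined length of everything added to it (a binary
+             * semaphore counts as length one):
+             *
+             *     xSet = xQueueCreateSet( DATA_QUEUE_LENGTH + 1 );
+             *     ( void ) xQueueAddToSet( xDataQueue, xSet );
+             *     ( void ) xQueueAddToSet( xBinarySemaphore, xSet );
+             *
+             *     xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );
+             *
+             *     if( xActivated == ( QueueSetMemberHandle_t ) xDataQueue )
+             *     {
+             *         ( void ) xQueueReceive( xDataQueue, &xData, 0 );
+             *     }
+             *     else if( xActivated == ( QueueSetMemberHandle_t ) xBinarySemaphore )
+             *     {
+             *         ( void ) xSemaphoreTake( xBinarySemaphore, 0 );
+             *     }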
*/ + xReturn = pdFAIL; + } + else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 ) + { + /* It is dangerous to remove a queue from a set when the queue is + * not empty because the queue set will still hold pending events for + * the queue. */ + xReturn = pdFAIL; + } + else + { + taskENTER_CRITICAL(); + { + /* The queue is no longer contained in the set. */ + pxQueueOrSemaphore->pxQueueSetContainer = NULL; + } + taskEXIT_CRITICAL(); + xReturn = pdPASS; + } + + return xReturn; + } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */ + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + TickType_t const xTicksToWait ) + { + QueueSetMemberHandle_t xReturn = NULL; + + ( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait ); /*lint !e961 Casting from one typedef to another is not redundant. */ + return xReturn; + } + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) + { + QueueSetMemberHandle_t xReturn = NULL; + + ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */ + return xReturn; + } + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) + { + Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer; + BaseType_t xReturn = pdFALSE; + + /* This function must be called form a critical section. */ + + /* The following line is not reachable in unit tests because every call + * to prvNotifyQueueSetContainer is preceded by a check that + * pxQueueSetContainer != NULL */ + configASSERT( pxQueueSetContainer ); /* LCOV_EXCL_BR_LINE */ + configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ); + + if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ) + { + const int8_t cTxLock = pxQueueSetContainer->cTxLock; + + traceQUEUE_SET_SEND( pxQueueSetContainer ); + + /* The data copied is the handle of the queue that contains data. */ + xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK ); + + if( cTxLock == queueUNLOCKED ) + { + if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority. */ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + prvIncrementQueueTxLock( pxQueueSetContainer, cTxLock ); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } + +#endif /* configUSE_QUEUE_SETS */ From eb83cad241685a7155025234e468c6dad2065dac Mon Sep 17 00:00:00 2001 From: Paul Helter Date: Mon, 13 Feb 2023 00:03:50 -0800 Subject: [PATCH 17/24] Adding 'portNORETURN' into portmacros.h. 
Other Updates based on PR request --- include/FreeRTOS.h | 4 ++++ include/timers.h | 6 +++--- .../ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h | 1 + .../ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h | 1 + .../ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h | 1 + .../ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h | 1 + .../ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h | 1 + .../ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h | 1 + portable/GCC/ARM_CM0/portmacro.h | 1 + portable/GCC/ARM_CM23/non_secure/portmacro.h | 1 + portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h | 1 + portable/GCC/ARM_CM3/portmacro.h | 1 + portable/GCC/ARM_CM33/non_secure/portmacro.h | 1 + portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h | 1 + portable/GCC/ARM_CM3_MPU/portmacro.h | 1 + portable/GCC/ARM_CM4F/portmacro.h | 1 + portable/GCC/ARM_CM4_MPU/portmacro.h | 1 + portable/GCC/ARM_CM55/non_secure/portmacro.h | 1 + portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h | 1 + portable/GCC/ARM_CM7/r0p1/portmacro.h | 1 + portable/GCC/ARM_CM85/non_secure/portmacro.h | 1 + portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h | 1 + portable/ThirdParty/GCC/Posix/portmacro.h | 2 ++ portable/ThirdParty/GCC/RP2040/include/portmacro.h | 1 + portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h | 2 ++ tasks.c | 2 +- timers.c | 2 +- 27 files changed, 34 insertions(+), 5 deletions(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 790a06db058..3551af4984e 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -904,6 +904,10 @@ #define portDONT_DISCARD #endif +#ifndef portNORETURN + #define portNORETURN +#endif + #ifndef configUSE_TIME_SLICING #define configUSE_TIME_SLICING 1 #endif diff --git a/include/timers.h b/include/timers.h index fa928f0e559..6a064d62a41 100644 --- a/include/timers.h +++ b/include/timers.h @@ -1361,15 +1361,15 @@ BaseType_t xTimerGenericCommand( TimerHandle_t xTimer, #endif -#if ( configUSE_DAEMON_TASK_STARTUP_HOOK != 0 ) +#if ( configUSE_DAEMON_TASK_STARTUP_HOOK != 0 ) /** * timers.h * @code{c} - * void vApplicationTickHook( void ); + * void vApplicationDaemonTaskStartupHook( void ); * @endcode * - * This hook function is called in the timers task before task executes. + * This hook function is called form the timer task once when the task starts running. 
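+ *
+ * A minimal, hypothetical implementation (application code; vSetupApplicationDrivers()
+ * is an assumed helper standing in for any one-off initialisation that needs a task
+ * context) could be, when configUSE_DAEMON_TASK_STARTUP_HOOK is non-zero:
+ *
+ * @code{c}
+ * extern void vSetupApplicationDrivers( void );
+ *
+ * void vApplicationDaemonTaskStartupHook( void )
+ * {
+ *     vSetupApplicationDrivers();
+ * }
+ * @endcode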
*/ void vApplicationDaemonTaskStartupHook( void ); diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h index f98b8f277cb..2891a6c7e08 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M23" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ #if( configTOTAL_MPU_REGIONS == 16 ) diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h index f98b8f277cb..2891a6c7e08 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M23" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ #if( configTOTAL_MPU_REGIONS == 16 ) diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h index 943c665cc43..53c1020008d 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M33" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h index 943c665cc43..53c1020008d 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M33" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h index b654748e138..35c160f3407 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h @@ -55,6 +55,7 @@ */ #define portARCH_NAME "Cortex-M55" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h index 830fa2c1379..69d32d7f57a 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h @@ -55,6 +55,7 @@ */ #define portARCH_NAME "Cortex-M85" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM0/portmacro.h b/portable/GCC/ARM_CM0/portmacro.h index 6d60fce4c90..408162d6402 100644 --- a/portable/GCC/ARM_CM0/portmacro.h +++ 
b/portable/GCC/ARM_CM0/portmacro.h @@ -77,6 +77,7 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) + #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM23/non_secure/portmacro.h b/portable/GCC/ARM_CM23/non_secure/portmacro.h index f98b8f277cb..2891a6c7e08 100644 --- a/portable/GCC/ARM_CM23/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23/non_secure/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M23" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ #if( configTOTAL_MPU_REGIONS == 16 ) diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h index f98b8f277cb..2891a6c7e08 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M23" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ #if( configTOTAL_MPU_REGIONS == 16 ) diff --git a/portable/GCC/ARM_CM3/portmacro.h b/portable/GCC/ARM_CM3/portmacro.h index c1f7b899217..dd729f13f2c 100644 --- a/portable/GCC/ARM_CM3/portmacro.h +++ b/portable/GCC/ARM_CM3/portmacro.h @@ -77,6 +77,7 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) + #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* Scheduler utilities. */ diff --git a/portable/GCC/ARM_CM33/non_secure/portmacro.h b/portable/GCC/ARM_CM33/non_secure/portmacro.h index 943c665cc43..53c1020008d 100644 --- a/portable/GCC/ARM_CM33/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33/non_secure/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M33" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h index 943c665cc43..53c1020008d 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M33" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM3_MPU/portmacro.h b/portable/GCC/ARM_CM3_MPU/portmacro.h index d3b3af58f4c..25058026973 100644 --- a/portable/GCC/ARM_CM3_MPU/portmacro.h +++ b/portable/GCC/ARM_CM3_MPU/portmacro.h @@ -113,6 +113,7 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) + #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* SVC numbers for various services. 
*/ diff --git a/portable/GCC/ARM_CM4F/portmacro.h b/portable/GCC/ARM_CM4F/portmacro.h index 1ffdc5b8b93..a3b2b46c989 100644 --- a/portable/GCC/ARM_CM4F/portmacro.h +++ b/portable/GCC/ARM_CM4F/portmacro.h @@ -80,6 +80,7 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) + #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* Scheduler utilities. */ diff --git a/portable/GCC/ARM_CM4_MPU/portmacro.h b/portable/GCC/ARM_CM4_MPU/portmacro.h index f3365d14371..df23d95381f 100644 --- a/portable/GCC/ARM_CM4_MPU/portmacro.h +++ b/portable/GCC/ARM_CM4_MPU/portmacro.h @@ -203,6 +203,7 @@ typedef struct MPU_SETTINGS #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* SVC numbers for various services. */ diff --git a/portable/GCC/ARM_CM55/non_secure/portmacro.h b/portable/GCC/ARM_CM55/non_secure/portmacro.h index b654748e138..35c160f3407 100644 --- a/portable/GCC/ARM_CM55/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM55/non_secure/portmacro.h @@ -55,6 +55,7 @@ */ #define portARCH_NAME "Cortex-M55" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h index b654748e138..35c160f3407 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h @@ -55,6 +55,7 @@ */ #define portARCH_NAME "Cortex-M55" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM7/r0p1/portmacro.h b/portable/GCC/ARM_CM7/r0p1/portmacro.h index d65a4c24932..897fef5f468 100644 --- a/portable/GCC/ARM_CM7/r0p1/portmacro.h +++ b/portable/GCC/ARM_CM7/r0p1/portmacro.h @@ -77,6 +77,7 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) + #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* Scheduler utilities. 
*/ diff --git a/portable/GCC/ARM_CM85/non_secure/portmacro.h b/portable/GCC/ARM_CM85/non_secure/portmacro.h index 830fa2c1379..69d32d7f57a 100644 --- a/portable/GCC/ARM_CM85/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM85/non_secure/portmacro.h @@ -55,6 +55,7 @@ */ #define portARCH_NAME "Cortex-M85" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h index 830fa2c1379..69d32d7f57a 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h @@ -55,6 +55,7 @@ */ #define portARCH_NAME "Cortex-M85" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/ThirdParty/GCC/Posix/portmacro.h b/portable/ThirdParty/GCC/Posix/portmacro.h index a5173871359..2061c323642 100644 --- a/portable/ThirdParty/GCC/Posix/portmacro.h +++ b/portable/ThirdParty/GCC/Posix/portmacro.h @@ -68,6 +68,8 @@ typedef unsigned long TickType_t; /*-----------------------------------------------------------*/ /* Architecture specifics. */ +#define portNORETURN __attribute__( ( noreturn ) ) + #define portSTACK_GROWTH ( -1 ) #define portHAS_STACK_OVERFLOW_CHECKING ( 1 ) #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) diff --git a/portable/ThirdParty/GCC/RP2040/include/portmacro.h b/portable/ThirdParty/GCC/RP2040/include/portmacro.h index f19e96a0141..1043d2c5587 100644 --- a/portable/ThirdParty/GCC/RP2040/include/portmacro.h +++ b/portable/ThirdParty/GCC/RP2040/include/portmacro.h @@ -78,6 +78,7 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) + #define portNORETURN __attribute__( ( noreturn ) ) /* We have to use PICO_DIVIDER_DISABLE_INTERRUPTS as the source of truth rathern than our config, * as our FreeRTOSConfig.h header cannot be included by ASM code - which is what this affects in the SDK */ #define portUSE_DIVIDER_SAVE_RESTORE !PICO_DIVIDER_DISABLE_INTERRUPTS diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h b/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h index e87d560cfcd..575659ac597 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h @@ -335,6 +335,8 @@ /*-----------------------------------------------------------*/ /* Architecture specifics. 
  */
+ #define portNORETURN __attribute__( ( noreturn ) )
+
  #define portSTACK_GROWTH ( -1 )
  #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
  #define portBYTE_ALIGNMENT 4

diff --git a/tasks.c b/tasks.c
index 6cf0565f593..8fa1703ba65 100644
--- a/tasks.c
+++ b/tasks.c
@@ -3429,7 +3429,7 @@ void vTaskMissedYield( void )
  * void prvIdleTask( void *pvParameters );
  *
  */
-static void prvIdleTask( void * pvParameters ) __attribute__( ( __noreturn__ ) );
+static void prvIdleTask( void * pvParameters ) portNORETURN;
 
 portTASK_FUNCTION( prvIdleTask, pvParameters )
 {

diff --git a/timers.c b/timers.c
index 324b95a7207..473152f775d 100644
--- a/timers.c
+++ b/timers.c
@@ -565,7 +565,7 @@
     }
 /*-----------------------------------------------------------*/
 
-    static portTASK_FUNCTION( prvTimerTask, pvParameters ) __attribute__( ( __noreturn__ ) );
+    static portTASK_FUNCTION( prvTimerTask, pvParameters ) portNORETURN;
     {
         TickType_t xNextExpireTime;
         BaseType_t xListWasEmpty;

From fda8ac906fa4f907ded175ad42e8942912cfee9b Mon Sep 17 00:00:00 2001
From: Paul Helter
Date: Mon, 13 Feb 2023 09:35:26 -0800
Subject: [PATCH 18/24] Further clean-up of clang and clang-tidy issues.

---
 event_groups.c            | 6 +++---
 portable/MemMang/heap_4.c | 4 ++--
 tasks.c                   | 2 +-
 timers.c                  | 4 ++--
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/event_groups.c b/event_groups.c
index 80e220d95f0..7c86e605a88 100644
--- a/event_groups.c
+++ b/event_groups.c
@@ -521,15 +521,15 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
 
 EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
 {
-    UBaseType_t uxSavedInterruptStatus;
+    portBASE_TYPE xSavedInterruptStatus;
     EventGroup_t const * const pxEventBits = xEventGroup;
     EventBits_t uxReturn;
 
-    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+    xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
     {
         uxReturn = pxEventBits->uxEventBits;
     }
-    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+    portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus );
 
     return uxReturn;
 } /*lint !e818 EventGroupHandle_t is a typedef used in other functions to so can't be pointer to const. */

diff --git a/portable/MemMang/heap_4.c b/portable/MemMang/heap_4.c
index a9cfa45531e..d83928309b4 100644
--- a/portable/MemMang/heap_4.c
+++ b/portable/MemMang/heap_4.c
@@ -390,7 +390,7 @@ static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */
     {
         uxAddress += ( portBYTE_ALIGNMENT - 1 );
         uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK );
-        xTotalHeapSize -= uxAddress - ( portPOINTER_SIZE_TYPE ) ucHeap;
+        xTotalHeapSize -= ( size_t ) ( uxAddress - ( portPOINTER_SIZE_TYPE ) ucHeap );
     }
 
     pucAlignedHeap = ( uint8_t * ) uxAddress;
@@ -402,7 +402,7 @@ static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */
 
     /* pxEnd is used to mark the end of the list of free blocks and is inserted
      * at the end of the heap space. */
-    uxAddress = ( ( portPOINTER_SIZE_TYPE ) pucAlignedHeap ) + xTotalHeapSize;
+    uxAddress = ( portPOINTER_SIZE_TYPE ) ( pucAlignedHeap + xTotalHeapSize );
     uxAddress -= xHeapStructSize;
     uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK );
     pxEnd = ( BlockLink_t * ) uxAddress;

diff --git a/tasks.c b/tasks.c
index 8fa1703ba65..55c6ff93386 100644
--- a/tasks.c
+++ b/tasks.c
@@ -3429,7 +3429,7 @@ void vTaskMissedYield( void )
  * void prvIdleTask( void *pvParameters );
  *
  */
-static void prvIdleTask( void * pvParameters ) portNORETURN;
+static portTASK_FUNCTION_PROTO ( prvIdleTask, pvParameters ) portNORETURN;
 
 portTASK_FUNCTION( prvIdleTask, pvParameters )
 {

diff --git a/timers.c b/timers.c
index 473152f775d..06a29279597 100644
--- a/timers.c
+++ b/timers.c
@@ -158,7 +158,7 @@
  * task. Other tasks communicate with the timer service task using the
  * xTimerQueue queue.
  */
-    static portTASK_FUNCTION_PROTO( prvTimerTask, pvParameters ) PRIVILEGED_FUNCTION;
+    static portTASK_FUNCTION_PROTO( prvTimerTask, pvParameters ) portNORETURN PRIVILEGED_FUNCTION;
 
 /*
  * Called by the timer service task to interpret and process a command it
@@ -565,7 +565,7 @@
     }
 /*-----------------------------------------------------------*/
 
-    static portTASK_FUNCTION( prvTimerTask, pvParameters ) portNORETURN;
+    static portTASK_FUNCTION( prvTimerTask, pvParameters )
     {
         TickType_t xNextExpireTime;
         BaseType_t xListWasEmpty;

From 608b7d7b8fa69c5f70c471d1e1e8d93bd89a4b74 Mon Sep 17 00:00:00 2001
From: Paul Helter
Date: Mon, 13 Feb 2023 09:53:00 -0800
Subject: [PATCH 19/24] Removing compiler-specific pragmas from common C files.

---
 portable/MemMang/heap_4.c | 8 --------
 stream_buffer.c           | 9 ---------
 2 files changed, 17 deletions(-)

diff --git a/portable/MemMang/heap_4.c b/portable/MemMang/heap_4.c
index d83928309b4..bcae8b6354d 100644
--- a/portable/MemMang/heap_4.c
+++ b/portable/MemMang/heap_4.c
@@ -411,15 +411,7 @@ static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */
 
     /* To start with there is a single free block that is sized to take up the
      * entire heap space, minus the space taken by pxEnd. */
-    #if defined( __clang__ )
-        /* Alignment checked above with calculations on uxAddress */
-        #pragma clang diagnostic push
-        #pragma clang diagnostic ignored "-Wcast-align"
-    #endif /* defined(__clang__) */
     pxFirstFreeBlock = ( BlockLink_t * ) pucAlignedHeap;
-    #if defined( __clang__ )
-        #pragma clang diagnostic pop
-    #endif /* defined(__clang__) */
     pxFirstFreeBlock->xBlockSize = ( size_t ) ( uxAddress - ( portPOINTER_SIZE_TYPE ) pxFirstFreeBlock );
     pxFirstFreeBlock->pxNextFreeBlock = pxEnd;

diff --git a/stream_buffer.c b/stream_buffer.c
index eadc0782abe..2f343c71f57 100644
--- a/stream_buffer.c
+++ b/stream_buffer.c
@@ -371,11 +371,6 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
             pucAllocatedMemory = NULL;
         }
 
-        #if defined( __clang__ )
-            #pragma clang diagnostic push
-            #pragma clang diagnostic ignored "-Wcast-align"
-        #endif /* defined(__clang__) */
-
         if( pucAllocatedMemory != NULL )
         {
             prvInitialiseNewStreamBuffer( ( StreamBuffer_t * ) pucAllocatedMemory, /* Structure at the start of the allocated memory. */ /*lint !e9087 Safe cast as allocated memory is aligned. */ /*lint !e826 Area is not too small and alignment is guaranteed provided malloc() behaves as expected and returns aligned buffer. */
@@ -394,10 +389,6 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
         }
 
         return ( StreamBufferHandle_t ) pucAllocatedMemory; /*lint !e9087 !e826 Safe cast as allocated memory is aligned. */
-
-        #if defined( __clang__ )
-            #pragma clang diagnostic pop
-        #endif /* defined(__clang__) */
     }
 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
 /*-----------------------------------------------------------*/

From 23acff84cad4e62e56af4ba034a59e6ee58ddfb2 Mon Sep 17 00:00:00 2001
From: Paul Helter
Date: Mon, 13 Feb 2023 10:01:42 -0800
Subject: [PATCH 20/24] Fixing missing lexicon entry and uncrustify formatting changes.

---
 .github/lexicon.txt | 1 +
 tasks.c             | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/.github/lexicon.txt b/.github/lexicon.txt
index 765b05c1eb5..47ce4ccde11 100644
--- a/.github/lexicon.txt
+++ b/.github/lexicon.txt
@@ -2458,6 +2458,7 @@ vaninterruptserviceroutine
 vanisr
 vanothertask
 vapplicationcleartimerinterrupt
+vapplicationdaemontaskstartuphook
 vapplicationexceptionregisterdump
 vapplicationfpusafeirqhandler
 vapplicationgetidletaskmemory

diff --git a/tasks.c b/tasks.c
index 55c6ff93386..a765ecbb8b2 100644
--- a/tasks.c
+++ b/tasks.c
@@ -3429,7 +3429,7 @@ void vTaskMissedYield( void )
  * void prvIdleTask( void *pvParameters );
  *
  */
-static portTASK_FUNCTION_PROTO ( prvIdleTask, pvParameters ) portNORETURN;
+static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) portNORETURN;
 
 portTASK_FUNCTION( prvIdleTask, pvParameters )
 {

From e15209f610238565bfe5843c783af793353a76c9 Mon Sep 17 00:00:00 2001
From: Paul Helter
Date: Mon, 13 Feb 2023 10:04:55 -0800
Subject: [PATCH 21/24] Resolving merge issue: multiple definitions of prototype for prvIdleTask

---
 tasks.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tasks.c b/tasks.c
index a765ecbb8b2..2b73b1b9703 100644
--- a/tasks.c
+++ b/tasks.c
@@ -432,7 +432,7 @@ static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
  * void prvIdleTask( void *pvParameters );
  *
  */
-static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION;
+static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) portNORETURN PRIVILEGED_FUNCTION;
 
 /*
  * Utility to free all memory allocated by the scheduler to hold a TCB,
@@ -3429,7 +3429,6 @@ void vTaskMissedYield( void )
  * void prvIdleTask( void *pvParameters );
  *
  */
-static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) portNORETURN;
 
 portTASK_FUNCTION( prvIdleTask, pvParameters )
 {

From 31c6261722e507718b4b74f95a552f714bd47c9d Mon Sep 17 00:00:00 2001
From: Paul Helter
Date: Mon, 13 Feb 2023 10:12:35 -0800
Subject: [PATCH 22/24] Fixing formatting issues that are not covered by uncrustify.

Use clang-tidy instead if you want this level of control.
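By way of illustration, the change is purely one of cast spacing; the before and
after lines below are taken from the port.c hunk in this patch:

    /* Before: no spaces inside the cast parentheses. */
    return (uint64_t)t.tv_sec * 1000000000ULL + (uint64_t)t.tv_nsec;

    /* After: spaces inside the cast parentheses, matching the kernel style. */
    return ( uint64_t )t.tv_sec * 1000000000ULL + ( uint64_t )t.tv_nsec;

Behaviour of the code is unchanged; only the formatting of the casts differs.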
---
 portable/ThirdParty/GCC/Posix/port.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/portable/ThirdParty/GCC/Posix/port.c b/portable/ThirdParty/GCC/Posix/port.c
index ff9a0cdea03..82aa27ba889 100644
--- a/portable/ThirdParty/GCC/Posix/port.c
+++ b/portable/ThirdParty/GCC/Posix/port.c
@@ -144,7 +144,7 @@ portSTACK_TYPE * pxPortInitialiseStack( portSTACK_TYPE * pxTopOfStack,
      */
     thread = ( Thread_t * ) ( pxTopOfStack + 1 ) - 1;
     pxTopOfStack = ( portSTACK_TYPE * ) thread - 1;
-    ulStackSize = (size_t)( pxTopOfStack + 1 - pxEndOfStack ) * sizeof( *pxTopOfStack );
+    ulStackSize = ( size_t )( pxTopOfStack + 1 - pxEndOfStack ) * sizeof( *pxTopOfStack );
 
     thread->pxCode = pxCode;
     thread->pvParams = pvParameters;
@@ -343,7 +343,7 @@ static uint64_t prvGetTimeNs( void )
 
     clock_gettime( CLOCK_MONOTONIC, &t );
 
-    return (uint64_t)t.tv_sec * 1000000000ULL + (uint64_t)t.tv_nsec;
+    return ( uint64_t )t.tv_sec * 1000000000ULL + ( uint64_t )t.tv_nsec;
 }
 
 static uint64_t prvStartTimeNs;

From 8839aa496c6e3674fdc553f50d0f7942bf3b5ab7 Mon Sep 17 00:00:00 2001
From: Paul Helter
Date: Mon, 13 Feb 2023 10:17:59 -0800
Subject: [PATCH 23/24] More uncrustify formatting issues.

---
 portable/MemMang/heap_4.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/portable/MemMang/heap_4.c b/portable/MemMang/heap_4.c
index bcae8b6354d..8ca7c81e4f3 100644
--- a/portable/MemMang/heap_4.c
+++ b/portable/MemMang/heap_4.c
@@ -402,7 +402,7 @@ static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */
 
     /* pxEnd is used to mark the end of the list of free blocks and is inserted
      * at the end of the heap space. */
-    uxAddress = ( portPOINTER_SIZE_TYPE ) ( pucAlignedHeap + xTotalHeapSize );
+    uxAddress = ( portPOINTER_SIZE_TYPE ) ( pucAlignedHeap + xTotalHeapSize );
     uxAddress -= xHeapStructSize;
     uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK );
     pxEnd = ( BlockLink_t * ) uxAddress;

From 1169b3be49d27db925d008eced96e19a5ff30a26 Mon Sep 17 00:00:00 2001
From: Paul Helter
Date: Mon, 13 Feb 2023 10:36:05 -0800
Subject: [PATCH 24/24] Fixing extra bracket in #if statement.

---
 stream_buffer.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stream_buffer.c b/stream_buffer.c
index 2f343c71f57..0d0b3350acd 100644
--- a/stream_buffer.c
+++ b/stream_buffer.c
@@ -436,7 +436,7 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
         configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH );
 
         #if ( configASSERT_DEFINED == 1 )
-        {
+
             /* Sanity check that the size of the structure used to declare a
              * variable of type StaticStreamBuffer_t equals the size of the real
              * message buffer structure. */