/*
 * FreeRTOS Kernel V10.4.4
 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * https://www.FreeRTOS.org
 * https://github.com/FreeRTOS
 *
 */

/* Standard includes. */
#include <stdlib.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
 * all the API functions to use the MPU wrappers.  That should only be done when
 * task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

/* FreeRTOS includes. */
#include "FreeRTOS.h"
#include "task.h"
#include "timers.h"
#include "event_groups.h"

/* Lint e961, e750 and e9021 are suppressed as a MISRA exception justified
 * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
 * for the header files above, but not in this file, in order to generate the
 * correct privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021 See comment above. */

/* The following bit fields convey control information in a task's event list
 * item value.  It is important they don't clash with the
 * taskEVENT_LIST_ITEM_VALUE_IN_USE definition. */
#if configUSE_16_BIT_TICKS == 1
    #define eventCLEAR_EVENTS_ON_EXIT_BIT    0x0100U
    #define eventUNBLOCKED_DUE_TO_BIT_SET    0x0200U
    #define eventWAIT_FOR_ALL_BITS           0x0400U
    #define eventEVENT_BITS_CONTROL_BYTES    0xff00U
#else
    #define eventCLEAR_EVENTS_ON_EXIT_BIT    0x01000000UL
    #define eventUNBLOCKED_DUE_TO_BIT_SET    0x02000000UL
    #define eventWAIT_FOR_ALL_BITS           0x04000000UL
    #define eventEVENT_BITS_CONTROL_BYTES    0xff000000UL
#endif
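
/* Note on usable bits: because the top byte of the event list item value is
 * reserved for the control bits above, application code can only use the low
 * 8 event bits when configUSE_16_BIT_TICKS is 1, or the low 24 event bits
 * otherwise.  A hypothetical application might define its bits like this
 * (illustrative sketch only, not part of the kernel):
 *
 *   #define appRX_COMPLETE_BIT    ( ( EventBits_t ) 0x01 )
 *   #define appTX_COMPLETE_BIT    ( ( EventBits_t ) 0x02 )
 *
 * Any attempt to set, clear or wait on bits within eventEVENT_BITS_CONTROL_BYTES
 * is caught by the configASSERT() calls in the API functions below. */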

typedef struct EventGroupDef_t
{
    EventBits_t uxEventBits;
    List_t xTasksWaitingForBits; /*< List of tasks waiting for a bit to be set. */

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxEventGroupNumber;
    #endif

    #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
        uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
    #endif
} EventGroup_t;
/*-----------------------------------------------------------*/

/*
 * Test the bits set in uxCurrentEventBits to see if the wait condition is met.
 * The wait condition is defined by xWaitForAllBits.  If xWaitForAllBits is
 * pdTRUE then the wait condition is met if all the bits set in uxBitsToWaitFor
 * are also set in uxCurrentEventBits.  If xWaitForAllBits is pdFALSE then the
 * wait condition is met if any of the bits set in uxBitsToWaitFor are also set
 * in uxCurrentEventBits.
 */
static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
                                        const EventBits_t uxBitsToWaitFor,
                                        const BaseType_t xWaitForAllBits ) PRIVILEGED_FUNCTION;

/*-----------------------------------------------------------*/

#if ( configSUPPORT_STATIC_ALLOCATION == 1 )

    EventGroupHandle_t xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer )
    {
        EventGroup_t * pxEventBits;

        /* A StaticEventGroup_t object must be provided. */
        configASSERT( pxEventGroupBuffer );

        #if ( configASSERT_DEFINED == 1 )
            {
                /* Sanity check that the size of the structure used to declare a
                 * variable of type StaticEventGroup_t equals the size of the real
                 * event group structure. */
                volatile size_t xSize = sizeof( StaticEventGroup_t );
                configASSERT( xSize == sizeof( EventGroup_t ) );
            } /*lint !e529 xSize is referenced if configASSERT() is defined. */
        #endif /* configASSERT_DEFINED */

        /* The user has provided a statically allocated event group - use it. */
        pxEventBits = ( EventGroup_t * ) pxEventGroupBuffer; /*lint !e740 !e9087 EventGroup_t and StaticEventGroup_t are deliberately aliased for data hiding purposes and guaranteed to have the same size and alignment requirement - checked by configASSERT(). */

        if( pxEventBits != NULL )
        {
            pxEventBits->uxEventBits = 0;
            vListInitialise( &( pxEventBits->xTasksWaitingForBits ) );

            #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
                {
                    /* Both static and dynamic allocation can be used, so note that
                     * this event group was created statically in case the event group
                     * is later deleted. */
                    pxEventBits->ucStaticallyAllocated = pdTRUE;
                }
            #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

            traceEVENT_GROUP_CREATE( pxEventBits );
        }
        else
        {
            /* xEventGroupCreateStatic should only ever be called with
             * pxEventGroupBuffer pointing to a pre-allocated (compile time
             * allocated) StaticEventGroup_t variable. */
            traceEVENT_GROUP_CREATE_FAILED();
        }

        return pxEventBits;
    }

#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
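
/* Illustrative usage sketch for the static creation path (the variable and
 * function names below are hypothetical, not part of this file):
 *
 *   static StaticEventGroup_t xEventGroupStorage;
 *   static EventGroupHandle_t xSyncEvents = NULL;
 *
 *   void vSetupSyncEvents( void )
 *   {
 *       xSyncEvents = xEventGroupCreateStatic( &xEventGroupStorage );
 *
 *       (With a valid buffer the call cannot fail, because no memory is
 *       allocated - the returned handle simply aliases xEventGroupStorage.)
 *   }
 */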

#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

    EventGroupHandle_t xEventGroupCreate( void )
    {
        EventGroup_t * pxEventBits;

        /* Allocate the event group.  Justification for MISRA deviation as
         * follows:  pvPortMalloc() always ensures returned memory blocks are
         * aligned per the requirements of the MCU stack.  In this case
         * pvPortMalloc() must return a pointer that is guaranteed to meet the
         * alignment requirements of the EventGroup_t structure - which (if you
         * follow it through) is the alignment requirements of the TickType_t type
         * (EventBits_t being of TickType_t itself).  Therefore, whenever the
         * stack alignment requirements are greater than or equal to the
         * TickType_t alignment requirements the cast is safe.  In other cases,
         * where the natural word size of the architecture is less than
         * sizeof( TickType_t ), the TickType_t variables will be accessed in two
         * or more read operations, and the alignment requirement is only that
         * of each individual read. */
        pxEventBits = ( EventGroup_t * ) pvPortMalloc( sizeof( EventGroup_t ) ); /*lint !e9087 !e9079 see comment above. */

        if( pxEventBits != NULL )
        {
            pxEventBits->uxEventBits = 0;
            vListInitialise( &( pxEventBits->xTasksWaitingForBits ) );

            #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
                {
                    /* Both static and dynamic allocation can be used, so note this
                     * event group was allocated dynamically in case the event group
                     * is later deleted. */
                    pxEventBits->ucStaticallyAllocated = pdFALSE;
                }
            #endif /* configSUPPORT_STATIC_ALLOCATION */

            traceEVENT_GROUP_CREATE( pxEventBits );
        }
        else
        {
            traceEVENT_GROUP_CREATE_FAILED(); /*lint !e9063 Else branch only exists to allow tracing and does not generate code if trace macros are not defined. */
        }

        return pxEventBits;
    }

#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
/*-----------------------------------------------------------*/
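
/* Illustrative usage sketch for the dynamic creation path (the xWifiEvents
 * handle name is hypothetical):
 *
 *   EventGroupHandle_t xWifiEvents;
 *
 *   xWifiEvents = xEventGroupCreate();
 *
 *   if( xWifiEvents == NULL )
 *   {
 *       (There was insufficient FreeRTOS heap available to allocate the
 *       EventGroup_t structure - handle the error here.)
 *   }
 */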

EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
                             const EventBits_t uxBitsToSet,
                             const EventBits_t uxBitsToWaitFor,
                             TickType_t xTicksToWait )
{
    EventBits_t uxOriginalBitValue, uxReturn;
    EventGroup_t * pxEventBits = xEventGroup;
    BaseType_t xAlreadyYielded;
    BaseType_t xTimeoutOccurred = pdFALSE;

    configASSERT( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
    configASSERT( uxBitsToWaitFor != 0 );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
        {
            configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
        }
    #endif

    vTaskSuspendAll();
    {
        uxOriginalBitValue = pxEventBits->uxEventBits;

        ( void ) xEventGroupSetBits( xEventGroup, uxBitsToSet );

        if( ( ( uxOriginalBitValue | uxBitsToSet ) & uxBitsToWaitFor ) == uxBitsToWaitFor )
        {
            /* All the rendezvous bits are now set - no need to block. */
            uxReturn = ( uxOriginalBitValue | uxBitsToSet );

            /* A rendezvous always clears the bits.  They will have been cleared
             * already unless this is the only task in the rendezvous. */
            pxEventBits->uxEventBits &= ~uxBitsToWaitFor;

            xTicksToWait = 0;
        }
        else
        {
            if( xTicksToWait != ( TickType_t ) 0 )
            {
                traceEVENT_GROUP_SYNC_BLOCK( xEventGroup, uxBitsToSet, uxBitsToWaitFor );

                /* Store the bits that the calling task is waiting for in the
                 * task's event list item so the kernel knows when a match is
                 * found.  Then enter the blocked state. */
                vTaskPlaceOnUnorderedEventList( &( pxEventBits->xTasksWaitingForBits ), ( uxBitsToWaitFor | eventCLEAR_EVENTS_ON_EXIT_BIT | eventWAIT_FOR_ALL_BITS ), xTicksToWait );

                /* This assignment is obsolete as uxReturn will get set after
                 * the task unblocks, but some compilers mistakenly generate a
                 * warning about uxReturn being returned without being set if the
                 * assignment is omitted. */
                uxReturn = 0;
            }
            else
            {
                /* The rendezvous bits were not set, but no block time was
                 * specified - just return the current event bit value. */
                uxReturn = pxEventBits->uxEventBits;
                xTimeoutOccurred = pdTRUE;
            }
        }
    }
    xAlreadyYielded = xTaskResumeAll();

    if( xTicksToWait != ( TickType_t ) 0 )
    {
        if( xAlreadyYielded == pdFALSE )
        {
            portYIELD_WITHIN_API();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        /* The task blocked to wait for its required bits to be set - at this
         * point either the required bits were set or the block time expired.  If
         * the required bits were set they will have been stored in the task's
         * event list item, and they should now be retrieved then cleared. */
        uxReturn = uxTaskResetEventItemValue();

        if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
        {
            /* The task timed out, just return the current event bit value. */
            taskENTER_CRITICAL();
            {
                uxReturn = pxEventBits->uxEventBits;

                /* Although the task got here because it timed out before the
                 * bits it was waiting for were set, it is possible that since it
                 * unblocked another task has set the bits.  If this is the case
                 * then it needs to clear the bits before exiting. */
                if( ( uxReturn & uxBitsToWaitFor ) == uxBitsToWaitFor )
                {
                    pxEventBits->uxEventBits &= ~uxBitsToWaitFor;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            taskEXIT_CRITICAL();

            xTimeoutOccurred = pdTRUE;
        }
        else
        {
            /* The task unblocked because the bits were set. */
        }

        /* Control bits, which might have been set while the task was blocked,
         * should not be returned. */
        uxReturn &= ~eventEVENT_BITS_CONTROL_BYTES;
    }

    traceEVENT_GROUP_SYNC_END( xEventGroup, uxBitsToSet, uxBitsToWaitFor, xTimeoutOccurred );

    /* Prevent compiler warnings when trace macros are not used. */
    ( void ) xTimeoutOccurred;

    return uxReturn;
}
/*-----------------------------------------------------------*/
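
/* Illustrative rendezvous sketch for xEventGroupSync() (the xSyncEvents handle,
 * bit values and task function are hypothetical).  Each of three tasks calls
 * xEventGroupSync() with its own bit, and none proceed until all three bits
 * have been set or the timeout expires:
 *
 *   #define appTASK_A_BIT     ( ( EventBits_t ) 0x01 )
 *   #define appTASK_B_BIT     ( ( EventBits_t ) 0x02 )
 *   #define appTASK_C_BIT     ( ( EventBits_t ) 0x04 )
 *   #define appALL_SYNC_BITS  ( appTASK_A_BIT | appTASK_B_BIT | appTASK_C_BIT )
 *
 *   void vTaskA( void * pvParameters )
 *   {
 *       EventBits_t uxReturn;
 *
 *       for( ;; )
 *       {
 *           (Perform task A's part of the work, then rendezvous with tasks B
 *           and C, waiting at most 100ms for them to arrive.)
 *           uxReturn = xEventGroupSync( xSyncEvents,
 *                                       appTASK_A_BIT,
 *                                       appALL_SYNC_BITS,
 *                                       pdMS_TO_TICKS( 100 ) );
 *
 *           if( ( uxReturn & appALL_SYNC_BITS ) == appALL_SYNC_BITS )
 *           {
 *               (All three tasks reached the rendezvous before the timeout.)
 *           }
 *       }
 *   }
 */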

EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
                                 const EventBits_t uxBitsToWaitFor,
                                 const BaseType_t xClearOnExit,
                                 const BaseType_t xWaitForAllBits,
                                 TickType_t xTicksToWait )
{
    EventGroup_t * pxEventBits = xEventGroup;
    EventBits_t uxReturn, uxControlBits = 0;
    BaseType_t xWaitConditionMet, xAlreadyYielded;
    BaseType_t xTimeoutOccurred = pdFALSE;

    /* Check the user is not attempting to wait on the bits used by the kernel
     * itself, and that at least one bit is being requested. */
    configASSERT( xEventGroup );
    configASSERT( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
    configASSERT( uxBitsToWaitFor != 0 );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
        {
            configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
        }
    #endif

    vTaskSuspendAll();
    {
        const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;

        /* Check to see if the wait condition is already met or not. */
        xWaitConditionMet = prvTestWaitCondition( uxCurrentEventBits, uxBitsToWaitFor, xWaitForAllBits );

        if( xWaitConditionMet != pdFALSE )
        {
            /* The wait condition has already been met so there is no need to
             * block. */
            uxReturn = uxCurrentEventBits;
            xTicksToWait = ( TickType_t ) 0;

            /* Clear the wait bits if requested to do so. */
            if( xClearOnExit != pdFALSE )
            {
                pxEventBits->uxEventBits &= ~uxBitsToWaitFor;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else if( xTicksToWait == ( TickType_t ) 0 )
        {
            /* The wait condition has not been met, but no block time was
             * specified, so just return the current value. */
            uxReturn = uxCurrentEventBits;
            xTimeoutOccurred = pdTRUE;
        }
        else
        {
            /* The task is going to block to wait for its required bits to be
             * set.  uxControlBits are used to remember the specified behaviour of
             * this call to xEventGroupWaitBits() - for use when the event bits
             * unblock the task. */
            if( xClearOnExit != pdFALSE )
            {
                uxControlBits |= eventCLEAR_EVENTS_ON_EXIT_BIT;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            if( xWaitForAllBits != pdFALSE )
            {
                uxControlBits |= eventWAIT_FOR_ALL_BITS;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Store the bits that the calling task is waiting for in the
             * task's event list item so the kernel knows when a match is
             * found.  Then enter the blocked state. */
            vTaskPlaceOnUnorderedEventList( &( pxEventBits->xTasksWaitingForBits ), ( uxBitsToWaitFor | uxControlBits ), xTicksToWait );

            /* This is obsolete as it will get set after the task unblocks, but
             * some compilers mistakenly generate a warning about the variable
             * being returned without being set if it is not done. */
            uxReturn = 0;

            traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
        }
    }
    xAlreadyYielded = xTaskResumeAll();

    if( xTicksToWait != ( TickType_t ) 0 )
    {
        if( xAlreadyYielded == pdFALSE )
        {
            portYIELD_WITHIN_API();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        /* The task blocked to wait for its required bits to be set - at this
         * point either the required bits were set or the block time expired.  If
         * the required bits were set they will have been stored in the task's
         * event list item, and they should now be retrieved then cleared. */
        uxReturn = uxTaskResetEventItemValue();

        if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
        {
            taskENTER_CRITICAL();
            {
                /* The task timed out, just return the current event bit value. */
                uxReturn = pxEventBits->uxEventBits;

                /* It is possible that the event bits were updated between this
                 * task leaving the Blocked state and running again. */
                if( prvTestWaitCondition( uxReturn, uxBitsToWaitFor, xWaitForAllBits ) != pdFALSE )
                {
                    if( xClearOnExit != pdFALSE )
                    {
                        pxEventBits->uxEventBits &= ~uxBitsToWaitFor;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                xTimeoutOccurred = pdTRUE;
            }
            taskEXIT_CRITICAL();
        }
        else
        {
            /* The task unblocked because the bits were set. */
        }

        /* The task blocked so control bits may have been set. */
        uxReturn &= ~eventEVENT_BITS_CONTROL_BYTES;
    }

    traceEVENT_GROUP_WAIT_BITS_END( xEventGroup, uxBitsToWaitFor, xTimeoutOccurred );

    /* Prevent compiler warnings when trace macros are not used. */
    ( void ) xTimeoutOccurred;

    return uxReturn;
}
/*-----------------------------------------------------------*/
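
/* Illustrative usage sketch for xEventGroupWaitBits() (the xWifiEvents handle
 * and app*_BIT values are hypothetical).  The call below blocks for up to
 * 500ms until either bit is set, clears the set bits on exit (xClearOnExit is
 * pdTRUE), and does not require both bits (xWaitForAllBits is pdFALSE):
 *
 *   EventBits_t uxBits;
 *
 *   uxBits = xEventGroupWaitBits( xWifiEvents,
 *                                 appRX_COMPLETE_BIT | appTX_COMPLETE_BIT,
 *                                 pdTRUE,
 *                                 pdFALSE,
 *                                 pdMS_TO_TICKS( 500 ) );
 *
 *   if( ( uxBits & ( appRX_COMPLETE_BIT | appTX_COMPLETE_BIT ) ) == 0 )
 *   {
 *       (The call timed out before either bit was set.)
 *   }
 */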

EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
                                  const EventBits_t uxBitsToClear )
{
    EventGroup_t * pxEventBits = xEventGroup;
    EventBits_t uxReturn;

    /* Check the user is not attempting to clear the bits used by the kernel
     * itself. */
    configASSERT( xEventGroup );
    configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );

    taskENTER_CRITICAL();
    {
        traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );

        /* The value returned is the event group value prior to the bits being
         * cleared. */
        uxReturn = pxEventBits->uxEventBits;

        /* Clear the bits. */
        pxEventBits->uxEventBits &= ~uxBitsToClear;
    }
    taskEXIT_CRITICAL();

    return uxReturn;
}
/*-----------------------------------------------------------*/
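
/* Illustrative usage sketch for xEventGroupClearBits() (names hypothetical).
 * The return value is the event group value before the clear, so it can be
 * inspected to see which of the cleared bits were actually set:
 *
 *   EventBits_t uxBitsBeforeClear;
 *
 *   uxBitsBeforeClear = xEventGroupClearBits( xWifiEvents, appRX_COMPLETE_BIT );
 *
 *   if( ( uxBitsBeforeClear & appRX_COMPLETE_BIT ) != 0 )
 *   {
 *       (The bit was set at the time the call was made.)
 *   }
 */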

#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) )

    BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup,
                                            const EventBits_t uxBitsToClear )
    {
        BaseType_t xReturn;

        traceEVENT_GROUP_CLEAR_BITS_FROM_ISR( xEventGroup, uxBitsToClear );
        xReturn = xTimerPendFunctionCallFromISR( vEventGroupClearBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToClear, NULL ); /*lint !e9087 Can't avoid cast to void* as a generic callback function not specific to this use case. Callback casts back to original type so safe. */

        return xReturn;
    }

#endif /* if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) */
/*-----------------------------------------------------------*/
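
/* Illustrative ISR sketch (the ISR name is hypothetical).  Note the clear is
 * not performed inside the interrupt itself - it is deferred to the RTOS
 * daemon (timer service) task via xTimerPendFunctionCallFromISR(), so it only
 * takes effect after the daemon task has run:
 *
 *   void vAnInterruptHandler( void )
 *   {
 *       BaseType_t xSuccess;
 *
 *       xSuccess = xEventGroupClearBitsFromISR( xWifiEvents, appRX_COMPLETE_BIT );
 *
 *       if( xSuccess == pdFAIL )
 *       {
 *           (The timer command queue was full, so the clear was not pended.)
 *       }
 *   }
 */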

EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
{
    UBaseType_t uxSavedInterruptStatus;
    EventGroup_t const * const pxEventBits = xEventGroup;
    EventBits_t uxReturn;

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        uxReturn = pxEventBits->uxEventBits;
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return uxReturn;
} /*lint !e818 EventGroupHandle_t is a typedef used in other functions so can't be pointer to const. */
/*-----------------------------------------------------------*/

EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
                                const EventBits_t uxBitsToSet )
{
    ListItem_t * pxListItem, * pxNext;
    ListItem_t const * pxListEnd;
    List_t const * pxList;
    EventBits_t uxBitsToClear = 0, uxBitsWaitedFor, uxControlBits;
    EventGroup_t * pxEventBits = xEventGroup;
    BaseType_t xMatchFound = pdFALSE;

    /* Check the user is not attempting to set the bits used by the kernel
     * itself. */
    configASSERT( xEventGroup );
    configASSERT( ( uxBitsToSet & eventEVENT_BITS_CONTROL_BYTES ) == 0 );

    pxList = &( pxEventBits->xTasksWaitingForBits );
    pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM.  This is checked and valid. */

    vTaskSuspendAll();
    {
        traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );

        pxListItem = listGET_HEAD_ENTRY( pxList );

        /* Set the bits. */
        pxEventBits->uxEventBits |= uxBitsToSet;

        /* See if the new bit value should unblock any tasks. */
        while( pxListItem != pxListEnd )
        {
            pxNext = listGET_NEXT( pxListItem );
            uxBitsWaitedFor = listGET_LIST_ITEM_VALUE( pxListItem );
            xMatchFound = pdFALSE;

            /* Split the bits waited for from the control bits. */
            uxControlBits = uxBitsWaitedFor & eventEVENT_BITS_CONTROL_BYTES;
            uxBitsWaitedFor &= ~eventEVENT_BITS_CONTROL_BYTES;

            if( ( uxControlBits & eventWAIT_FOR_ALL_BITS ) == ( EventBits_t ) 0 )
            {
                /* Just looking for a single bit being set. */
                if( ( uxBitsWaitedFor & pxEventBits->uxEventBits ) != ( EventBits_t ) 0 )
                {
                    xMatchFound = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else if( ( uxBitsWaitedFor & pxEventBits->uxEventBits ) == uxBitsWaitedFor )
            {
                /* All bits are set. */
                xMatchFound = pdTRUE;
            }
            else
            {
                /* Need all bits to be set, but not all the bits were set. */
            }

            if( xMatchFound != pdFALSE )
            {
                /* The bits match.  Should the bits be cleared on exit? */
                if( ( uxControlBits & eventCLEAR_EVENTS_ON_EXIT_BIT ) != ( EventBits_t ) 0 )
                {
                    uxBitsToClear |= uxBitsWaitedFor;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* Store the actual event flag value in the task's event list
                 * item before removing the task from the event list.  The
                 * eventUNBLOCKED_DUE_TO_BIT_SET bit is set so the task knows
                 * that it was unblocked due to its required bits matching, rather
                 * than because it timed out. */
                vTaskRemoveFromUnorderedEventList( pxListItem, pxEventBits->uxEventBits | eventUNBLOCKED_DUE_TO_BIT_SET );
            }

            /* Move onto the next list item.  Note pxListItem->pxNext is not
             * used here as the list item may have been removed from the event list
             * and inserted into the ready/pending ready list. */
            pxListItem = pxNext;
        }

        /* Clear any bits that matched when the eventCLEAR_EVENTS_ON_EXIT_BIT
         * bit was set in the control word. */
        pxEventBits->uxEventBits &= ~uxBitsToClear;
    }
    ( void ) xTaskResumeAll();

    return pxEventBits->uxEventBits;
}
/*-----------------------------------------------------------*/
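
/* Illustrative usage sketch for xEventGroupSetBits() (names hypothetical).
 * Note that the value returned is the event group value at the point the call
 * returns, which may already have some of the newly set bits cleared again if
 * setting them unblocked a task that was waiting with xClearOnExit set:
 *
 *   EventBits_t uxBits;
 *
 *   uxBits = xEventGroupSetBits( xWifiEvents, appTX_COMPLETE_BIT );
 *
 *   if( ( uxBits & appTX_COMPLETE_BIT ) == 0 )
 *   {
 *       (A waiting task consumed and cleared the bit before this call
 *       returned.)
 *   }
 */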

void vEventGroupDelete( EventGroupHandle_t xEventGroup )
{
    configASSERT( xEventGroup );

    EventGroup_t * pxEventBits = xEventGroup;
    const List_t * pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );

    vTaskSuspendAll();
    {
        traceEVENT_GROUP_DELETE( xEventGroup );

        while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
        {
            /* Unblock the task, returning 0 as the event list is being deleted
             * and cannot therefore have any bits set. */
            configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
            vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
        }

        #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
            {
                /* The event group can only have been allocated dynamically - free
                 * it again. */
                vPortFree( pxEventBits );
            }
        #elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
            {
                /* The event group could have been allocated statically or
                 * dynamically, so check before attempting to free the memory. */
                if( pxEventBits->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
                {
                    vPortFree( pxEventBits );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
    }
    ( void ) xTaskResumeAll();
}
/*-----------------------------------------------------------*/
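
/* Illustrative deletion sketch (the xWifiEvents handle is hypothetical).  Any
 * tasks blocked on the event group are unblocked during the delete and receive
 * an event bit value of 0 (no bits set):
 *
 *   vEventGroupDelete( xWifiEvents );
 *   xWifiEvents = NULL;   avoid further use of the deleted handle
 */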

/* For internal use only - execute a 'set bits' command that was pended from
 * an interrupt. */
void vEventGroupSetBitsCallback( void * pvEventGroup,
                                 const uint32_t ulBitsToSet )
{
    ( void ) xEventGroupSetBits( pvEventGroup, ( EventBits_t ) ulBitsToSet ); /*lint !e9079 Can't avoid cast to void* as a generic timer callback prototype. Callback casts back to original type so safe. */
}
/*-----------------------------------------------------------*/

/* For internal use only - execute a 'clear bits' command that was pended from
 * an interrupt. */
void vEventGroupClearBitsCallback( void * pvEventGroup,
                                   const uint32_t ulBitsToClear )
{
    ( void ) xEventGroupClearBits( pvEventGroup, ( EventBits_t ) ulBitsToClear ); /*lint !e9079 Can't avoid cast to void* as a generic timer callback prototype. Callback casts back to original type so safe. */
}
/*-----------------------------------------------------------*/

static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
                                        const EventBits_t uxBitsToWaitFor,
                                        const BaseType_t xWaitForAllBits )
{
    BaseType_t xWaitConditionMet = pdFALSE;

    if( xWaitForAllBits == pdFALSE )
    {
        /* Task only has to wait for one bit within uxBitsToWaitFor to be
         * set.  Is one already set? */
        if( ( uxCurrentEventBits & uxBitsToWaitFor ) != ( EventBits_t ) 0 )
        {
            xWaitConditionMet = pdTRUE;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    else
    {
        /* Task has to wait for all the bits in uxBitsToWaitFor to be set.
         * Are they set already? */
        if( ( uxCurrentEventBits & uxBitsToWaitFor ) == uxBitsToWaitFor )
        {
            xWaitConditionMet = pdTRUE;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

    return xWaitConditionMet;
}
/*-----------------------------------------------------------*/
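
/* Worked example of the wait condition test above (values are illustrative):
 * with uxCurrentEventBits = 0x05 and uxBitsToWaitFor = 0x06, the bitwise AND
 * is 0x04.  If xWaitForAllBits is pdFALSE the condition is met because the
 * AND is non-zero (bit 2 is set); if xWaitForAllBits is pdTRUE the condition
 * is not met because 0x04 != 0x06 (bit 1 is still clear). */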

#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) )

    BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup,
                                          const EventBits_t uxBitsToSet,
                                          BaseType_t * pxHigherPriorityTaskWoken )
    {
        BaseType_t xReturn;

        traceEVENT_GROUP_SET_BITS_FROM_ISR( xEventGroup, uxBitsToSet );
        xReturn = xTimerPendFunctionCallFromISR( vEventGroupSetBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToSet, pxHigherPriorityTaskWoken ); /*lint !e9087 Can't avoid cast to void* as a generic callback function not specific to this use case. Callback casts back to original type so safe. */

        return xReturn;
    }

#endif /* if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) */
/*-----------------------------------------------------------*/
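
/* Illustrative ISR sketch for xEventGroupSetBitsFromISR() (the ISR name, handle
 * and bit value are hypothetical).  The set is deferred to the RTOS daemon
 * (timer service) task, so pdPASS only means the request was queued
 * successfully:
 *
 *   void vRxDmaCompleteISR( void )
 *   {
 *       BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *       if( xEventGroupSetBitsFromISR( xWifiEvents,
 *                                      appRX_COMPLETE_BIT,
 *                                      &xHigherPriorityTaskWoken ) == pdPASS )
 *       {
 *           (Request a context switch if pending the function woke the daemon
 *           task and it has a higher priority than the interrupted task.)
 *           portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *       }
 *   }
 */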

#if ( configUSE_TRACE_FACILITY == 1 )

    UBaseType_t uxEventGroupGetNumber( void * xEventGroup )
    {
        UBaseType_t xReturn;
        EventGroup_t const * pxEventBits = ( EventGroup_t * ) xEventGroup; /*lint !e9087 !e9079 EventGroupHandle_t is a pointer to an EventGroup_t, but EventGroupHandle_t is kept opaque outside of this file for data hiding purposes. */

        if( xEventGroup == NULL )
        {
            xReturn = 0;
        }
        else
        {
            xReturn = pxEventBits->uxEventGroupNumber;
        }

        return xReturn;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    void vEventGroupSetNumber( void * xEventGroup,
                               UBaseType_t uxEventGroupNumber )
    {
        ( ( EventGroup_t * ) xEventGroup )->uxEventGroupNumber = uxEventGroupNumber; /*lint !e9087 !e9079 EventGroupHandle_t is a pointer to an EventGroup_t, but EventGroupHandle_t is kept opaque outside of this file for data hiding purposes. */
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/