diff --git a/CHANGE_NOTES b/CHANGE_NOTES
index 7132d839..834620a7 100644
--- a/CHANGE_NOTES
+++ b/CHANGE_NOTES
@@ -52,6 +52,36 @@ Examples:
 - See em-odp/README for usage and compilation instructions.
 - See em-odp/include/event_machine/README_API for API changes
 
+--------------------------------------------------------------------------------
+Event Machine (EM) on ODP v3.8.0
+--------------------------------------------------------------------------------
+- Support for EM API v3.8 (em-odp/include/),
+  see API additions and changes in em-odp/include/event_machine/README_API.
+  Summary:
+  * Timer: change the em_tmo_delete() API function and
+    enhance the timeout cancel and delete logic.
+    NOTE: Both an API and functional change, requires changes to
+          user code logic!
+  * Init: check that em_conf_init() has been called before em_init()
+
+- Fixes:
+  - timer: fix proper handling of the return values of odp_timer_cancel()
+    The odp_timer_cancel() return values changed in ODP v1.41.1.
+    The return value separates failure from an already expired timeout.
+
+  - event, fix: update check for setting vector evtype
+    In em_event_set_type(), when setting a new event type for a vector event:
+    The new major event type is only used if EM_CHECK_LEVEL >= 1, thus move the
+    variable declaration inside the if-statement.
+    Avoids potential compilation warnings with EM_CHECK_LEVEL = 0.
+
+- Programs:
+  - New performance test program added:
+    - programs/performance/loop_united.c
+      Combines the existing performance test programs pairs, loop, loop_multircv,
+      loop_refs and loop_vectors into a single program that can run all the
+      individual tests via command line options.
+
 --------------------------------------------------------------------------------
 Event Machine (EM) on ODP v3.7.0
 --------------------------------------------------------------------------------
diff --git a/configure.ac b/configure.ac
old mode 100644
new mode 100755
index d882cd5d..19824955
--- a/configure.ac
+++ b/configure.ac
@@ -3,7 +3,7 @@ AC_PREREQ([2.69])
 # Version
 ############################
 m4_define([em_version_api_major], [3])
-m4_define([em_version_api_minor], [7])
+m4_define([em_version_api_minor], [8])
 m4_define([em_version_implementation], [0])
 m4_define([em_version_fix], [0])
@@ -724,6 +724,7 @@ AC_MSG_RESULT([
 	am_cppflags:	${AM_CPPFLAGS}
 	cflags:		${CFLAGS}
 	am_cflags:	${AM_CFLAGS}
+	cxxflags:	${CXXFLAGS}
 	am_cxxflags:	${AM_CXXFLAGS}
 	ld:		${LD}
 	ldflags:	${LDFLAGS}
diff --git a/include/event_machine.h b/include/event_machine.h
index 7096b841..11f2aece 100644
--- a/include/event_machine.h
+++ b/include/event_machine.h
@@ -1,306 +1,307 @@
-/*
- * Copyright (c) 2012, Nokia Siemens Networks
- * Copyright (c) 2015-2024, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *   * Neither the name of the copyright holder nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef EVENT_MACHINE_H -#define EVENT_MACHINE_H - -#pragma GCC visibility push(default) - -/** - * @file - * Event Machine API - * - * This file includes all other needed EM headers - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** @mainpage - * - * @section section_1 General - * Event Machine (EM) is a framework and an architectural abstraction of an - * event driven, multicore optimized, processing concept originally developed - * for the networking data plane. It offers an easy programming concept for - * scalable and dynamically load balanced multicore applications with a very - * low overhead run-to-completion principle. - * - * Events, queues and execution objects (EO) along with the scheduler and the - * dispatcher form the main elements of the EM concept. An event is an - * application specific piece of data (like a message or a network packet) - * describing work, something to do. All processing in EM must be triggered by - * an event. Events are sent to asynchronous application specific EM queues. - * A dispatcher loop is run by a single thread on each core in the EM instance - * ("core" is used here to refer to a core or one HW thread on multi-threaded - * cores). The dispatcher on each core interfaces with the scheduler and asks - * for an event to process. The scheduler then evaluates the state of all the - * EM queues and gives the highest priority event available to the requesting - * dispatcher. The dispatcher looks up which EO owns the queue that the event - * came from and finally calls the EO's registered receive function to deliver - * the event for processing. When the event has been handled and the EO's - * receive function returns, it's again time for the dispatcher on that core to - * request another event from the scheduler and deliver it to the corresponding - * EO. The aforedescribed scenario happens in parallel on all cores running the - * EM instance. Events originating from a particular queue might thus be given - * for processing on any core, decided separately for each event by the - * scheduler as the dispatcher on a core requests more work - this is per-event - * dynamic load-balancing. EM contains mechanisms to ensure atomicity and event - * (re-)ordering. - * - * The EM concept has been designed to be highly efficient, operating in a - * run-to-completion manner on each participating core with neither context - * switching nor pre-emption slowing down the event processing loops. - * EM can run on bare metal for best performance or under an operating system - * with special arrangements (e.g. one thread per core with thread affinity). 
- * - * The concept and the API are intended to allow fairly easy implementations on - * general purpose or networking oriented multicore packet processing SoCs, - * which typically also contain accelerators for packet processing needs. - * Efficient integration with modern HW accelerators has been a major driver of - * the EM concept. - * - * One general principle of the EM API is that the function calls are mostly - * multicore safe. The application still needs to consider parallel processing - * data hazards and race conditions unless explicitly documented in the API for - * the function call in question. For example, one core might ask for a queue - * context while another core changes it, thus the returned context may be - * invalid (valid data, but either the old or the new value is returned). Thus - * modifications of shared state or data should be protected by an atomic - * context (if load balancing is used) or otherwise synchronized by the - * application itself. One simple way to achieve atomic processing is to use an - * atomic queue to serialize the EO's incoming events and perform management - * operations in the EO's receive function. This serialization limits the - * throughput of the atomic queue in question to the equivalent throughput of a - * single core, but since normally EM applications use multiple queues, all - * cores should get events to process and the total throughput will be relative - * to the number of cores running the EM instance. - * - * EM_64_BIT or EM_32_BIT (needs to be defined by the build) defines whether - * (most of) the types used in the API are 32 or 64 bits wide. NOTE, that this - * is a major decision, since it may limit value passing between different - * systems using the defined types directly. Using 64-bits may allow for a more - * efficient underlying implementation, as e.g. more data can be coded in - * 64-bit identifiers. - * - * @section section_2 Principles - * - This API attempts to guide towards a portable application architecture, - * but is not defined for portability by re-compilation. Many things are system - * specific giving more possibilities for efficient use of HW resources. - * - EM does not define event content (one exception, see em_alloc()). This is - * a choice made for performance reasons, since most HW devices use proprietary - * descriptors. This API enables the usage of those directly. - * - EM does not define a detailed queue scheduling discipline or an API to set - * it up with (or actually anything to configure a system). The priority value - * in this API is a (mapped) system specific QoS class label only. - * - In general, EM does not implement a full SW platform or a middleware - * solution, it implements a subset - a driver level part. For best - * performance it can be used directly from the applications. - * - * @section section_3 Inter-system communication - * EM does not define how to communicate with another EM instance or another - * system transparently. However, this is a typical need and the current API - * does have ways to achieve almost transparent communication between systems - * ("event chaining"): - * Since the queue identifier is a system specific value, it is easy to encode - * extra information into it in the EM implementation. For instance it could be - * split into two parts, where the lower part is a local queue id or index and - * the higher part, if not zero, points to another system. 
The implementation - * of em_send() can detect a non-local queue and forward events to the target - * using any transport mechanism available and once at the target instance the - * lower part is used to map to a local queue. For the application nothing - * changes. The problem is the lack of shared memory between those systems. - * The given event can be fully copied, but it should not have any references to - * sender's local memory. Thus it is not fully transparent if the event contains - * references to local memory (e.g. pointers). - * - * @section section_4 Files - * @subsection sub_1 Generic - * - event_machine.h - * - Event Machine API - * The application should include this file only. - * - * Files included by event_machine.h: - * - event_machine_version.h - * - Event Machine version defines, macros and APIs - * - event_machine_deprecated.h - * - EM API deprecation defines & macros - * - event_machine_types.h - * - Event Machine basic types - * - event_machine_event.h - * - event related functionality - * - event_machine_packet.h - * - packet event related functionality - * - event_machine_eo.h - * - EO related functionality - * - event_machine_event_group.h - * - event group feature for fork-join type of operations using events - * - event_machine_atomic_group.h - * - functionality for atomic groups of queues (API 1.1) - * - event_machine_queue.h - * - queue related functionality - * - event_machine_queue_group.h - * - queue group related functionality - * - event_machine_error.h - * - error management related functionality - * - event_machine_core.h - * - core/thread related functionality - * - event_machine_scheduler.h - * - scheduling related functionality - * - event_machine_dispatcher.h - * - dispatching related functionality - * - event_machine_timer.h - * - timer APIs - * - * @subsection sub_2 Platform Specific - * (also included by event_machine.h) - * - event_machine_config.h - * - Event Machine constants and configuration options - * - event_machine_hw_config.h - * - HW specific constants and configuration options - * - event_machine_hw_specific.h - * - HW specific functions and macros - * - event_machine_hw_types.h - * - HW specific types - * - event_machine_hooks.h - * - API-hooks and idle-hooks - * - event_machine_init.h - * - Event Machine initialization - * - event_machine_pool.h - * - event pool related functionality - * - event_machine_timer_hw_specific.h - * - Platform specific timer definitions - * - * @subsection sub_3 Helper - * These files must be separately included by the application on a need basis. - * - event_machine_helper.h - * - optional helper routines - * - event_machine_debug.h - * - optional debug helpers (only for debug use) - * - * @subsection sub_4 Extensions - * These files must be separately included by the application on a need basis. 
- *   - event_machine_odp_ext.h
- *     - EM <-> ODP conversion functions and ODP related helpers
- *
- * @example hello.c
- * @example api_hooks.c
- * @example dispatcher_callback.c
- * @example error.c
- * @example event_group.c
- * @example event_group_abort.c
- * @example event_group_assign_end.c
- * @example event_group_chaining.c
- * @example fractal.c
- * @example ordered.c
- * @example queue_types_ag.c
- * @example queue_types_local.c
- * @example queue_group.c
- * @example timer_hello.c
- * performance:
- * @example atomic_processing_end.c
- * @example loop.c
- * @example loop_multircv.c
- * @example loop_refs.c
- * @example loop_vectors.c
- * @example pairs.c
- * @example pool_perf.c
- * @example queue_groups.c
- * @example queues.c
- * @example queues_local.c
- * @example queues_output.c
- * @example queues_unscheduled.c
- * @example scheduling_latency.c
- * @example send_multi.c
- * @example timer_test.c
- * @example timer_test_periodic.c
- * @example timer_test_ring.c
- * bench:
- * @example bench_event.c
- * @example bench_pool.c
- */
-
-/* EM deprecated */
-#include <event_machine/api/event_machine_deprecated.h>
-
-/* EM version */
-#include <event_machine/event_machine_version.h>
-
-/* EM config & types */
-#include <event_machine/platform/event_machine_config.h>
-#include <event_machine/api/event_machine_types.h>
-
-/* HW specific EM config & types */
-#include <event_machine/platform/event_machine_hw_config.h>
-#include <event_machine/platform/event_machine_hw_types.h>
-
-/* EM error management */
-#include <event_machine/api/event_machine_error.h>
-/* EM Execution Object (EO) related functions */
-#include <event_machine/api/event_machine_eo.h>
-/* EM Queue functions */
-#include <event_machine/api/event_machine_queue.h>
-/* EM Queue Group functions */
-#include <event_machine/api/event_machine_queue_group.h>
-/* EM Core functions*/
-#include <event_machine/api/event_machine_core.h>
-/* EM Event functions */
-#include <event_machine/api/event_machine_event.h>
-/* EM Packet Event functions */
-#include <event_machine/api/event_machine_packet.h>
-/* EM Atomic Group functions */
-#include <event_machine/api/event_machine_atomic_group.h>
-/* EM Event Group functions */
-#include <event_machine/api/event_machine_event_group.h>
-/* EM Scheduler functions */
-#include <event_machine/api/event_machine_scheduler.h>
-/* EM Dispatcher functions */
-#include <event_machine/api/event_machine_dispatcher.h>
-
-/* EM Event Pool functions */
-#include <event_machine/platform/event_machine_pool.h>
-/* EM API hooks */
-#include <event_machine/platform/event_machine_hooks.h>
-/* EM initialization and termination */
-#include <event_machine/platform/event_machine_init.h>
-/* Other HW/Platform specific functions */
-#include <event_machine/platform/event_machine_hw_specific.h>
-/* EM Timer HW/Platform specific */
-#include <event_machine/platform/event_machine_timer_hw_specific.h>
-/* EM Timer */
-#include <event_machine/api/event_machine_timer.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#pragma GCC visibility pop
-#endif /* EVENT_MACHINE_H */
+/*
+ * Copyright (c) 2012, Nokia Siemens Networks
+ * Copyright (c) 2015-2024, Nokia Solutions and Networks
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of the copyright holder nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef EVENT_MACHINE_H +#define EVENT_MACHINE_H + +#pragma GCC visibility push(default) + +/** + * @file + * Event Machine API + * + * This file includes all other needed EM headers + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** @mainpage + * + * @section section_1 General + * Event Machine (EM) is a framework and an architectural abstraction of an + * event driven, multicore optimized, processing concept originally developed + * for the networking data plane. It offers an easy programming concept for + * scalable and dynamically load balanced multicore applications with a very + * low overhead run-to-completion principle. + * + * Events, queues and execution objects (EO) along with the scheduler and the + * dispatcher form the main elements of the EM concept. An event is an + * application specific piece of data (like a message or a network packet) + * describing work, something to do. All processing in EM must be triggered by + * an event. Events are sent to asynchronous application specific EM queues. + * A dispatcher loop is run by a single thread on each core in the EM instance + * ("core" is used here to refer to a core or one HW thread on multi-threaded + * cores). The dispatcher on each core interfaces with the scheduler and asks + * for an event to process. The scheduler then evaluates the state of all the + * EM queues and gives the highest priority event available to the requesting + * dispatcher. The dispatcher looks up which EO owns the queue that the event + * came from and finally calls the EO's registered receive function to deliver + * the event for processing. When the event has been handled and the EO's + * receive function returns, it's again time for the dispatcher on that core to + * request another event from the scheduler and deliver it to the corresponding + * EO. The aforedescribed scenario happens in parallel on all cores running the + * EM instance. Events originating from a particular queue might thus be given + * for processing on any core, decided separately for each event by the + * scheduler as the dispatcher on a core requests more work - this is per-event + * dynamic load-balancing. EM contains mechanisms to ensure atomicity and event + * (re-)ordering. + * + * The EM concept has been designed to be highly efficient, operating in a + * run-to-completion manner on each participating core with neither context + * switching nor pre-emption slowing down the event processing loops. + * EM can run on bare metal for best performance or under an operating system + * with special arrangements (e.g. one thread per core with thread affinity). + * + * The concept and the API are intended to allow fairly easy implementations on + * general purpose or networking oriented multicore packet processing SoCs, + * which typically also contain accelerators for packet processing needs. + * Efficient integration with modern HW accelerators has been a major driver of + * the EM concept. 
+ * + * One general principle of the EM API is that the function calls are mostly + * multicore safe. The application still needs to consider parallel processing + * data hazards and race conditions unless explicitly documented in the API for + * the function call in question. For example, one core might ask for a queue + * context while another core changes it, thus the returned context may be + * invalid (valid data, but either the old or the new value is returned). Thus + * modifications of shared state or data should be protected by an atomic + * context (if load balancing is used) or otherwise synchronized by the + * application itself. One simple way to achieve atomic processing is to use an + * atomic queue to serialize the EO's incoming events and perform management + * operations in the EO's receive function. This serialization limits the + * throughput of the atomic queue in question to the equivalent throughput of a + * single core, but since normally EM applications use multiple queues, all + * cores should get events to process and the total throughput will be relative + * to the number of cores running the EM instance. + * + * EM_64_BIT or EM_32_BIT (needs to be defined by the build) defines whether + * (most of) the types used in the API are 32 or 64 bits wide. NOTE, that this + * is a major decision, since it may limit value passing between different + * systems using the defined types directly. Using 64-bits may allow for a more + * efficient underlying implementation, as e.g. more data can be coded in + * 64-bit identifiers. + * + * @section section_2 Principles + * - This API attempts to guide towards a portable application architecture, + * but is not defined for portability by re-compilation. Many things are system + * specific giving more possibilities for efficient use of HW resources. + * - EM does not define event content (one exception, see em_alloc()). This is + * a choice made for performance reasons, since most HW devices use proprietary + * descriptors. This API enables the usage of those directly. + * - EM does not define a detailed queue scheduling discipline or an API to set + * it up with (or actually anything to configure a system). The priority value + * in this API is a (mapped) system specific QoS class label only. + * - In general, EM does not implement a full SW platform or a middleware + * solution, it implements a subset - a driver level part. For best + * performance it can be used directly from the applications. + * + * @section section_3 Inter-system communication + * EM does not define how to communicate with another EM instance or another + * system transparently. However, this is a typical need and the current API + * does have ways to achieve almost transparent communication between systems + * ("event chaining"): + * Since the queue identifier is a system specific value, it is easy to encode + * extra information into it in the EM implementation. For instance it could be + * split into two parts, where the lower part is a local queue id or index and + * the higher part, if not zero, points to another system. The implementation + * of em_send() can detect a non-local queue and forward events to the target + * using any transport mechanism available and once at the target instance the + * lower part is used to map to a local queue. For the application nothing + * changes. The problem is the lack of shared memory between those systems. + * The given event can be fully copied, but it should not have any references to + * sender's local memory. 
Thus it is not fully transparent if the event contains + * references to local memory (e.g. pointers). + * + * @section section_4 Files + * @subsection sub_1 Generic + * - event_machine.h + * - Event Machine API + * The application should include this file only. + * + * Files included by event_machine.h: + * - event_machine_version.h + * - Event Machine version defines, macros and APIs + * - event_machine_deprecated.h + * - EM API deprecation defines & macros + * - event_machine_types.h + * - Event Machine basic types + * - event_machine_event.h + * - event related functionality + * - event_machine_packet.h + * - packet event related functionality + * - event_machine_eo.h + * - EO related functionality + * - event_machine_event_group.h + * - event group feature for fork-join type of operations using events + * - event_machine_atomic_group.h + * - functionality for atomic groups of queues (API 1.1) + * - event_machine_queue.h + * - queue related functionality + * - event_machine_queue_group.h + * - queue group related functionality + * - event_machine_error.h + * - error management related functionality + * - event_machine_core.h + * - core/thread related functionality + * - event_machine_scheduler.h + * - scheduling related functionality + * - event_machine_dispatcher.h + * - dispatching related functionality + * - event_machine_timer.h + * - timer APIs + * + * @subsection sub_2 Platform Specific + * (also included by event_machine.h) + * - event_machine_config.h + * - Event Machine constants and configuration options + * - event_machine_hw_config.h + * - HW specific constants and configuration options + * - event_machine_hw_specific.h + * - HW specific functions and macros + * - event_machine_hw_types.h + * - HW specific types + * - event_machine_hooks.h + * - API-hooks and idle-hooks + * - event_machine_init.h + * - Event Machine initialization + * - event_machine_pool.h + * - event pool related functionality + * - event_machine_timer_hw_specific.h + * - Platform specific timer definitions + * + * @subsection sub_3 Helper + * These files must be separately included by the application on a need basis. + * - event_machine_helper.h + * - optional helper routines + * - event_machine_debug.h + * - optional debug helpers (only for debug use) + * + * @subsection sub_4 Extensions + * These files must be separately included by the application on a need basis. 
+ *   - event_machine_odp_ext.h
+ *     - EM <-> ODP conversion functions and ODP related helpers
+ *
+ * @example hello.c
+ * @example api_hooks.c
+ * @example dispatcher_callback.c
+ * @example error.c
+ * @example event_group.c
+ * @example event_group_abort.c
+ * @example event_group_assign_end.c
+ * @example event_group_chaining.c
+ * @example fractal.c
+ * @example ordered.c
+ * @example queue_types_ag.c
+ * @example queue_types_local.c
+ * @example queue_group.c
+ * @example timer_hello.c
+ * performance:
+ * @example atomic_processing_end.c
+ * @example loop.c
+ * @example loop_multircv.c
+ * @example loop_refs.c
+ * @example loop_vectors.c
+ * @example loop_united.c
+ * @example pairs.c
+ * @example pool_perf.c
+ * @example queue_groups.c
+ * @example queues.c
+ * @example queues_local.c
+ * @example queues_output.c
+ * @example queues_unscheduled.c
+ * @example scheduling_latency.c
+ * @example send_multi.c
+ * @example timer_test.c
+ * @example timer_test_periodic.c
+ * @example timer_test_ring.c
+ * bench:
+ * @example bench_event.c
+ * @example bench_pool.c
+ */
+
+/* EM deprecated */
+#include <event_machine/api/event_machine_deprecated.h>
+
+/* EM version */
+#include <event_machine/event_machine_version.h>
+
+/* EM config & types */
+#include <event_machine/platform/event_machine_config.h>
+#include <event_machine/api/event_machine_types.h>
+
+/* HW specific EM config & types */
+#include <event_machine/platform/event_machine_hw_config.h>
+#include <event_machine/platform/event_machine_hw_types.h>
+
+/* EM error management */
+#include <event_machine/api/event_machine_error.h>
+/* EM Execution Object (EO) related functions */
+#include <event_machine/api/event_machine_eo.h>
+/* EM Queue functions */
+#include <event_machine/api/event_machine_queue.h>
+/* EM Queue Group functions */
+#include <event_machine/api/event_machine_queue_group.h>
+/* EM Core functions*/
+#include <event_machine/api/event_machine_core.h>
+/* EM Event functions */
+#include <event_machine/api/event_machine_event.h>
+/* EM Packet Event functions */
+#include <event_machine/api/event_machine_packet.h>
+/* EM Atomic Group functions */
+#include <event_machine/api/event_machine_atomic_group.h>
+/* EM Event Group functions */
+#include <event_machine/api/event_machine_event_group.h>
+/* EM Scheduler functions */
+#include <event_machine/api/event_machine_scheduler.h>
+/* EM Dispatcher functions */
+#include <event_machine/api/event_machine_dispatcher.h>
+
+/* EM Event Pool functions */
+#include <event_machine/platform/event_machine_pool.h>
+/* EM API hooks */
+#include <event_machine/platform/event_machine_hooks.h>
+/* EM initialization and termination */
+#include <event_machine/platform/event_machine_init.h>
+/* Other HW/Platform specific functions */
+#include <event_machine/platform/event_machine_hw_specific.h>
+/* EM Timer HW/Platform specific */
+#include <event_machine/platform/event_machine_timer_hw_specific.h>
+/* EM Timer */
+#include <event_machine/api/event_machine_timer.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#pragma GCC visibility pop
+#endif /* EVENT_MACHINE_H */
diff --git a/include/event_machine/README_API b/include/event_machine/README_API
index 370e1e5e..aea4d963 100644
--- a/include/event_machine/README_API
+++ b/include/event_machine/README_API
@@ -5,6 +5,43 @@ EM API Release Notes
 - See em-odp/CHANGE_NOTES for changed and added features other than API changes.
 - See em-odp/README for usage and compilation instructions.
 
+--------------------------------------------------------------------------------
+API 3.8 (EM_VERSION_API_MAJOR=3, EM_VERSION_API_MINOR=8)
+--------------------------------------------------------------------------------
+1. Timer: change the em_tmo_delete() API function and
+   enhance the timeout cancel and delete logic.
+   (see include/event_machine/api/event_machine_timer.h)
+
+   NOTE: Both an API and functional change, requires changes to user code logic!
+
+   Remove the possibility to delete an active timeout.
+   The API function em_tmo_delete() does not return the timeout event anymore.
+   Only an inactive timeout can be deleted. The timeout must be successfully
+   canceled, or the timeout must be expired and the corresponding timeout event
+   received, before deletion. The user must be prepared to receive the last
+   timeout event if cancelling the timeout fails (e.g. too late) and only after
+   that delete the timeout.
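+
+   For example, a one-shot timeout could be handled under the new logic
+   roughly as follows (an illustrative sketch, not part of the release notes;
+   error handling simplified):
+
+       em_event_t ev = EM_EVENT_UNDEF;
+
+       if (em_tmo_cancel(tmo, &ev) == EM_OK) {
+               /* Canceled in time: the unsent timeout event was returned */
+               em_free(ev);
+               em_tmo_delete(tmo); /* tmo is now inactive, delete allowed */
+       } else {
+               /* Too late to cancel: the last timeout event will still be
+                * delivered. Delete the tmo only after receiving that event,
+                * e.g. in the EO receive function. */
+       }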
+   A periodic or a periodic ring timeout can be deleted after a successful
+   cancel, or if cancel fails with EM_ERR_TOONEAR when em_tmo_ack() has returned
+   EM_ERR_CANCELED - this indicates that the acknowledged timeout is canceled
+   and that it was the last timeout event coming for that periodic timeout.
+   The change is needed to make the API more robust and reliable.
+
+2. Init: check that em_conf_init() has been called before em_init()
+   (see include/event_machine/platform/event_machine_init.h)
+
+   Verify that the user has called em_conf_init(conf) to initialize the
+   EM configuration with default values before calling em_init(conf),
+   otherwise an error is reported and em_init() returns failure.
+
+       em_conf_t conf;
+       em_conf_init(&conf);  /* Mandatory! */
+       conf. ... = ...;      /* Set needed EM options */
+       ...
+       ret = em_init(&conf); /* Initialize EM once */
+       ...
+       ret = em_init_core(); /* Initialize each EM-core separately */
+
 --------------------------------------------------------------------------------
 API 3.7 (EM_VERSION_API_MAJOR=3, EM_VERSION_API_MINOR=7)
 --------------------------------------------------------------------------------
diff --git a/include/event_machine/api/event_machine_dispatcher.h b/include/event_machine/api/event_machine_dispatcher.h
index bf26ef23..8c37b677 100644
--- a/include/event_machine/api/event_machine_dispatcher.h
+++ b/include/event_machine/api/event_machine_dispatcher.h
@@ -1,655 +1,669 @@
-/*
- * Copyright (c) 2015-2023, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *   * Neither the name of the copyright holder nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef EVENT_MACHINE_DISPATCHER_H_
-#define EVENT_MACHINE_DISPATCHER_H_
-
-#pragma GCC visibility push(default)
-
-/**
- * @file
- * @defgroup em_dispatcher Dispatcher
- * Event Machine dispatcher related services.
- * @{
- *
- * The EM dispatcher contains the main loop of processing on each EM-core and
- * interfaces with the scheduler to obtain events for processing.
- * Further, the EM dispatcher is responsible for passing the events received
- * on a core, from the scheduler, to the correct EO-receive function along with
- * information about which queue the events originated from, what their types
- * are etc.
- *
- * EM provides APIs to register, or unregister, dispatch callback hooks, i.e.
- * user provided callback functions that will be run just before EM calls the
- * EO-receive function or after returning from it. These callbacks are referred
- * to as enter- and exit-callbacks respectively.
- * The dispatch callbacks can be used to collect debug information, statistics
- * or implement new functionality. The enter-callback is called before entering
- * the EO-receive function on each core separately. The callback gets all the
- * same arguments as the EO-receive function and can additionally modify them.
- * The exit-callback works in a similar way, but is instead called after the
- * EO-receive function returns and has no arguments except for the EO handle.
- * Multiple callbacks can be registered. The calling order of multiple
- * registered functions is the order of registration. If the same function is
- * registered twice then it will be called twice. The max amount of simultaneous
- * callbacks is set by the define 'EM_CALLBACKS_MAX'.
- * If an enter-callback changes the event handle to UNDEF, the next callback
- * will still be called with event as UNDEF, but the EO-receive function won't
- * be called with an UNDEF event.
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <event_machine/api/event_machine_types.h>
-#include <event_machine/platform/event_machine_hw_types.h>
-
-/**
- * @brief EM dispatch duration selection flags
- *
- * Combining (bitwise OR) several DURATION flags will instruct the EM dispatcher
- * to dispatch until the first 'duration' condition is met, whichever happens
- * first.
- */
-typedef enum {
-	/** Select: dispatch forever, never return */
-	EM_DISPATCH_DURATION_FOREVER = 0,
-	/** Select: dispatch until em_dispatch_opt_t::duration.rounds reached */
-	EM_DISPATCH_DURATION_ROUNDS = 1,
-	/** Select: dispatch until em_dispatch_opt_t::duration.ns reached */
-	EM_DISPATCH_DURATION_NS = 2,
-	/** Select: dispatch until em_dispatch_opt_t::duration.events reached */
-	EM_DISPATCH_DURATION_EVENTS = 4,
-
-	/** Select: dispatch until em_dispatch_opt_t::duration.no_events.rounds reached */
-	EM_DISPATCH_DURATION_NO_EVENTS_ROUNDS = 8,
-	/** Select: dispatch until em_dispatch_opt_t::duration.no_events.ns reached */
-	EM_DISPATCH_DURATION_NO_EVENTS_NS = 16,
-
-	/* Keep last, for error checking */
-	EM_DISPATCH_DURATION_LAST
-} em_dispatch_duration_select_t;
-
-/**
- * Dispatch duration.
- *
- * Select which dispatch duration, or combination, is to be used with the
- * em_dispatch_duration() function.
- * Bitwise OR .select-flags for a combination.
- * Dispatch will end when one of the selected 'duration' options is
- * reached, whichever is hit first.
- */
-typedef struct {
-	/**
-	 * Select which 'duration'-fields that should be taken into account
-	 * when evaluating the em_dispatch_duration() run time.
-	 *
-	 * Only the duration fields that correspond to set .select-flags
-	 * will be used.
-	 */
-	em_dispatch_duration_select_t select;
-
-	/*
-	 * Duration fields / values below considered according to .select-flags:
-	 */
-
-	/**
-	 * Dispatch for the given number of rounds, if used must be > 0.
-	 * Only considered if .select contains EM_DISPATCH_DURATION_ROUNDS.
-	 */
-	uint64_t rounds;
-
-	/**
-	 * Dispatch (at least) for the given time in nanoseconds,
-	 * if used must be > 0.
- * Only considered if .select contains EM_DISPATCH_DURATION_NS. - * - * Using a large value for the option 'wait_ns' relative to .ns - * might delay the return from dispatch. - * - * The runtime of the EO-receive function for the last batch of events - * is not covered by .ns. - * EM will request new events to dispatch while the - * elapsed dispatch time is < .ns. - */ - uint64_t ns; - - /** - * Dispatch until (at least) the given number of events have been - * handled, if used must be > 0. - * Only considered if .select contains EM_DISPATCH_DURATION_EVENTS. - * - * Note that the option 'burst_size' affects the number of events - * dispatched. EM will request new events to dispatch while the number - * of dispatched events is < .events and then handle the whole burst. - * - * The option 'sched_pause=true' might also increase the number of - * events dispatched since the EM dispatcher needs to fetch and handle - * any leftover events held locally by the scheduler before returning. - */ - uint64_t events; - - struct { - /** - * Dispatch until no events have been received for the - * given number of rounds, if used must be > 0. - * Only considered if .select contains - * EM_DISPATCH_DURATION_NO_EVENTS_ROUNDS. - */ - uint64_t rounds; - - /** - * Dispatch until no events have been received for the - * given time in nanoseconds, if used must be > 0. - * Only considered if .select contains - * EM_DISPATCH_DURATION_NO_EVENTS_NS. - */ - uint64_t ns; - } no_events; -} em_dispatch_duration_t; - -/** - * @brief EM dispatch options - * - * The options must be initialized once with em_dispatch_opt_init() before - * using them with other em_dispatch_...() calls for the first time. Further - * calls to em_dispatch_...() with the same options structure do not need - * initialization and the user is allowed to modify the options between calls - * to change the dispatch behaviour. - * - * @see em_dispatch_opt_init(), em_dispatch_duration() etc. - */ -typedef struct { - /** - * Scheduler wait-for-events timeout in nanoseconds, might save power. - * The scheduler will wait for events, if no immediately available, for - * 'wait_ns' nanoseconds per scheduling / dispatch round. - * - * Note that using a large 'wait_ns' value relative to a - * dispatch duration in 'ns' might delay the return from dispatch. - * - * 0: do not wait for events (default) - */ - uint64_t wait_ns; - - /** - * Scheduler burst size. - * The max number of events the dispatcher will request in one burst - * from the scheduler. - * - * default: EM_SCHED_MULTI_MAX_BURST - */ - uint16_t burst_size; - - /** - * Override the possibly configured dispatcher input-polling callback - * (set via em_conf_t::input.input_poll_fn). - * - * false: Do not skip the input-poll callback if configured (default). - * true: Skip the input-poll callback in the dispatcher. - */ - bool skip_input_poll; /* override em_conf_t configuration */ - - /** - * Override the possibly configured dispatcher output-drain callback - * (set via em_conf_t::output.output_drain_fn). - * - * false: Do not skip the output-drain callback if configured (default). - * true: Skip the output-drain callback in the dispatcher. - */ - bool skip_output_drain; /* override em_conf_t configuration */ - - /** - * Pause the scheduler on the calling core when exiting the EM dispatch - * function. If enabled, will also resume the scheduling when entering - * dispatch. 
Pausing also implicitly causes the dispatcher to fetch and - * handle any leftover events held locally by the scheduler before - * returning. - * - * false: Do not pause and resume the scheduler when entering and - * exiting dispatch (default). - * true: Pause scheduling when exiting dispatch and resume scheduling - * when entering. EM will further empty and dispatch any remaining - * events locally stashed in the scheduler before returning - * causing some extra dispatch 'rounds' to be run. - */ - bool sched_pause; - - /** - * Internal check - don't touch! - * - * EM will verify that em_dispatch_opt_init(opt) has been called - * before use with dispatch functions. - */ - uint32_t __internal_check; -} em_dispatch_opt_t; - -/** - * @brief Dispatch results - * - * Output struct for returning the results of the em_dispatch_...() functions - * in. Usage of 'em_dispatch_results_t *results' with dispatch functions is - * optional and 'NULL' can be used if not interested in the results. - */ -typedef struct { - /** - * The number of dispatch rounds that were run. - */ - uint64_t rounds; - - /** - * The time in nanoseconds that dispatch was run. - * Only filled if requesting EM to dispatch for a certain amount of - * time, i.e. if EM_DISPATCH_DURATION_NS or - * EM_DISPATCH_DURATION_NO_EVENTS_NS duration selection flags were set - * in em_dispatch_duration_t::select when using em_dispatch_duration(). - * Also set when used with em_dispatch_ns(). - */ - uint64_t ns; - - /** - * The number of events that were dispatched. - */ - uint64_t events; -} em_dispatch_results_t; - -/** - * @brief Initialize the EM dispatch options. - * - * The options passed to em_dispatch_...() need to be initialized once before - * first use. Further calls to em_dispatch_...() with the same options structure - * do not need initialization and the user is allowed to modify the options - * between calls to change dispatch behaviour. - * - * This function may be called before em_init() or em_init_core() since it only - * sets the default values for the 'em_dispatch_opt_t *opt' argument. - * - * @param opt - */ -void em_dispatch_opt_init(em_dispatch_opt_t *opt); - -/** - * @brief Run the EM dispatcher for a certain duration with options. - * - * Called by an EM-core to dispatch (with options) events for EM processing. - * The EM dispatcher internally queries the scheduler for events for the - * calling EM-core and then dispatches them for processing, i.e. passes the - * events to the application EO's receive-function based on the queue the events - * were received / dequeued from. - * - * Combining (bitwise OR) several DURATION selection flags - * (see em_dispatch_duration_select_t) will dispatch until the first - * duration-condition is met, whichever happens first. - * - * Example usage: - * @code - * em_dispatch_duration_t duration; - * em_dispatch_opt_t opt; - * em_dispatch_results_t results; - * em_status_t status; - * - * em_dispatch_opt_init(&opt); // Mandatory once before first use! - * opt.wait_ns = 10000; // Wait max 10 us for events from scheduler - * opt.sched_pause = false; // Don't pause scheduling on return - * - * // Dispatch for 1000 rounds, 200 us or until 300 events have been - * // handled. Return when the first of these conditions is met. - * duration.select = EM_DISPATCH_DURATION_ROUNDS | - * EM_DISPATCH_DURATION_NS | - * EM_DISPATCH_DURATION_EVENTS; - * duration.rounds = 1000; - * duration.ns = 200000; // 200 us - * duration.events = 300; - * ... 
- * do { - // Dispatch until '.rounds' or '.ns' or '.events' reached - * status = em_dispatch_duration(&duration, &opt, &results); - * ... - * // Update 'duration' and 'opt' based on 'results' - * // and/or runtime conditions - * } while (do_dispatch(&results, ...)); - * - * // Prepare to leave EM dispatching - * duration.select = EM_DISPATCH_DURATION_NO_EVENTS_NS; - * duration.no_events.ns = 100000; - * opt.wait_ns = 0; // No waiting for events - * opt.skip_input_poll = true; // No callbacks - * opt.skip_output_drain = true; // -"- - * opt.sched_pause = true; // Pause scheduling on this EM-core - * - * status = em_dispatch_duration(&duration, &opt, &results); - * // Leave EM dispatching for a while - * @endcode - * - * @param duration Dispatch duration. - * @param opt Dispatch options (optional, can be NULL). - * If used, must have been initialized with - * em_dispatch_opt_init(). One initialization is enough, - * later calls to em_dispatch_...(...opt) can reuse (the - * possibly modified) 'opt'. - * Using NULL is the same as passing 'opt' initialized - * with em_dispatch_opt_init(&opt) without further changes. - * @param[out] results Dispatch results (optional, can be NULL). - * Filled for successful dispatch scenarios, i.e. when the - * return value is EM_OK. - * - * @return Error status code - * @retval EM_OK when dispatch was successful, 'result' is filled (if provided) - * @retval other than EM_OK on error, 'result' is untouched - */ -em_status_t em_dispatch_duration(const em_dispatch_duration_t *duration, - const em_dispatch_opt_t *opt, - em_dispatch_results_t *results /*out*/); -/** - * @brief Run the EM dispatcher for a given amount of time (in nanoseconds). - * - * Similar to em_dispatch_duration(), but with a simplified dispatch duration: - * here only the number of nanoseconds to dispatch is provided. - * - * Using a large value for 'opt.wait_ns' relative to 'ns' might delay the - * return from dispatch. - * - * The runtime of the EO-receive function for the last batch of events - * is not covered by 'ns'. - * EM will request new events to dispatch while the elapsed time is < 'ns'. - * - * @see em_dispatch_duration() for documentation and usage. - * - * @param ns Dispatch duration in nanoseconds. - * Note that 'ns=0' is not allowed! - * @param opt Dispatch options (optional, can be NULL). - * If used, must have been initialized with - * em_dispatch_opt_init(). One initialization is enough, - * later calls to em_dispatch_...(...opt) can reuse (the - * possibly modified) 'opt'. - * Using NULL is the same as passing 'opt' initialized - * with em_dispatch_opt_init(&opt) without further changes. - * @param[out] results Dispatch results (optional, can be NULL). - * Filled for successful dispatch scenarios, i.e. when the - * return value is EM_OK. - * - * @return Error status code - * @retval EM_OK when dispatch was successful, 'result' is filled (if provided) - * @retval other than EM_OK on error, 'result' is untouched - */ -em_status_t em_dispatch_ns(uint64_t ns, - const em_dispatch_opt_t *opt, - em_dispatch_results_t *results /*out*/); - -/** - * @brief Run the EM dispatcher until a given number of events have been - * dispatched. - * - * Similar to em_dispatch_duration(), but with a simplified dispatch duration: - * here only the number of events to dispatch is provided. - * - * Note that 'opt.burst_size' affects the number of events dispatched. - * EM will request new events to dispatch while the number of dispatched - * events is < .events and then handle the whole burst. 
- * - * The option 'opt.sched_pause=true' might also increase the number of - * events dispatched since the EM dispatcher needs to fetch and handle - * any leftover events held locally by the scheduler before returning. - * - * @see em_dispatch_duration() for documentation and usage. - * - * @param events Dispatch duration events. Dispatch until the given - * number of events have been dispatched. - * Note that 'events=0' is not allowed! - * @param opt Dispatch options (optional, can be NULL). - * If used, must have been initialized with - * em_dispatch_opt_init(). One initialization is enough, - * later calls to em_dispatch_...(...opt) can reuse (the - * possibly modified) 'opt'. - * Using NULL is the same as passing 'opt' initialized - * with em_dispatch_opt_init(&opt) without further changes. - * @param[out] results Dispatch results (optional, can be NULL). - * Filled for successful dispatch scenarios, i.e. when the - * return value is EM_OK. - * - * @return Error status code - * @retval EM_OK when dispatch was successful, 'result' is filled (if provided) - * @retval other than EM_OK on error, 'result' is untouched - */ -em_status_t em_dispatch_events(uint64_t events, - const em_dispatch_opt_t *opt, - em_dispatch_results_t *results /*out*/); - -/** - * @brief Run the EM dispatcher for a given number of dispatch-rounds. - * - * Similar to em_dispatch_duration(), but with a simplified dispatch duration: - * here only the number of rounds to dispatch is provided. - * - * @see em_dispatch_duration() for documentation and usage. - * - * @param rounds Dispatch duration rounds. Dispatch for the given number - * of rounds. - * Note that 'rounds=0' is not allowed! - * @param opt Dispatch options (optional, can be NULL). - * If used, must have been initialized with - * em_dispatch_opt_init(). One initialization is enough, - * later calls to em_dispatch_...(...opt) can reuse (the - * possibly modified) 'opt'. - * Using NULL is the same as passing 'opt' initialized - * with em_dispatch_opt_init(&opt) without further changes. - * @param[out] results Dispatch results (optional, can be NULL). - * Filled for successful dispatch scenarios, i.e. when the - * return value is EM_OK. - * - * @return Error status code - * @retval EM_OK when dispatch was successful, 'result' is filled (if provided) - * @retval other than EM_OK on error, 'result' is untouched - */ -em_status_t em_dispatch_rounds(uint64_t rounds, - const em_dispatch_opt_t *opt, - em_dispatch_results_t *results /*out*/); - -/** - * EM event dispatch - * - * Called by an EM-core to dispatch events for EM processing. - * The EM dispatcher internally queries the scheduler for events for the - * calling EM-core and then dispatches them for processing, i.e. passes the - * events to the application EO's receive-function based on the queue the events - * were received / dequeued from. - * - * See the EM config file for options controlling the global behaviour of - * em_dispatch(). - * - * @param rounds Dispatch rounds before returning, - * 0 means 'never return from dispatch' - * - * @return The number of events dispatched on this core. - * Only makes sense if 'rounds > 0' - * - * @see em_dispatch_duration() for a function that enables dispatching - * with more options. - */ -uint64_t em_dispatch(uint64_t rounds); - -/** - * Dispatcher global EO-receive enter-callback. - * - * Common dispatch callback run before EO-receive functions of both the - * em_receive_func_t and em_receive_multi_func_t types (i.e. 
for EOs created - * with either em_eo_create() or em_eo_create_multircv()). - * - * Enter-callbacks are run just before entering EO-receive functions, they can - * be useful for debugging, collecting statistics, manipulating events before - * they reach the EO or implementing new services needing synchronization - * between cores. - * Arguments common for both types of EO receive functions are passed as - * references to the enter-callback (the event-type passed to the single-event - * receive function case is not passed, use em_event_get/set_type() instead). - * Arguments are references, i.e. the callback can optionally modify them. - * If modified, the new values will go to the next callback and eventually to - * the multi-event EO-receive function. - * - * Events can be dropped by changing the event-entries in the events[num]-array - * to EM_EVENT_UNDEF. Neither EO-receive nor any further enter-callbacks will - * be called if all events have been dropped by the callbacks already run, i.e. - * no callback will be called with 'num=0'. - * The callback itself needs to handle the events it drops, e.g. free them. - * Note: EM will remove entries of EM_EVENT_UNDEF from the events[]-array before - * calling the next enter-callback (if several registered) or the - * receive function and adjust 'num' accordingly for the call. - * - * The EO handle can be used to separate callback functionality per EO and the - * core id can be obtained for core specific functionality. - * - * Callback functions can be called concurrently from different cores. - * - * @see em_dispatch_register_enter_cb() - */ -typedef void (*em_dispatch_enter_func_t)(em_eo_t eo, void **eo_ctx, - em_event_t events[/*in/out*/], int num, - em_queue_t *queue, void **q_ctx); - -/** - * Dispatcher global EO-receive exit-callback. - * - * The exit-callbacks are run after EO-receive returns. - * Some arguments given to EO-receive might not be valid afterwards, thus - * the only argument given to the exit callback is the EO handle. - * - * Callback functions can be called concurrently from different cores. - * - * @see em_dispatch_register_exit_cb() - */ -typedef void (*em_dispatch_exit_func_t)(em_eo_t eo); - -/** - * Register an EO-enter callback - * - * Register a global function to be called by the dispatcher just before calling - * an EO-receive function. This can be useful for debugging, collecting - * statistics, manipulating events before they reach the EO or implementing new - * services needing synchronization between cores. - * - * The function registered should be kept short since it will be run each time - * just before calling EO-receive. All registered callbacks will further - * increase the processing time. - * - * Multiple callbacks can be registered. - * The order of calling multiple registered functions is the order of - * registration. If same function is registered twice it will be called twice. - * The maximum number of simultaneous callbacks is system specific - * (EM_CALLBACKS_MAX). - * - * @param func Callback function - * - * @return EM_OK if callback registration succeeded - * - * @see em_dispatch_enter_func_t - */ -em_status_t -em_dispatch_register_enter_cb(em_dispatch_enter_func_t func); - -/** - * Unregister an EO-enter callback - * - * This can be used to unregister a previously registered enter-function. - * - * The given function is searched for and if found removed from the call list. - * If the same function has been registered multiple times, only one reference - * is removed per unregister call. 
- * Note that when this function returns, no new calls are made to the removed - * callback function, but it is still possible that another core could be - * executing the function, so care must be taken before removing anything it may - * still use. - * - * @param func Callback function - * - * @return EM_OK if the given function was found and removed. - */ -em_status_t -em_dispatch_unregister_enter_cb(em_dispatch_enter_func_t func); - -/** - * Register an EO-exit callback - * - * Register a global function to be called by the dispatcher just after return - * from an EO-receive function. - * - * The function registered should be kept short since it will be run each time - * just after EO-receive returns. All registered callbacks will further increase - * the processing time. - * - * Multiple callbacks can be registered. - * The order of calling multiple registered functions is the order of - * registration. If same function is registered twice it will be called twice. - * The maximum number of simultaneous callbacks is system specific - * (EM_CALLBACKS_MAX). - * - * @param func Callback function - * - * @return EM_OK if callback registration succeeded - * - * @see em_dispatch_register_enter_cb(), em_dispatch_unregister_exit_cb() - */ -em_status_t -em_dispatch_register_exit_cb(em_dispatch_exit_func_t func); - -/** - * Unregister an EO-exit callback - * - * This can be used to unregister a previously registered exit-function. - * - * Given function pointer is searched and if found removed from the call list. - * If one function is registered multiple times only one reference is removed. - * - * The given function is searched for and if found removed from the call list. - * If the same function has been registered multiple times, only one reference - * is removed per unregister call. - * Note that when this function returns, no new calls are made to the removed - * callback function, but it is still possible that another core could be - * executing the function, so care must be taken before removing anything it may - * still use. - * - * @param func Callback function - * - * @return EM_OK if the given function was found and removed. - * - * @see em_dispatch_exit_func_t - */ -em_status_t -em_dispatch_unregister_exit_cb(em_dispatch_exit_func_t func); - -/** - * @} - */ -#ifdef __cplusplus -} -#endif - -#pragma GCC visibility pop -#endif /* EVENT_MACHINE_DISPATCHER_H_ */ +/* + * Copyright (c) 2015-2023, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef EVENT_MACHINE_DISPATCHER_H_
+#define EVENT_MACHINE_DISPATCHER_H_
+
+#pragma GCC visibility push(default)
+
+/**
+ * @file
+ * @defgroup em_dispatcher Dispatcher
+ * Event Machine dispatcher related services.
+ * @{
+ *
+ * The EM dispatcher contains the main loop of processing on each EM-core and
+ * interfaces with the scheduler to obtain events for processing.
+ * Further, the EM dispatcher is responsible for passing the events received
+ * on a core, from the scheduler, to the correct EO-receive function along with
+ * information about which queue the events originated from, what their types
+ * are etc. Different flavours of the EM dispatch APIs exist along with
+ * configuration options.
+ *
+ * EM provides APIs to register, or unregister, dispatch callback hooks, i.e.
+ * user provided callback functions that will be run just before EM calls the
+ * EO-receive function or after returning from it. These callbacks are referred
+ * to as dispatch enter- and exit-callbacks respectively.
+ * The dispatch callbacks can be used to collect debug information, statistics
+ * or implement new functionality.
+ *
+ * The dispatch enter-callbacks are called before entering the EO-receive
+ * function on each EM-core separately. Events can be dropped by an enter-
+ * callback. Neither the EO-receive function nor any further enter-callbacks
+ * will be called if all events have been dropped by the callbacks already run.
+ * The callback itself needs to handle the events it drops, e.g. free them.
+ *
+ * The dispatch exit-callbacks are called after the EO-receive function returns
+ * and have no arguments except for the EO handle. Note that all exit-callbacks
+ * are always called (even if the enter-callbacks dropped the events causing the
+ * rest of the enter-callbacks and the EO-receive function to be skipped).
+ *
+ * Multiple callbacks can be registered. The calling order of multiple
+ * registered callbacks is the order of registration. If the same function is
+ * registered twice, it will be called twice. The maximum number of simultaneous
+ * callbacks is set by the define 'EM_CALLBACKS_MAX'.
+ *
+ * EM does not know of any connection or relationship between registered
+ * dispatch enter- and/or exit-callbacks. All dispatch callbacks are treated as
+ * independent entries. Functionality that e.g. depends on both an enter- and an
+ * exit-callback being run must take into account that the previous
+ * enter-callbacks might have dropped all events, thus skipping the following
+ * enter-callbacks - but still running all exit-callbacks.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include
+#include
+
+/**
+ * @brief EM dispatch duration selection flags
+ *
+ * Combining (bitwise OR) several DURATION flags will instruct the EM dispatcher
+ * to dispatch until the first 'duration' condition is met, whichever happens
+ * first.
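+ *
+ * For example, to dispatch until either 10000 rounds have been run or 1 ms
+ * has elapsed, whichever happens first (an illustrative sketch, only using
+ * fields documented below):
+ * @code
+ *	em_dispatch_duration_t duration;
+ *
+ *	duration.select = EM_DISPATCH_DURATION_ROUNDS |
+ *			  EM_DISPATCH_DURATION_NS;
+ *	duration.rounds = 10000;
+ *	duration.ns = 1000000; // 1 ms
+ * @endcode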
+ */
+typedef enum {
+	/** Select: dispatch forever, never return */
+	EM_DISPATCH_DURATION_FOREVER = 0,
+	/** Select: dispatch until em_dispatch_duration_t::rounds reached */
+	EM_DISPATCH_DURATION_ROUNDS = 1,
+	/** Select: dispatch until em_dispatch_duration_t::ns reached */
+	EM_DISPATCH_DURATION_NS = 2,
+	/** Select: dispatch until em_dispatch_duration_t::events reached */
+	EM_DISPATCH_DURATION_EVENTS = 4,
+
+	/** Select: dispatch until em_dispatch_duration_t::no_events.rounds reached */
+	EM_DISPATCH_DURATION_NO_EVENTS_ROUNDS = 8,
+	/** Select: dispatch until em_dispatch_duration_t::no_events.ns reached */
+	EM_DISPATCH_DURATION_NO_EVENTS_NS = 16,
+
+	/* Keep last, for error checking */
+	EM_DISPATCH_DURATION_LAST
+} em_dispatch_duration_select_t;
+
+/**
+ * Dispatch duration.
+ *
+ * Select which dispatch duration, or combination, is to be used with the
+ * em_dispatch_duration() function.
+ * Bitwise OR the .select-flags for a combination.
+ * Dispatch will end when one of the selected 'duration' options is
+ * reached, whichever is hit first.
+ */
+typedef struct {
+	/**
+	 * Select which 'duration'-fields should be taken into account
+	 * when evaluating the em_dispatch_duration() run time.
+	 *
+	 * Only the duration fields that correspond to set .select-flags
+	 * will be used.
+	 */
+	em_dispatch_duration_select_t select;
+
+	/*
+	 * Duration fields / values below are considered according to the
+	 * .select-flags:
+	 */
+
+	/**
+	 * Dispatch for the given number of rounds, if used must be > 0.
+	 * Only considered if .select contains EM_DISPATCH_DURATION_ROUNDS.
+	 */
+	uint64_t rounds;
+
+	/**
+	 * Dispatch (at least) for the given time in nanoseconds,
+	 * if used must be > 0.
+	 * Only considered if .select contains EM_DISPATCH_DURATION_NS.
+	 *
+	 * Using a large value for the option 'wait_ns' relative to .ns
+	 * might delay the return from dispatch.
+	 *
+	 * The runtime of the EO-receive function for the last batch of events
+	 * is not covered by .ns.
+	 * EM will request new events to dispatch while the
+	 * elapsed dispatch time is < .ns.
+	 */
+	uint64_t ns;
+
+	/**
+	 * Dispatch until (at least) the given number of events have been
+	 * handled, if used must be > 0.
+	 * Only considered if .select contains EM_DISPATCH_DURATION_EVENTS.
+	 *
+	 * Note that the option 'burst_size' affects the number of events
+	 * dispatched. EM will request new events to dispatch while the number
+	 * of dispatched events is < .events and then handle the whole burst.
+	 *
+	 * The option 'sched_pause=true' might also increase the number of
+	 * events dispatched since the EM dispatcher needs to fetch and handle
+	 * any leftover events held locally by the scheduler before returning.
+	 */
+	uint64_t events;
+
+	struct {
+		/**
+		 * Dispatch until no events have been received for the
+		 * given number of rounds, if used must be > 0.
+		 * Only considered if .select contains
+		 * EM_DISPATCH_DURATION_NO_EVENTS_ROUNDS.
+		 */
+		uint64_t rounds;
+
+		/**
+		 * Dispatch until no events have been received for the
+		 * given time in nanoseconds, if used must be > 0.
+		 * Only considered if .select contains
+		 * EM_DISPATCH_DURATION_NO_EVENTS_NS.
+		 */
+		uint64_t ns;
+	} no_events;
+} em_dispatch_duration_t;
+
+/**
+ * @brief EM dispatch options
+ *
+ * The options must be initialized once with em_dispatch_opt_init() before
+ * using them with other em_dispatch_...() calls for the first time.
+ * Further calls to em_dispatch_...() with the same options structure do not
+ * need initialization and the user is allowed to modify the options between
+ * calls to change the dispatch behaviour.
+ *
+ * @see em_dispatch_opt_init(), em_dispatch_duration() etc.
+ */
+typedef struct {
+	/**
+	 * Scheduler wait-for-events timeout in nanoseconds, might save power.
+	 * The scheduler will wait for events, if none are immediately
+	 * available, for 'wait_ns' nanoseconds per scheduling/dispatch round.
+	 *
+	 * Note that using a large 'wait_ns' value relative to a
+	 * dispatch duration in 'ns' might delay the return from dispatch.
+	 *
+	 * 0: do not wait for events (default)
+	 */
+	uint64_t wait_ns;
+
+	/**
+	 * Scheduler burst size.
+	 * The max number of events the dispatcher will request in one burst
+	 * from the scheduler.
+	 *
+	 * default: EM_SCHED_MULTI_MAX_BURST
+	 */
+	uint16_t burst_size;
+
+	/**
+	 * Override the possibly configured dispatcher input-polling callback
+	 * (set via em_conf_t::input.input_poll_fn).
+	 *
+	 * false: Do not skip the input-poll callback if configured (default).
+	 * true:  Skip the input-poll callback in the dispatcher.
+	 */
+	bool skip_input_poll; /* override em_conf_t configuration */
+
+	/**
+	 * Override the possibly configured dispatcher output-drain callback
+	 * (set via em_conf_t::output.output_drain_fn).
+	 *
+	 * false: Do not skip the output-drain callback if configured (default).
+	 * true:  Skip the output-drain callback in the dispatcher.
+	 */
+	bool skip_output_drain; /* override em_conf_t configuration */
+
+	/**
+	 * Pause the scheduler on the calling core when exiting the EM dispatch
+	 * function. If enabled, will also resume the scheduling when entering
+	 * dispatch. Pausing also implicitly causes the dispatcher to fetch and
+	 * handle any leftover events held locally by the scheduler before
+	 * returning.
+	 *
+	 * false: Do not pause and resume the scheduler when entering and
+	 *        exiting dispatch (default).
+	 * true:  Pause scheduling when exiting dispatch and resume scheduling
+	 *        when entering. EM will further empty and dispatch any
+	 *        remaining events locally stashed in the scheduler before
+	 *        returning, causing some extra dispatch 'rounds' to be run.
+	 */
+	bool sched_pause;
+
+	/**
+	 * Internal check - don't touch!
+	 *
+	 * EM will verify that em_dispatch_opt_init(opt) has been called
+	 * before use with dispatch functions.
+	 */
+	uint32_t __internal_check;
+} em_dispatch_opt_t;
+
+/**
+ * @brief Dispatch results
+ *
+ * Output struct in which the em_dispatch_...() functions return their
+ * results. Usage of 'em_dispatch_results_t *results' with the dispatch
+ * functions is optional and 'NULL' can be used if the results are of no
+ * interest.
+ */
+typedef struct {
+	/**
+	 * The number of dispatch rounds that were run.
+	 */
+	uint64_t rounds;
+
+	/**
+	 * The time in nanoseconds for which dispatch was run.
+	 * Only filled if requesting EM to dispatch for a certain amount of
+	 * time, i.e. if the EM_DISPATCH_DURATION_NS or
+	 * EM_DISPATCH_DURATION_NO_EVENTS_NS duration selection flags were set
+	 * in em_dispatch_duration_t::select when using em_dispatch_duration().
+	 * Also set when used with em_dispatch_ns().
+	 */
+	uint64_t ns;
+
+	/**
+	 * The number of events that were dispatched.
+	 */
+	uint64_t events;
+} em_dispatch_results_t;
+
+/**
+ * @brief Initialize the EM dispatch options.
+ *
+ * The options passed to em_dispatch_...() need to be initialized once before
+ * first use.
+ * Further calls to em_dispatch_...() with the same options structure do not
+ * need initialization and the user is allowed to modify the options between
+ * calls to change dispatch behaviour.
+ *
+ * This function may be called before em_init() or em_init_core() since it only
+ * sets the default values for the 'em_dispatch_opt_t *opt' argument.
+ *
+ * @param opt  Dispatch options to be initialized.
+ */
+void em_dispatch_opt_init(em_dispatch_opt_t *opt);
+
+/**
+ * @brief Run the EM dispatcher for a certain duration with options.
+ *
+ * Called by an EM-core to dispatch (with options) events for EM processing.
+ * The EM dispatcher internally queries the scheduler for events for the
+ * calling EM-core and then dispatches them for processing, i.e. passes the
+ * events to the application EO's receive-function based on the queue the events
+ * were received / dequeued from.
+ *
+ * Combining (bitwise OR) several DURATION selection flags
+ * (see em_dispatch_duration_select_t) will dispatch until the first
+ * duration-condition is met, whichever happens first.
+ *
+ * Example usage:
+ * @code
+ *	em_dispatch_duration_t duration;
+ *	em_dispatch_opt_t opt;
+ *	em_dispatch_results_t results;
+ *	em_status_t status;
+ *
+ *	em_dispatch_opt_init(&opt); // Mandatory once before first use!
+ *	opt.wait_ns = 10000;        // Wait max 10 us for events from scheduler
+ *	opt.sched_pause = false;    // Don't pause scheduling on return
+ *
+ *	// Dispatch for 1000 rounds, 200 us or until 300 events have been
+ *	// handled. Return when the first of these conditions is met.
+ *	duration.select = EM_DISPATCH_DURATION_ROUNDS |
+ *			  EM_DISPATCH_DURATION_NS |
+ *			  EM_DISPATCH_DURATION_EVENTS;
+ *	duration.rounds = 1000;
+ *	duration.ns = 200000; // 200 us
+ *	duration.events = 300;
+ *	...
+ *	do {
+ *		// Dispatch until '.rounds' or '.ns' or '.events' reached
+ *		status = em_dispatch_duration(&duration, &opt, &results);
+ *		...
+ *		// Update 'duration' and 'opt' based on 'results'
+ *		// and/or runtime conditions
+ *	} while (do_dispatch(&results, ...));
+ *
+ *	// Prepare to leave EM dispatching
+ *	duration.select = EM_DISPATCH_DURATION_NO_EVENTS_NS;
+ *	duration.no_events.ns = 100000;
+ *	opt.wait_ns = 0;              // No waiting for events
+ *	opt.skip_input_poll = true;   // No callbacks
+ *	opt.skip_output_drain = true; // -"-
+ *	opt.sched_pause = true;       // Pause scheduling on this EM-core
+ *
+ *	status = em_dispatch_duration(&duration, &opt, &results);
+ *	// Leave EM dispatching for a while
+ * @endcode
+ *
+ * @param      duration  Dispatch duration.
+ * @param      opt       Dispatch options (optional, can be NULL).
+ *                       If used, must have been initialized with
+ *                       em_dispatch_opt_init(). One initialization is enough,
+ *                       later calls to em_dispatch_...(...opt) can reuse (the
+ *                       possibly modified) 'opt'.
+ *                       Using NULL is the same as passing 'opt' initialized
+ *                       with em_dispatch_opt_init(&opt) without further changes.
+ * @param[out] results   Dispatch results (optional, can be NULL).
+ *                       Filled for successful dispatch scenarios, i.e. when the
+ *                       return value is EM_OK.
+ *
+ * @return Error status code
+ * @retval EM_OK when dispatch was successful, 'results' is filled (if provided)
+ * @retval other than EM_OK on error, 'results' is untouched
+ */
+em_status_t em_dispatch_duration(const em_dispatch_duration_t *duration,
+				 const em_dispatch_opt_t *opt,
+				 em_dispatch_results_t *results /*out*/);
+
+/**
+ * @brief Run the EM dispatcher for a given amount of time (in nanoseconds).
+ *
+ * Similar to em_dispatch_duration(), but with a simplified dispatch duration:
+ * here only the number of nanoseconds to dispatch is provided.
+ *
+ * Using a large value for 'opt.wait_ns' relative to 'ns' might delay the
+ * return from dispatch.
+ *
+ * The runtime of the EO-receive function for the last batch of events
+ * is not covered by 'ns'.
+ * EM will request new events to dispatch while the elapsed time is < 'ns'.
+ *
+ * @see em_dispatch_duration() for documentation and usage.
+ *
+ * @param      ns        Dispatch duration in nanoseconds.
+ *                       Note that 'ns=0' is not allowed!
+ * @param      opt       Dispatch options (optional, can be NULL).
+ *                       If used, must have been initialized with
+ *                       em_dispatch_opt_init(). One initialization is enough,
+ *                       later calls to em_dispatch_...(...opt) can reuse (the
+ *                       possibly modified) 'opt'.
+ *                       Using NULL is the same as passing 'opt' initialized
+ *                       with em_dispatch_opt_init(&opt) without further changes.
+ * @param[out] results   Dispatch results (optional, can be NULL).
+ *                       Filled for successful dispatch scenarios, i.e. when the
+ *                       return value is EM_OK.
+ *
+ * @return Error status code
+ * @retval EM_OK when dispatch was successful, 'results' is filled (if provided)
+ * @retval other than EM_OK on error, 'results' is untouched
+ */
+em_status_t em_dispatch_ns(uint64_t ns,
+			   const em_dispatch_opt_t *opt,
+			   em_dispatch_results_t *results /*out*/);
+
+/**
+ * @brief Run the EM dispatcher until a given number of events have been
+ *        dispatched.
+ *
+ * Similar to em_dispatch_duration(), but with a simplified dispatch duration:
+ * here only the number of events to dispatch is provided.
+ *
+ * Note that 'opt.burst_size' affects the number of events dispatched.
+ * EM will request new events to dispatch while the number of dispatched
+ * events is < 'events' and then handle the whole burst.
+ *
+ * The option 'opt.sched_pause=true' might also increase the number of
+ * events dispatched since the EM dispatcher needs to fetch and handle
+ * any leftover events held locally by the scheduler before returning.
+ *
+ * @see em_dispatch_duration() for documentation and usage.
+ *
+ * @param      events    Dispatch duration in events. Dispatch until the given
+ *                       number of events have been dispatched.
+ *                       Note that 'events=0' is not allowed!
+ * @param      opt       Dispatch options (optional, can be NULL).
+ *                       If used, must have been initialized with
+ *                       em_dispatch_opt_init(). One initialization is enough,
+ *                       later calls to em_dispatch_...(...opt) can reuse (the
+ *                       possibly modified) 'opt'.
+ *                       Using NULL is the same as passing 'opt' initialized
+ *                       with em_dispatch_opt_init(&opt) without further changes.
+ * @param[out] results   Dispatch results (optional, can be NULL).
+ *                       Filled for successful dispatch scenarios, i.e. when the
+ *                       return value is EM_OK.
+ *
+ * @return Error status code
+ * @retval EM_OK when dispatch was successful, 'results' is filled (if provided)
+ * @retval other than EM_OK on error, 'results' is untouched
+ */
+em_status_t em_dispatch_events(uint64_t events,
+			       const em_dispatch_opt_t *opt,
+			       em_dispatch_results_t *results /*out*/);
+
+/**
+ * @brief Run the EM dispatcher for a given number of dispatch-rounds.
+ *
+ * Similar to em_dispatch_duration(), but with a simplified dispatch duration:
+ * here only the number of rounds to dispatch is provided.
+ *
+ * @see em_dispatch_duration() for documentation and usage.
+ *
+ * @param      rounds    Dispatch duration in rounds. Dispatch for the given
+ *                       number of rounds.
+ *                       Note that 'rounds=0' is not allowed!
+ * @param      opt       Dispatch options (optional, can be NULL).
+ *                       If used, must have been initialized with
+ *                       em_dispatch_opt_init(). One initialization is enough,
+ *                       later calls to em_dispatch_...(...opt) can reuse (the
+ *                       possibly modified) 'opt'.
+ *                       Using NULL is the same as passing 'opt' initialized
+ *                       with em_dispatch_opt_init(&opt) without further changes.
+ * @param[out] results   Dispatch results (optional, can be NULL).
+ *                       Filled for successful dispatch scenarios, i.e. when the
+ *                       return value is EM_OK.
+ *
+ * @return Error status code
+ * @retval EM_OK when dispatch was successful, 'results' is filled (if provided)
+ * @retval other than EM_OK on error, 'results' is untouched
+ */
+em_status_t em_dispatch_rounds(uint64_t rounds,
+			       const em_dispatch_opt_t *opt,
+			       em_dispatch_results_t *results /*out*/);
+
+/**
+ * EM event dispatch
+ *
+ * Called by an EM-core to dispatch events for EM processing.
+ * The EM dispatcher internally queries the scheduler for events for the
+ * calling EM-core and then dispatches them for processing, i.e. passes the
+ * events to the application EO's receive-function based on the queue the events
+ * were received / dequeued from.
+ *
+ * See the EM config file for options controlling the global behaviour of
+ * em_dispatch().
+ *
+ * @param rounds  Dispatch rounds before returning,
+ *                0 means 'never return from dispatch'
+ *
+ * @return The number of events dispatched on this core.
+ *         Only makes sense if 'rounds > 0'
+ *
+ * @see em_dispatch_duration() for a function that enables dispatching
+ *      with more options.
+ */
+uint64_t em_dispatch(uint64_t rounds);
+
+/**
+ * Dispatch enter-callback.
+ *
+ * Common dispatch callback run before EO-receive functions of both the
+ * em_receive_func_t and em_receive_multi_func_t types (i.e. for EOs created
+ * with either em_eo_create() or em_eo_create_multircv()).
+ *
+ * Enter-callbacks are run just before entering EO-receive functions. They can
+ * be useful for debugging, collecting statistics, manipulating events before
+ * they reach the EO or implementing new services needing synchronization
+ * between cores.
+ * Some of the arguments common to both types of EO receive functions are
+ * passed as pointers to the enter-callback so that the callback can optionally
+ * modify them. If modified, the new values will go to the next callback and
+ * eventually to the EO-receive function.
+ *
+ * Events can be dropped by changing the event-entries in the events[num]-array
+ * to EM_EVENT_UNDEF. Neither EO-receive nor any further enter-callbacks will
+ * be called if all events have been dropped by the callbacks already run, i.e.
+ * no callback will be called with 'num=0'.
+ * The callback itself needs to handle the events it drops, e.g. free them,
+ * as shown in the sketch below.
+ * Note: EM will remove entries of EM_EVENT_UNDEF from the events[]-array before
+ *       calling the next enter-callback (if several registered) or the
+ *       receive function and adjust 'num' accordingly for the call.
+ *
+ * Functionality that e.g. depends on both an enter- and an exit-callback being
+ * run must take into account that the previous enter-callbacks might have
+ * dropped all events, thus skipping the following enter-callbacks - but still
+ * running all exit-callbacks.
+ *
+ * The EO handle can be used to separate callback functionality per EO and the
+ * core id can be obtained for core specific functionality.
+ *
+ * Callback functions can be called concurrently from different cores.
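+ *
+ * A minimal sketch of an enter-callback that drops events of one event type
+ * (the callback name and 'MY_EVTYPE_DROP' are illustrative, not EM API):
+ * @code
+ *	static void my_enter_cb(em_eo_t eo, void **eo_ctx,
+ *				em_event_t events[], int num,
+ *				em_queue_t *queue, void **q_ctx)
+ *	{
+ *		(void)eo; (void)eo_ctx; (void)queue; (void)q_ctx;
+ *
+ *		for (int i = 0; i < num; i++) {
+ *			if (em_event_get_type(events[i]) == MY_EVTYPE_DROP) {
+ *				em_free(events[i]);         // handle the dropped event
+ *				events[i] = EM_EVENT_UNDEF; // mark entry as dropped
+ *			}
+ *		}
+ *	}
+ * @endcode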
+ *
+ * @see em_dispatch_register_enter_cb()
+ */
+typedef void (*em_dispatch_enter_func_t)(em_eo_t eo, void **eo_ctx,
+					 em_event_t events[/*in/out*/], int num,
+					 em_queue_t *queue, void **q_ctx);
+
+/**
+ * Dispatcher exit-callback.
+ *
+ * The exit-callbacks are run after EO-receive returns.
+ * Some arguments given to EO-receive might not be valid afterwards, thus
+ * the only argument given to the exit callback is the EO handle.
+ *
+ * Callback functions can be called concurrently from different cores.
+ *
+ * Functionality that e.g. depends on both an enter- and an exit-callback being
+ * run must take into account that the previous enter-callbacks might have
+ * dropped all events, thus skipping the following enter-callbacks - but still
+ * running all exit-callbacks.
+ *
+ * @see em_dispatch_register_exit_cb()
+ */
+typedef void (*em_dispatch_exit_func_t)(em_eo_t eo);
+
+/**
+ * Register a dispatch enter-callback
+ *
+ * Register a global function to be called by the dispatcher just before calling
+ * an EO-receive function. This can be useful for debugging, collecting
+ * statistics, manipulating events before they reach the EO or implementing new
+ * services needing synchronization between cores.
+ *
+ * The function registered should be kept short since it will be run each time
+ * just before calling EO-receive. All registered callbacks will further
+ * increase the processing time.
+ *
+ * Multiple callbacks can be registered.
+ * The order of calling multiple registered functions is the order of
+ * registration. If the same function is registered twice, it will be called
+ * twice. The maximum number of simultaneous callbacks is system specific
+ * (EM_CALLBACKS_MAX).
+ *
+ * @param func  Dispatch enter-callback function
+ *
+ * @return EM_OK if callback registration succeeded
+ *
+ * @see em_dispatch_enter_func_t for further documentation
+ */
+em_status_t em_dispatch_register_enter_cb(em_dispatch_enter_func_t func);
+
+/**
+ * Unregister a dispatch enter-callback
+ *
+ * This can be used to unregister a previously registered enter-callback.
+ *
+ * The given function is searched for and, if found, removed from the call list.
+ * If the same function has been registered multiple times, only one reference
+ * is removed per unregister call.
+ * Note that when this function returns, no new calls are made to the removed
+ * callback function, but it is still possible that another core could be
+ * executing the function, so care must be taken before removing anything it may
+ * still use.
+ *
+ * @param func  Dispatch enter-callback function
+ *
+ * @return EM_OK if the given function was found and removed.
+ */
+em_status_t em_dispatch_unregister_enter_cb(em_dispatch_enter_func_t func);
+
+/**
+ * Register a dispatch exit-callback
+ *
+ * Register a global function to be called by the dispatcher just after return
+ * from an EO-receive function (see the registration sketch below).
+ *
+ * The function registered should be kept short since it will be run each time
+ * just after EO-receive returns. All registered callbacks will further increase
+ * the processing time.
+ *
+ * Multiple callbacks can be registered.
+ * The order of calling multiple registered functions is the order of
+ * registration. If the same function is registered twice, it will be called
+ * twice. The maximum number of simultaneous callbacks is system specific
+ * (EM_CALLBACKS_MAX).
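+ *
+ * A registration sketch ('my_exit_cb', 'update_my_stats()' and
+ * 'report_error()' stand for application-provided code, not EM API):
+ * @code
+ *	static void my_exit_cb(em_eo_t eo)
+ *	{
+ *		update_my_stats(eo); // illustrative application function
+ *	}
+ *	...
+ *	em_status_t stat = em_dispatch_register_exit_cb(my_exit_cb);
+ *
+ *	if (stat != EM_OK)
+ *		report_error();
+ * @endcode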
+ * + * @param func Dispatch exit-callback function + * + * @return EM_OK if callback registration succeeded + * + * @see em_dispatch_exit_func_t for further documentation + */ +em_status_t em_dispatch_register_exit_cb(em_dispatch_exit_func_t func); + +/** + * Unregister a dispatch exit-callback + * + * This can be used to unregister a previously registered exit-callback. + * + * The given function is searched for and, if found, removed from the call list. + * If the same function has been registered multiple times, only one reference + * is removed per unregister call. + * Note that when this function returns, no new calls are made to the removed + * callback function, but it is still possible that another core could be + * executing the function, so care must be taken before removing anything it may + * still use. + * + * @param func Dispatch exit-callback function + * + * @return EM_OK if the given function was found and removed. + * + * @see em_dispatch_exit_func_t + */ +em_status_t em_dispatch_unregister_exit_cb(em_dispatch_exit_func_t func); + +/** + * @} + */ +#ifdef __cplusplus +} +#endif + +#pragma GCC visibility pop +#endif /* EVENT_MACHINE_DISPATCHER_H_ */ diff --git a/include/event_machine/api/event_machine_eo.h b/include/event_machine/api/event_machine_eo.h index d5e43b12..056bfb31 100644 --- a/include/event_machine/api/event_machine_eo.h +++ b/include/event_machine/api/event_machine_eo.h @@ -1,982 +1,983 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef EVENT_MACHINE_EO_H_ -#define EVENT_MACHINE_EO_H_ - -#pragma GCC visibility push(default) - -/** - * @file - * @defgroup em_eo Execution objects (EO) - * - * Operations on EO - * - * Execution objects (EO) are the application building blocks of EM. - * An EO typically implements one logical function or one stage in a pipeline, - * but alternatively the whole application could be implemented with one EO. - * EOs work as servers, queues are the service access points (inputs to the EO). 
- * - * An EO consists of user provided callback functions and context data. - * The most important function is the receive function, which gets called - * when an event is received from one of the queues associated with the EO. - * The EM scheduler selects the next event for processing on a core and the - * EM dispatcher on that core maps the received event and queue information to - * an EO receive function to call to process the event. - * Other EO functions are used to manage start-up and teardown of EOs. See - * individual EO functions for more details. - * - * em_eo_create() - * | - * v - * .-------------. - * .->.------->| CREATED | (new events discarded) - * | | '-------------' - * | | | em_eo_start(+notifs) / em_eo_start_sync() - * | | v - * | | .-------------. - * | | | STARTING | (new events discarded) - * | ' '-------------' - * | \ global start - * | \ THEN - * | \ local start on each core - * | '--- FAIL OK - * | | send 'start-completed' notifications - * | v - * . .-------------. - * | | RUNNING | - * | '-------------' - * | | em_eo_stop(+notifs) / em_eo_stop_sync() - * | v - * ' .-------------. - * \ | STOPPING | (new events discarded) - * \ '-------------' - * \ | - * \ v - * \ local stops on each core - * \ THEN - * \ global stops - * \ . - * \ / - * -------' send 'stop-completed' notifications - * - * @{ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include - -/** - * Execution object (EO) event receive function (single-event) - * - * An application receives events through queues and these events are passed to - * the application's EO receive function(s) for processing. The EO receive - * function implements the main part of the application logic. EM calls the - * receive function when it has dequeued an event from one of the EO's queues. - * The application then processes the event and returns immediately in a - * run-to-completion fashion. There is no pre-emption. - * - * On multicore systems, several events (from the same or different queue) may - * be dequeued in parallel and thus the same receive function may be executed - * concurrently on several cores. Parallel execution may be limited by queue - * group setup or by using queues with an atomic scheduling mode. - * - * The EO and queue context pointers are user defined. The EO context is given - * at EO creation and the queue context is set with em_queue_set_context(). - * These contexts may be used in any way needed, the EM implementation will not - * dereference them. For example, the EO context may be used to store global - * EO state information, which is common to all queues and events for that EO. - * In addition, the queue context may be used to store queue specific state data - * (e.g. user data flow related data). The queue context data for an atomic - * queue can be freely manipulated in the receive function, since only one event - * at a time can be under work from that particular atomic queue. For other - * queue types it is up to the user to synchronize context access. The EO - * context is protected only if the EO has one queue and it is of type 'atomic' - * (applies also to several atomic queues that belong to the same atomic group). - * - * An event (handle) must be converted to an event structure pointer with - * em_event_pointer() before accessing any data it may contain. - * The event type specifies the event structure in memory, which is - * implementation or application specific. - * The queue handle specifies the queue where the event was dequeued from. 
- * - * The EO will not receive any events if it has not been successfully started. - * - * @param eo_ctx EO context data given to em_eo_create(), - * EM does not dereference. - * @param event Event handle - * @param type Event type - * @param queue Queue from which the event was dequeued - * @param q_ctx Queue context data. The context pointer is set by - * em_queue_set_context(), EM does not touch the data. - * - * @see em_eo_create(), - * em_alloc(), em_free(), em_send(), - * em_event_pointer(), em_queue_set_context() - */ -typedef void (*em_receive_func_t)(void *eo_ctx, - em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx); - -/** - * Execution object (EO) multi-event receive function - * - * Similar to the single-event receive function (em_receive_func_t), except that - * multiple events can be passed with one call to the EO receive function. - * A multi-event receive function is taken into use during EO creation with a - * call to em_eo_create_multircv(...). The maximum number of events that the - * multi-event EO receive function is prepared to handle can be passed with the - * argument 'max_events' of em_eo_create_multircv(). The EM dispatcher will - * split event batches larger than 'max_events' into chunks of 'max_events'. - * - * Event group handling: - * All events passed by the EM dispatcher to the EO multi-event receive function - * belong to the same event group (or none) - a batch of events containing - * multiple event groups is split by the dispatcher into smaller chunks, each - * chunk belonging to the same event group (or none). - * The event group count is decremented by the number of events passed to the - * receive function when execution returns to the dispatcher. - * - * Note: Contrary to the single-event EO receive function (em_receive_func_t), - * no event types are passed. Use appropriate event APIs if the event types - * are needed. - * - * @param eo_ctx EO context data given to em_eo_create_multircv(), - * EM does not dereference. - * @param events Event handles: events[num] - * @param num Number of events received - * (0 to 'max_events' of em_eo_create_multircv()) - * @param queue Queue from which the event was dequeued - * @param q_ctx Queue context data. The context pointer is set by - * em_queue_set_context(), EM does not touch the data. - * - * @see em_eo_create_multircv(), - * em_alloc(), em_free(), em_send(), - * em_event_pointer(), em_queue_set_context() - */ -typedef void (*em_receive_multi_func_t)(void *eo_ctx, - em_event_t events[], int num, - em_queue_t queue, void *q_ctx); - -/** - * Execution object (EO) start function, global. - * - * This EO callback function is called once on one core by em_eo_start(). - * The purpose of this global EO-start is to provide a placeholder for first - * level EO initialization, e.g. allocating memory and initializing shared data. - * After this global start returns, the EO core local start function (if given) - * is called on all cores in this EM instance. If there is no core local start, - * then event dispatching is enabled as this function returns, otherwise the EO - * is enabled only when all core local starts have completed successfully on all - * the cores. If this function does not return EM_OK, the system will not call - * the core local init and will not enable event dispatching for this EO. - * - * Note that events sent to scheduled queues from a start function are - * buffered. 
The buffered events will be sent into the queues when the EO start - * functions have returned - otherwise it would not be possible to send events - * to the EO's own queues as the EO is not yet in a started state. No buffering - * is done when sending to queues that are not scheduled. - * - * The last argument is an optional startup configuration passed directly - * from em_eo_start(). If local start functions need the configuration data, - * it must be saved during the global start. - * - * This function should never be directly called from the application, - * it will be called by em_eo_start(), which maintains state information. - * - * @param eo_ctx Execution object internal state/instance data - * @param eo Execution object handle - * @param conf Optional startup configuration, NULL ok. - * - * @return EM_OK if successful, other values abort EO start - * - * @see em_eo_start(), em_eo_create() - */ -typedef em_status_t (*em_start_func_t)(void *eo_ctx, em_eo_t eo, - const em_eo_conf_t *conf); - -/** - * Execution object (EO) start function, core local. - * - * This is similar to the global start above, but this one is called after the - * global start has completed and is run on all cores of the EM instance - * potentially in parallel. - * - * The purpose of this optional local start is to work as a placeholder for - * core local initialization, e.g. allocating core local memory. - * - * Note that events sent to scheduled queues from local start functions are - * buffered. The buffered events will be sent into the queues when the EO start - * functions have returned - otherwise it would not be possible to send events - * to the EO's own queues as the EO is not yet in a started state. No buffering - * is done when sending to queues that are not scheduled. - * - * This function should never be directly called from the application, - * it will be called by em_eo_start(), which maintains state information. - * - * Event dispatching is not enabled if this function doesn't return EM_OK on - * all cores. - * - * @param eo_ctx Execution object internal state/instance data - * @param eo Execution object handle - * - * @return EM_OK if successful, other values prevent EO start - * - * @see em_eo_start(), em_eo_create() - */ -typedef em_status_t (*em_start_local_func_t)(void *eo_ctx, em_eo_t eo); - -/** - * Execution object (EO) stop function, core local. - * - * This function is called once on each core of the EM instance before the - * global stop (reverse order of start). The system disables event dispatching - * before calling these and also makes sure this does not get called before - * the core has been notified of the stop condition for this EO (won't dispatch - * any new events). - * - * This function should never be directly called from the application, - * it will be called by em_eo_stop(), which maintains state information. - * - * @param eo_ctx Execution object internal state data - * @param eo Execution object handle - * - * @return EM_OK if successful. - * - * @see em_eo_stop(), em_eo_create() - */ -typedef em_status_t (*em_stop_local_func_t)(void *eo_ctx, em_eo_t eo); - -/** - * Execution object (EO) stop function, global. - * - * The EO global stop function is called once on one core after the optional - * core local stop functions return on all cores. The system disables event - * dispatching before calling this function and also makes sure it does not get - * called before all cores have been notified of the stop condition for this EO - * (don't dispatch new events). 
- * - * This function should never be directly called from the application, - * it will be called by em_eo_stop(), which maintains state information. - * - * @param eo_ctx Execution object internal state data - * @param eo Execution object handle - * - * @return EM_OK if successful. - * - * @see em_eo_stop(), em_eo_create() - */ -typedef em_status_t (*em_stop_func_t)(void *eo_ctx, em_eo_t eo); - -/** - * Create an Execution Object (EO). - * - * Allocate an EO handle and initialize internal data for the new EO. - * The EO is left in a non-active state, i.e. no events are dispatched before - * em_eo_start() has been called. Start, stop and receive callback functions - * are mandatory arguments. - * - * The EO name is copied into EO internal data. The maximum length stored is - * EM_EO_NAME_LEN. Duplicate names are allowed, but find will only match one of - * them. - * - * @param name Name of the EO (optional, NULL ok) - * @param start Start function - * @param local_start Core local start function (NULL if no local start) - * @param stop Stop function - * @param local_stop Core local stop function (NULL if no local stop) - * @param receive Receive function - * @param eo_ctx User defined EO context data, EM passes the value - * (NULL if no context) - * - * @return New EO handle if successful, otherwise EM_EO_UNDEF. - * - * @see em_eo_start(), em_eo_delete(), em_queue_create(), em_eo_add_queue() - * @see em_start_func_t, em_stop_func_t, em_receive_func_t - */ -em_eo_t -em_eo_create(const char *name, - em_start_func_t start, em_start_local_func_t local_start, - em_stop_func_t stop, em_stop_local_func_t local_stop, - em_receive_func_t receive, const void *eo_ctx); - -/** - * EO parameters for em_eo_create_multircv(...) - */ -typedef struct { - /** - * EO start function, mandatory. - * Called once on one core, triggered by em_eo_start/_start_sync(). - * First EO-function to be called. - */ - em_start_func_t start; - /** - * EO core-local start function, optional (set NULL if not used). - * Called on all EM-cores after 'start' has completed. - */ - em_start_local_func_t local_start; - /** - * EO stop function, mandatory. - * Called once on one core, triggered by em_eo_stop/_stop_sync(). - * Last EO-function to be called. - */ - em_stop_func_t stop; - /** - * EO core-local stop function, optional (set NULL if not used). - * Called and completed on all EM-cores before 'stop'. - */ - em_stop_local_func_t local_stop; - /** - * EO receive function for multiple events, mandatory. - */ - em_receive_multi_func_t receive_multi; - /** - * Maximum number of events passed to the receive function. - * EM will dispatch 1 to 'max-events' at a time to the EO's multi-event - * receive function. - * Use '0' for an EM default value (=EM_EO_MULTIRCV_MAX_EVENTS). - * The user provided 'receive_multi' function must be able to handle - * 'max_events' events at a time. - */ - int max_events; - /** - * User defined EO context data, optional (NULL if no context). - * EM only passes the value. - */ - const void *eo_ctx; - - /** - * Internal check - don't touch! - * - * EM will verify that em_eo_multircv_param_init(param) has been called - * before creating an EO with em_eo_create_multircv(..., param) - */ - uint32_t __internal_check; -} em_eo_multircv_param_t; - -/** - * Initialize parameters for the multi-event receive-function EO. - * - * Initialize em_eo_multircv_param_t to default values for all fields. 
- * After initialization, the user further needs to set the mandatory fields of
- * 'em_eo_multircv_param_t' before calling em_eo_create_multircv().
- * Always initialize 'param' first with em_eo_multircv_param_init(&param) to
- * ensure backwards compatibility with potentially added new options.
- *
- * @param param  Address of the em_eo_multircv_param_t to be initialized
- *
- * @see em_eo_create_multircv()
- */
-void em_eo_multircv_param_init(em_eo_multircv_param_t *param);
-
-/**
- * Create an Execution Object (EO) with a multi-event receive function.
- *
- * Similar to em_eo_create(), except that an EO multi-event receive function is
- * taken into use for the created EO, see em_receive_multi_func_t (passed via
- * em_eo_multircv_param_t param).
- *
- * Always initialize 'param' first with em_eo_multircv_param_init(&param) to
- * ensure backwards compatibility before setting your own params and calling
- * em_eo_create_multircv():
- * @code
- *	em_eo_multircv_param_t param;
- *	em_eo_t eo;
- *
- *	em_eo_multircv_param_init(&param);
- *	param.start = my_start_fn;
- *	param.stop = my_stop_fn;
- *	param.receive_multi = my_receive_multi_fn;
- *	param.max_events = MY_MAX_EVENTS; // or use default=0
- *	...
- *	eo = em_eo_create_multircv("my-eo", &param);
- *	if (unlikely(eo == EM_EO_UNDEF))
- *		report_error();
- * @endcode
- *
- * @param name   Name of the EO (optional, NULL ok)
- * @param param  EO parameters
- *
- * @return New EO handle if successful, otherwise EM_EO_UNDEF.
- *
- * @see em_eo_multircv_param_init()
- * @see em_eo_start(), em_eo_start_sync(), em_eo_stop(), em_eo_stop_sync()
- * @see em_start_func_t, em_stop_func_t, em_receive_multi_func_t
- */
-em_eo_t
-em_eo_create_multircv(const char *name, const em_eo_multircv_param_t *param);
-
-/**
- * Delete Execution Object (EO).
- *
- * Immediately delete the given EO and free the identifier.
- *
- * NOTE, that an EO can only be deleted after it has been stopped using
- * em_eo_stop(), otherwise another core might still access the EO data.
- * All associated queues must be removed before deleting an EO.
- *
- * A sequence of
- * @code
- *	em_eo_stop_sync(eo);
- *	em_eo_remove_queue_all_sync(eo, EM_TRUE);
- *	em_eo_delete(eo);
- * @endcode
- * will cleanly delete an EO from the EM point of view (not including user
- * allocated data).
- *
- * @param eo  EO handle to delete
- *
- * @return EM_OK if successful.
- *
- * @see em_eo_stop(), em_eo_remove_queue()
- */
-em_status_t
-em_eo_delete(em_eo_t eo);
-
-/**
- * Returns the name given to the EO when it was created.
- *
- * A copy of the name string (up to 'maxlen' characters) is
- * written to the user buffer 'name'.
- * The string is always null terminated - even if the given buffer length
- * is less than the name length.
- *
- * The function returns 0 and writes an empty string if the EO has no name.
- *
- * @param eo         EO handle
- * @param[out] name  Destination buffer
- * @param maxlen     Maximum length (including the terminating '0')
- *
- * @return Number of characters written (excludes the terminating '0').
- *
- * @see em_eo_create()
- */
-size_t
-em_eo_get_name(em_eo_t eo, char *name, size_t maxlen);
-
-/**
- * Find EO by name.
- *
- * Finds an EO by the given name (exact match). An empty string will not match
- * anything. The search is case sensitive. This function will return the first
- * match only if there are duplicate names.
- * - * @param name the name to look for - * - * @return EO handle or EM_EO_UNDEF if not found - * - * @see em_eo_create() - */ -em_eo_t -em_eo_find(const char *name); - -/** - * Add a queue to an EO, asynchronous (non-blocking) - * - * Add the given queue to the EO and enable scheduling for it. The function - * returns immediately, but the operation can be asynchronous and only fully - * complete later. The given notification events are sent when the operation has - * completed and the queue is ready to receive events. - * Note, that the completion notification(s) guarantee that the queue itself is - * operational, but if the target EO is not yet started then events sent into - * the queue will still be dropped by dispatcher. - * - * @param eo EO handle - * @param queue Queue handle - * @param num_notif Number of notification events, 0 for no notification - * @param notif_tbl Array of pairs of event and queue identifiers - * (+ optional event groups to send the events with) - * - * @return EM_OK if successful. - * - * @see em_queue_create(), em_eo_create(), em_eo_remove_queue(), - * em_eo_add_queue_sync() - */ -em_status_t -em_eo_add_queue(em_eo_t eo, em_queue_t queue, - int num_notif, const em_notif_t notif_tbl[]); - -/** - * Add a queue to an EO, synchronous (blocking) - * - * As em_eo_add_queue(), but does not return until the queue is ready to - * receive events. - * - * Note that the function is blocking and will not return until the operation - * has completed across all concerned EM cores. - * Sync-API calls can block the core for a long (indefinite) time, thus they - * should not be used to make runtime changes on real time EM cores - consider - * the async variants of the APIs in these cases instead. - * While one core is calling a sync-API function, the others must be running the - * EM dispatch loop to be able to receive and handle the sync-API request events - * sent internally. - * Use the sync-APIs mainly to simplify application start-up or teardown. - * - * @param eo EO handle - * @param queue Queue handle - * - * @return EM_OK if successful. - * - * @see em_queue_create(), em_eo_create(), em_eo_remove_queue() - * @see em_eo_add_queue() for an asynchronous version of the API - */ -em_status_t -em_eo_add_queue_sync(em_eo_t eo, em_queue_t queue); - -/** - * Removes a queue from an EO, asynchronous (non-blocking) - * - * Disables queue scheduling and removes the queue from the EO. The function - * returns immediately, but the operation can be asynchronous and only fully - * complete later. The given notification events are sent when the operation has - * completed across all cores and no event from this queue is being dispatched - * anymore. Use notifications to know when the operation has fully completed - * and the queue can safely be deleted. - * - * @param eo EO handle - * @param queue Queue handle to remove - * @param num_notif Number of notification events, 0 for no notification - * @param notif_tbl Array of pairs of event and queue identifiers - * (+ optional event groups to send the events with) - * - * @return EM_OK if successful. - * - * @see em_eo_add_queue(), em_eo_remove_queue_sync() - */ -em_status_t -em_eo_remove_queue(em_eo_t eo, em_queue_t queue, - int num_notif, const em_notif_t notif_tbl[]); - -/** - * Removes a queue from an EO, synchronous (blocking) - * - * As em_eo_remove_queue(), but will not return until the queue has been - * disabled, removed from the EO and no more events are being processed from - * the queue. 
- * - * Note that the function is blocking and will not return until the operation - * has completed across all concerned EM cores. - * Sync-API calls can block the core for a long (indefinite) time, thus they - * should not be used to make runtime changes on real time EM cores - consider - * the async variants of the APIs in these cases instead. - * While one core is calling a sync-API function, the others must be running the - * EM dispatch loop to be able to receive and handle the sync-API request events - * sent internally. - * Use the sync-APIs mainly to simplify application start-up or teardown. - * - * @param eo EO handle - * @param queue Queue handle to remove - * - * @return EM_OK if successful. - * - * @see em_eo_remove_queue() for an asynchronous version of the API - */ -em_status_t -em_eo_remove_queue_sync(em_eo_t eo, em_queue_t queue); - -/** - * Removes all queues from an EO, asynchronous (non-blocking) - * - * Like em_eo_remove_queue(), but removes all queues currently associated with - * the EO. - * The argument 'delete_queues' can be used to automatically also delete all - * queues by setting it to EM_TRUE (EM_FALSE otherwise). - * Note: any allocated queue contexts will still need to be handled elsewhere. - * - * @param eo EO handle - * @param delete_queues delete the EO's queues if set to EM_TRUE - * @param num_notif Number of notification events, 0 for no notification - * @param notif_tbl Array of pairs of event and queue identifiers - * (+ optional event groups to send the events with) - * - * @return EM_OK if successful. - * - * @see em_eo_add_queue(), em_eo_remove_queue_sync(), - * em_eo_remove_queue_all_sync() - */ -em_status_t -em_eo_remove_queue_all(em_eo_t eo, int delete_queues, - int num_notif, const em_notif_t notif_tbl[]); - -/** - * Removes all queues from an EO, synchronous (blocking). - * - * As em_eo_remove_queue_all(), but does not return until all queues have - * been removed. - * - * Note that the function is blocking and will not return until the operation - * has completed across all concerned EM cores. - * Sync-API calls can block the core for a long (indefinite) time, thus they - * should not be used to make runtime changes on real time EM cores - consider - * the async variants of the APIs in these cases instead. - * While one core is calling a sync-API function, the others must be running the - * EM dispatch loop to be able to receive and handle the sync-API request events - * sent internally. - * Use the sync-APIs mainly to simplify application start-up or teardown. - * - * @param eo EO handle - * @param delete_queues delete the EO's queues if set to EM_TRUE - * - * @return EM_OK if successful. - * - * - * @see em_eo_remove_queue_all() for an asynchronous version of the API - */ -em_status_t -em_eo_remove_queue_all_sync(em_eo_t eo, int delete_queues); - -/** - * Register an EO specific error handler. - * - * The EO specific error handler is called if an error occurs or em_error() is - * called in the context of the running EO. - * Note, the provided function will override any previously registered - * error handler for the EO in question. - * The global error handler is called if no EO specific error handler is - * registered. - * - * @param eo EO handle - * @param handler New error handler - * - * @return EM_OK if successful. - * - * @see em_register_error_handler(), em_error_handler_t() - */ -em_status_t -em_eo_register_error_handler(em_eo_t eo, em_error_handler_t handler); - -/** - * Unregister an EO specific error handler. 
- *
- * Removes a previously registered EO specific error handler and restores the
- * global error handler into use for the EO.
- *
- * @param eo  EO handle
- *
- * @return EM_OK if successful.
- */
-em_status_t
-em_eo_unregister_error_handler(em_eo_t eo);
-
-/**
- * Start an Execution Object (EO), asynchronous (non-blocking)
- *
- * Start and enable a previously created EO.
- * The em_eo_start() function will first call the user provided global EO start
- * function. If that global start function returns EM_OK then events to trigger
- * the (optional) user provided local start function are sent to all cores.
- * The em_eo_start() function returns immediately after the global start
- * returns, which means that the action only fully completes later.
- * Notifications should be used if the caller needs to know when the EO start
- * has fully completed. The given notification event(s) will be sent to the
- * given queue(s) when the start is completed on all cores.
- *
- * Local start is not called and event dispatching is not enabled for this EO if
- * the global start function does not return EM_OK.
- *
- * The notification(s) are sent when the global start function returns if a
- * local start function hasn't been provided.
- * Use '0' as 'num_notif' if notifications are not needed. Be aware, in this
- * case, that the EO may not immediately be ready to handle events.
- *
- * Note that events sent to scheduled queues from a user provided EO global or
- * local start function are buffered. The buffered events will be sent into the
- * queues when the EO start functions have all returned - otherwise it would not
- * be possible to send events to the EO's own queues as the EO is not yet in a
- * started state. No buffering is done when sending to queues that are
- * not scheduled.
- *
- * The optional conf-argument can be used to pass application specific
- * information (e.g. configuration data) to the EO.
- *
- * @param eo          EO handle
- * @param[out] result Optional pointer to em_status_t, which gets updated to
- *                    the return value of the actual user provided EO global
- *                    start function.
- * @param conf        Optional startup configuration, NULL ok.
- * @param num_notif   If not 0, defines the number of notification events to
- *                    send when all cores have returned from the start
- *                    function(s).
- * @param notif_tbl   Array of em_notif_t, the optional notification events
- *                    (array data is copied)
- *
- * @return EM_OK if successful.
- *
- * @see em_start_func_t(), em_start_local_func_t(), em_eo_stop(),
- *      em_eo_start_sync()
- */
-em_status_t
-em_eo_start(em_eo_t eo, em_status_t *result, const em_eo_conf_t *conf,
-	    int num_notif, const em_notif_t notif_tbl[]);
-
-/**
- * Start Execution Object (EO), synchronous (blocking)
- *
- * As em_eo_start(), but will not return until the operation is complete.
- *
- * Note that the function is blocking and will not return until the operation
- * has completed across all concerned EM cores.
- * Sync-API calls can block the core for a long (indefinite) time, thus they
- * should not be used to make runtime changes on real time EM cores - consider
- * the async variants of the APIs in these cases instead.
- * While one core is calling a sync-API function, the others must be running the
- * EM dispatch loop to be able to receive and handle the sync-API request events
- * sent internally.
- * Use the sync-APIs mainly to simplify application start-up or teardown.
- * - * @param eo EO handle - * @param[out] result Optional pointer to em_status_t, which gets updated to - * the return value of the actual user provided EO global - * start function. - * @param conf Optional startup configuration, NULL ok. - * - * @return EM_OK if successful. - * - * @see em_start_func_t(), em_start_local_func_t(), em_eo_stop() - * @see em_eo_start() for an asynchronous version of the API - */ -em_status_t -em_eo_start_sync(em_eo_t eo, em_status_t *result, const em_eo_conf_t *conf); - -/** - * Stop Execution Object (EO), asynchronous (non-blocking) - * - * Disables event dispatch from all related queues, calls core local stop - * on all cores and finally calls the global stop function of the EO when all - * cores have returned from the (optional) core local stop. - * The call to the global EO stop is asynchronous and only done when all cores - * have completed processing of the receive function and/or core local stop. - * This guarantees no other core is accessing EO data during the EO global stop - * function. - * - * This function returns immediately, but may only fully complete later. If the - * caller needs to know when the EO stop has actually completed, the num_notif - * and notif_tbl should be used. The given notification event(s) will be sent to - * given queue(s) when the stop operation actually completes. - * If such notifications are not needed, use '0' as 'num_notif'. - * - * When the EO has stopped it can be started again with em_eo_start(). - * - * @param eo EO handle - * @param num_notif Number of notification events, 0 for no notification - * @param notif_tbl Array of pairs of event and queue identifiers - * (+ optional event groups to send the events with) - * - * @return EM_OK if successful. - * - * @see em_stop_func_t(), em_stop_local_func_t(), em_eo_start(), - * em_eo_stop_sync() - */ -em_status_t -em_eo_stop(em_eo_t eo, int num_notif, const em_notif_t notif_tbl[]); - -/** - * Stop Execution Object (EO), synchronous (blocking) - * - * As em_eo_stop(), but will not return until the operation is complete. - * - * Note that the function is blocking and will not return until the operation - * has completed across all concerned EM cores. - * Sync-API calls can block the core for a long (indefinite) time, thus they - * should not be used to make runtime changes on real time EM cores - consider - * the async variants of the APIs in these cases instead. - * While one core is calling a sync-API function, the others must be running the - * EM dispatch loop to be able to receive and handle the sync-API request events - * sent internally. - * Use the sync-APIs mainly to simplify application start-up or teardown. - * - * @param eo EO handle - * - * @return EM_OK if successful. - * - * @see em_stop_func_t(), em_stop_local_func_t(), em_eo_start() - * @see em_eo_stop() for an asynchronous version of the API - */ -em_status_t -em_eo_stop_sync(em_eo_t eo); - -/** - * Return the currently active EO - * - * Returns the EO handle associated with the currently running EO function. - * Only valid if called within an EO-context, will return EM_EO_UNDEF otherwise. - * Can be called from the EO-receive or EO-start/stop functions (or subfunctions - * thereof). - * Note that calling em_eo_current() from e.g. an EO-start function that was - * launched from within another EO's receive will return the EO handle of the - * EO being started - i.e. always returns the 'latest' current EO. 
- * - * @return The current EO or EM_EO_UNDEF if no current EO (or error) - */ -em_eo_t -em_eo_current(void); - -/** - * Get EO specific (application) context. - * - * Returns the EO context pointer that the application has earlier provided via - * em_eo_create(). - * - * @param eo EO for which the context is requested - * - * @return EO specific context pointer or NULL if no context (or error) - */ -void * -em_eo_get_context(em_eo_t eo); - -/** - * Return the EO state. - * - * Returns the current state of the given EO. - * - * @return The current EO state or EM_EO_STATE_UNDEF if never created. - */ -em_eo_state_t -em_eo_get_state(em_eo_t eo); - -/** - * Initialize EO iteration and return the first EO handle. - * - * Can be used to initialize the iteration to retrieve all created EOs for - * debugging or management purposes. Use em_eo_get_next() after this call until - * it returns EM_EO_UNDEF. A new call to em_eo_get_first() resets the iteration, - * which is maintained per core (thread). The operation should be completed in - * one go before returning from the EO's event receive function (or start/stop). - * - * The number of EOs (output arg 'num') may not match the amount of EOs actually - * returned by iterating using em_eo_get_next() if EOs are added or removed in - * parallel by another core. The order of the returned EO handles is undefined. - * - * @code - * unsigned int num; - * em_eo_t eo = em_eo_get_first(&num); - * while (eo != EM_EO_UNDEF) { - * eo = em_eo_get_next(); - * } - * @endcode - * - * @param[out] num Pointer to an unsigned int to store the amount of EOs into - * @return The first EO handle or EM_EO_UNDEF if none exist - * - * @see em_eo_get_next() - */ -em_eo_t -em_eo_get_first(unsigned int *num); - -/** - * Return the next EO handle. - * - * Continues the EO iteration started by em_eo_get_first() and returns the next - * EO handle. - * - * @return The next EO handle or EM_EO_UNDEF if the EO iteration is completed - * (i.e. no more EO's available). - * - * @see em_eo_get_first() - */ -em_eo_t -em_eo_get_next(void); - -/** - * Initialize iteration of an EO's queues and return the first queue handle. - * - * Can be used to initialize the iteration to retrieve all queues associated - * with the given EO for debugging or management purposes. - * Use em_eo_queue_get_next() after this call until it returns EM_QUEUE_UNDEF. - * A new call to em_eo_queue_get_first() resets the iteration, which is - * maintained per core (thread). The operation should be started and completed - * in one go before returning from the EO's event receive function (or - * start/stop). - * - * The number of queues owned by the EO (output arg 'num') may not match the - * amount of queues actually returned by iterating using em_eo_queue_get_next() - * if queues are added or removed in parallel by another core. The order of - * the returned queue handles is undefined. - * - * Simplified example: - * @code - * unsigned int num; - * em_queue_t q = em_eo_queue_get_first(&num, eo); - * while (q != EM_QUEUE_UNDEF) { - * q = em_eo_queue_get_next(); - * } - * @endcode - * - * @param[out] num Output the current amount of queues associated with the EO - * @param eo EO handle - * - * @return The first queue handle or EM_QUEUE_UNDEF if none exist or the EO - * is invalid. - * - * @see em_eo_queue_get_next() - **/ -em_queue_t -em_eo_queue_get_first(unsigned int *num, em_eo_t eo); - -/** - * Return the EO's next queue handle. 
- * - * Continues the queue iteration started by em_eo_queue_get_first() and returns - * the next queue handle owned by the EO. - * - * @return The next queue handle or EM_QUEUE_UNDEF if the queue iteration is - * completed (i.e. no more queues available for this EO). - * - * @see em_eo_queue_get_first() - **/ -em_queue_t -em_eo_queue_get_next(void); - -/** - * Convert an EO handle to an unsigned integer - * - * @param eo EO handle to be converted - * @return uint64_t value that can be used to print/display the handle - * - * @note This routine is intended to be used for diagnostic purposes - * to enable applications to e.g. generate a printable value that represents - * an em_eo_t handle. - */ -uint64_t em_eo_to_u64(em_eo_t eo); - -/** - * @} - */ -#ifdef __cplusplus -} -#endif - -#pragma GCC visibility pop -#endif /* EVENT_MACHINE_EO_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef EVENT_MACHINE_EO_H_ +#define EVENT_MACHINE_EO_H_ + +#pragma GCC visibility push(default) + +/** + * @file + * @defgroup em_eo Execution objects (EO) + * + * Operations on EO + * + * Execution objects (EO) are the application building blocks of EM. + * An EO typically implements one logical function or one stage in a pipeline, + * but alternatively the whole application could be implemented with one EO. + * EOs work as servers, queues are the service access points (inputs to the EO). + * + * An EO consists of user provided callback functions and context data. + * The most important function is the receive function, which gets called + * when an event is received from one of the queues associated with the EO. + * The EM scheduler selects the next event for processing on a core and the + * EM dispatcher on that core maps the received event and queue information to + * an EO receive function to call to process the event. + * Other EO functions are used to manage start-up and teardown of EOs. See + * individual EO functions for more details. 
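+ *
+ * A minimal EO setup sketch (the callback functions 'start_fn', 'stop_fn' and
+ * 'receive_fn' as well as the previously created 'queue' are hypothetical
+ * application names; error handling is omitted):
+ * @code
+ *	em_eo_t eo = em_eo_create("my-eo", start_fn, NULL, stop_fn, NULL,
+ *				  receive_fn, NULL);
+ *
+ *	em_eo_add_queue_sync(eo, queue);
+ *	em_eo_start_sync(eo, NULL, NULL);
+ * @endcode
+ *
+ * The EO states and transitions: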
+ * + * em_eo_create() + * | + * v + * .-------------. + * .->.------->| CREATED | (new events discarded) + * | | '-------------' + * | | | em_eo_start(+notifs) / em_eo_start_sync() + * | | v + * | | .-------------. + * | | | STARTING | (new events discarded) + * | ' '-------------' + * | \ global start + * | \ THEN + * | \ local start on each core + * | '--- FAIL OK + * | | send 'start-completed' notifications + * | v + * . .-------------. + * | | RUNNING | + * | '-------------' + * | | em_eo_stop(+notifs) / em_eo_stop_sync() + * | v + * ' .-------------. + * \ | STOPPING | (new events discarded) + * \ '-------------' + * \ | + * \ v + * \ local stops on each core + * \ THEN + * \ global stops + * \ . + * \ / + * -------' send 'stop-completed' notifications + * + * @{ + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +/** + * Execution object (EO) event receive function (single-event) + * + * An application receives events through queues and these events are passed to + * the application's EO receive function(s) for processing. The EO receive + * function implements the main part of the application logic. EM calls the + * receive function when it has dequeued an event from one of the EO's queues. + * The application then processes the event and returns immediately in a + * run-to-completion fashion. There is no pre-emption. + * + * On multicore systems, several events (from the same or different queue) may + * be dequeued in parallel and thus the same receive function may be executed + * concurrently on several cores. Parallel execution may be limited by queue + * group setup or by using queues with an atomic scheduling mode. + * + * The EO and queue context pointers are user defined. The EO context is given + * at EO creation and the queue context is set with em_queue_set_context(). + * These contexts may be used in any way needed, the EM implementation will not + * dereference them. For example, the EO context may be used to store global + * EO state information, which is common to all queues and events for that EO. + * In addition, the queue context may be used to store queue specific state data + * (e.g. user data flow related data). The queue context data for an atomic + * queue can be freely manipulated in the receive function, since only one event + * at a time can be under work from that particular atomic queue. For other + * queue types it is up to the user to synchronize context access. The EO + * context is protected only if the EO has one queue and it is of type 'atomic' + * (applies also to several atomic queues that belong to the same atomic group). + * + * An event (handle) must be converted to an event structure pointer with + * em_event_pointer() before accessing any data it may contain. + * The event type specifies the event structure in memory, which is + * implementation or application specific. + * The queue handle specifies the queue where the event was dequeued from. + * + * The EO will not receive any events if it has not been successfully started. + * + * @param eo_ctx EO context data given to em_eo_create(), + * EM does not dereference. + * @param event Event handle + * @param type Event type + * @param queue Queue from which the event was dequeued + * @param q_ctx Queue context data. The context pointer is set by + * em_queue_set_context(), EM does not touch the data. 
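+ *
+ * A single-event receive function sketch (the event content type 'my_event_t'
+ * and the processing function 'handle_msg()' are hypothetical application
+ * code):
+ * @code
+ *	static void my_receive(void *eo_ctx, em_event_t event,
+ *			       em_event_type_t type, em_queue_t queue,
+ *			       void *q_ctx)
+ *	{
+ *		my_event_t *msg = em_event_pointer(event);
+ *
+ *		handle_msg(msg); // application specific processing
+ *		em_free(event);  // free (or e.g. em_send()) the event
+ *	}
+ * @endcode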
+ * + * @see em_eo_create(), + * em_alloc(), em_free(), em_send(), + * em_event_pointer(), em_queue_set_context() + */ +typedef void (*em_receive_func_t)(void *eo_ctx, + em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); + +/** + * Execution object (EO) multi-event receive function + * + * Similar to the single-event receive function (em_receive_func_t), except that + * multiple events can be passed with one call to the EO receive function. + * A multi-event receive function is taken into use during EO creation with a + * call to em_eo_create_multircv(...). The maximum number of events that the + * multi-event EO receive function is prepared to handle can be passed with the + * argument 'max_events' of em_eo_create_multircv(). The EM dispatcher will + * split event batches larger than 'max_events' into chunks of 'max_events'. + * + * Event group handling: + * All events passed by the EM dispatcher to the EO multi-event receive function + * belong to the same event group (or none) - a batch of events containing + * multiple event groups is split by the dispatcher into smaller chunks, each + * chunk belonging to the same event group (or none). + * The event group count is decremented by the number of events passed to the + * receive function when execution returns to the dispatcher. + * + * Note: Contrary to the single-event EO receive function (em_receive_func_t), + * no event types are passed. Use appropriate event APIs if the event types + * are needed. + * + * @param eo_ctx EO context data given to em_eo_create_multircv(), + * EM does not dereference. + * @param events Event handles: events[num] + * @param num Number of events received + * (0 to 'max_events' of em_eo_create_multircv()) + * @param queue Queue from which the event was dequeued + * @param q_ctx Queue context data. The context pointer is set by + * em_queue_set_context(), EM does not touch the data. + * + * @see em_eo_create_multircv(), + * em_alloc(), em_free(), em_send(), + * em_event_pointer(), em_queue_set_context() + */ +typedef void (*em_receive_multi_func_t)(void *eo_ctx, + em_event_t events[], int num, + em_queue_t queue, void *q_ctx); + +/** + * Execution object (EO) start function, global. + * + * This EO callback function is called once on one core by em_eo_start(). + * The purpose of this global EO-start is to provide a placeholder for first + * level EO initialization, e.g. allocating memory and initializing shared data. + * After this global start returns, the EO core local start function (if given) + * is called on all cores in this EM instance. If there is no core local start, + * then event dispatching is enabled as this function returns, otherwise the EO + * is enabled only when all core local starts have completed successfully on all + * the cores. If this function does not return EM_OK, the system will not call + * the core local init and will not enable event dispatching for this EO. + * + * Note that events sent to scheduled queues from a start function are + * buffered. The buffered events will be sent into the queues when the EO start + * functions have returned - otherwise it would not be possible to send events + * to the EO's own queues as the EO is not yet in a started state. No buffering + * is done when sending to queues that are not scheduled. + * + * The last argument is an optional startup configuration passed directly + * from em_eo_start(). If local start functions need the configuration data, + * it must be saved during the global start. 
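+ *
+ * For example (a sketch; the EO context type 'my_eo_ctx_t' with a 'conf'
+ * member is hypothetical application code):
+ * @code
+ *	static em_status_t my_start(void *eo_ctx, em_eo_t eo,
+ *				    const em_eo_conf_t *conf)
+ *	{
+ *		my_eo_ctx_t *my_ctx = eo_ctx;
+ *
+ *		if (conf)
+ *			my_ctx->conf = *conf; // save for the local starts
+ *		return EM_OK; // allow EM to continue with the local starts
+ *	}
+ * @endcode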
+ * + * This function should never be directly called from the application, + * it will be called by em_eo_start(), which maintains state information. + * + * @param eo_ctx Execution object internal state/instance data + * @param eo Execution object handle + * @param conf Optional startup configuration, NULL ok. + * + * @return EM_OK if successful, other values abort EO start + * + * @see em_eo_start(), em_eo_create() + */ +typedef em_status_t (*em_start_func_t)(void *eo_ctx, em_eo_t eo, + const em_eo_conf_t *conf); + +/** + * Execution object (EO) start function, core local. + * + * This is similar to the global start above, but this one is called after the + * global start has completed and is run on all cores of the EM instance + * potentially in parallel. + * + * The purpose of this optional local start is to work as a placeholder for + * core local initialization, e.g. allocating core local memory. + * + * Note that events sent to scheduled queues from local start functions are + * buffered. The buffered events will be sent into the queues when the EO start + * functions have returned - otherwise it would not be possible to send events + * to the EO's own queues as the EO is not yet in a started state. No buffering + * is done when sending to queues that are not scheduled. + * + * This function should never be directly called from the application, + * it will be called by em_eo_start(), which maintains state information. + * + * Event dispatching is not enabled if this function doesn't return EM_OK on + * all cores. + * + * @param eo_ctx Execution object internal state/instance data + * @param eo Execution object handle + * + * @return EM_OK if successful, other values prevent EO start + * + * @see em_eo_start(), em_eo_create() + */ +typedef em_status_t (*em_start_local_func_t)(void *eo_ctx, em_eo_t eo); + +/** + * Execution object (EO) stop function, core local. + * + * This function is called once on each core of the EM instance before the + * global stop (reverse order of start). The system disables event dispatching + * before calling these and also makes sure this does not get called before + * the core has been notified of the stop condition for this EO (won't dispatch + * any new events). + * + * This function should never be directly called from the application, + * it will be called by em_eo_stop(), which maintains state information. + * + * @param eo_ctx Execution object internal state data + * @param eo Execution object handle + * + * @return EM_OK if successful. + * + * @see em_eo_stop(), em_eo_create() + */ +typedef em_status_t (*em_stop_local_func_t)(void *eo_ctx, em_eo_t eo); + +/** + * Execution object (EO) stop function, global. + * + * The EO global stop function is called once on one core after the optional + * core local stop functions return on all cores. The system disables event + * dispatching before calling this function and also makes sure it does not get + * called before all cores have been notified of the stop condition for this EO + * (don't dispatch new events). + * + * This function should never be directly called from the application, + * it will be called by em_eo_stop(), which maintains state information. + * + * @param eo_ctx Execution object internal state data + * @param eo Execution object handle + * + * @return EM_OK if successful. + * + * @see em_eo_stop(), em_eo_create() + */ +typedef em_status_t (*em_stop_func_t)(void *eo_ctx, em_eo_t eo); + +/** + * Create an Execution Object (EO). 
+ *
+ * Allocate an EO handle and initialize internal data for the new EO.
+ * The EO is left in a non-active state, i.e. no events are dispatched before
+ * em_eo_start() has been called. Start, stop and receive callback functions
+ * are mandatory arguments.
+ *
+ * The EO name is copied into EO internal data. The maximum length stored is
+ * EM_EO_NAME_LEN. Duplicate names are allowed, but em_eo_find() will only
+ * match one of them.
+ *
+ * @param name         Name of the EO (optional, NULL ok)
+ * @param start        Start function
+ * @param local_start  Core local start function (NULL if no local start)
+ * @param stop         Stop function
+ * @param local_stop   Core local stop function (NULL if no local stop)
+ * @param receive      Receive function
+ * @param eo_ctx       User defined EO context data, EM passes the value
+ *                     (NULL if no context)
+ *
+ * @return New EO handle if successful, otherwise EM_EO_UNDEF.
+ *
+ * @see em_eo_start(), em_eo_delete(), em_queue_create(), em_eo_add_queue()
+ * @see em_start_func_t, em_stop_func_t, em_receive_func_t
+ */
+em_eo_t
+em_eo_create(const char *name,
+	     em_start_func_t start, em_start_local_func_t local_start,
+	     em_stop_func_t stop, em_stop_local_func_t local_stop,
+	     em_receive_func_t receive, const void *eo_ctx);
+
+/**
+ * EO parameters for em_eo_create_multircv(...)
+ */
+typedef struct {
+	/**
+	 * EO start function, mandatory.
+	 * Called once on one core, triggered by em_eo_start/_start_sync().
+	 * First EO-function to be called.
+	 */
+	em_start_func_t start;
+	/**
+	 * EO core-local start function, optional (set NULL if not used).
+	 * Called on all EM-cores after 'start' has completed.
+	 */
+	em_start_local_func_t local_start;
+	/**
+	 * EO stop function, mandatory.
+	 * Called once on one core, triggered by em_eo_stop/_stop_sync().
+	 * Last EO-function to be called.
+	 */
+	em_stop_func_t stop;
+	/**
+	 * EO core-local stop function, optional (set NULL if not used).
+	 * Called and completed on all EM-cores before 'stop'.
+	 */
+	em_stop_local_func_t local_stop;
+	/**
+	 * EO receive function for multiple events, mandatory.
+	 */
+	em_receive_multi_func_t receive_multi;
+	/**
+	 * Maximum number of events passed to the receive function.
+	 * EM will dispatch 1 to 'max_events' at a time to the EO's multi-event
+	 * receive function.
+	 * Use '0' for an EM default value (=EM_EO_MULTIRCV_MAX_EVENTS).
+	 * The user provided 'receive_multi' function must be able to handle
+	 * 'max_events' events at a time.
+	 */
+	int max_events;
+	/**
+	 * User defined EO context data, optional (NULL if no context).
+	 * EM only passes the value.
+	 */
+	const void *eo_ctx;
+
+	/**
+	 * Internal check - don't touch!
+	 *
+	 * EM will verify that em_eo_multircv_param_init(param) has been called
+	 * before creating an EO with em_eo_create_multircv(..., param)
+	 */
+	uint32_t __internal_check;
+} em_eo_multircv_param_t;
+
+/**
+ * Initialize parameters for the multi-event receive-function EO.
+ *
+ * Initialize em_eo_multircv_param_t to default values for all fields.
+ * After initialization, the user further needs to set the mandatory fields of
+ * 'em_eo_multircv_param_t' before calling em_eo_create_multircv().
+ * Always initialize 'param' first with em_eo_multircv_param_init(&param) to
+ * ensure backwards compatibility with potentially added new options.
+ *
+ * @param param  Address of the em_eo_multircv_param_t to be initialized
+ *
+ * @see em_eo_create_multircv()
+ */
+void em_eo_multircv_param_init(em_eo_multircv_param_t *param);
+
+/**
+ * Create an Execution Object (EO) with a multi-event receive function.
+ *
+ * Similar to em_eo_create(), except that an EO multi-event receive function is
+ * taken into use for the created EO, see em_receive_multi_func_t (passed via
+ * em_eo_multircv_param_t param).
+ *
+ * Always initialize 'param' first with em_eo_multircv_param_init(&param) to
+ * ensure backwards compatibility before setting your own params and calling
+ * em_eo_create_multircv():
+ * @code
+ *	em_eo_multircv_param_t param;
+ *	em_eo_t eo;
+ *
+ *	em_eo_multircv_param_init(&param);
+ *	param.start = my_start_fn;
+ *	param.stop = my_stop_fn;
+ *	param.receive_multi = my_receive_multi_fn;
+ *	param.max_events = MY_MAX_EVENTS; // or use default=0
+ *	...
+ *	eo = em_eo_create_multircv("my-eo", &param);
+ *	if (unlikely(eo == EM_EO_UNDEF))
+ *		report_error();
+ * @endcode
+ *
+ * @param name   Name of the EO (optional, NULL ok)
+ * @param param  EO parameters
+ *
+ * @return New EO handle if successful, otherwise EM_EO_UNDEF.
+ *
+ * @see em_eo_multircv_param_init()
+ * @see em_eo_start(), em_eo_start_sync(), em_eo_stop(), em_eo_stop_sync()
+ * @see em_start_func_t, em_stop_func_t, em_receive_multi_func_t
+ */
+em_eo_t
+em_eo_create_multircv(const char *name, const em_eo_multircv_param_t *param);
+
+/**
+ * Delete Execution Object (EO).
+ *
+ * Immediately delete the given EO and free the identifier.
+ *
+ * NOTE that an EO can only be deleted after it has been stopped using
+ * em_eo_stop() with notifications or em_eo_stop_sync(), otherwise another core
+ * might still access the EO data.
+ * All associated queues must be removed before deleting an EO.
+ *
+ * A sequence of
+ * @code
+ *	em_eo_stop_sync(eo);
+ *	em_eo_remove_queue_all_sync(eo, EM_TRUE);
+ *	em_eo_delete(eo);
+ * @endcode
+ * will cleanly delete an EO from the EM point of view (not including user
+ * allocated data).
+ *
+ * @param eo  EO handle to delete
+ *
+ * @return EM_OK if successful.
+ *
+ * @see em_eo_stop(), em_eo_remove_queue()
+ */
+em_status_t
+em_eo_delete(em_eo_t eo);
+
+/**
+ * Returns the name given to the EO when it was created.
+ *
+ * A copy of the name string (up to 'maxlen' characters) is
+ * written to the user buffer 'name'.
+ * The string is always null terminated - even if the given buffer length
+ * is less than the name length.
+ *
+ * The function returns 0 and writes an empty string if the EO has no name.
+ *
+ * @param eo         EO handle
+ * @param[out] name  Destination buffer
+ * @param maxlen     Maximum length (including the terminating '0')
+ *
+ * @return Number of characters written (excludes the terminating '0').
+ *
+ * @see em_eo_create()
+ */
+size_t
+em_eo_get_name(em_eo_t eo, char *name, size_t maxlen);
+
+/**
+ * Find EO by name.
+ *
+ * Finds an EO by the given name (exact match). An empty string will not match
+ * anything. The search is case sensitive. If there are duplicate names, this
+ * function will only return the first match.
+ *
+ * @param name  the name to look for
+ *
+ * @return EO handle or EM_EO_UNDEF if not found
+ *
+ * @see em_eo_create()
+ */
+em_eo_t
+em_eo_find(const char *name);
+
+/**
+ * Add a queue to an EO, asynchronous (non-blocking)
+ *
+ * Add the given queue to the EO and enable scheduling for it. The function
+ * returns immediately, but the operation can be asynchronous and only fully
+ * complete later. The given notification events are sent when the operation has
+ * completed and the queue is ready to receive events.
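+ *
+ * For example (a sketch; 'notif_event' and 'notif_queue' are assumed to have
+ * been created by the application earlier):
+ * @code
+ *	em_notif_t notif_tbl[1];
+ *
+ *	notif_tbl[0].event = notif_event;
+ *	notif_tbl[0].queue = notif_queue;
+ *	notif_tbl[0].egroup = EM_EVENT_GROUP_UNDEF;
+ *	em_eo_add_queue(eo, queue, 1, notif_tbl);
+ * @endcode
+ *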
+ * Note that the completion notification(s) guarantee that the queue itself is
+ * operational, but if the target EO is not yet started then events sent into
+ * the queue will still be dropped by the dispatcher.
+ *
+ * @param eo         EO handle
+ * @param queue      Queue handle
+ * @param num_notif  Number of notification events, 0 for no notification
+ * @param notif_tbl  Array of pairs of event and queue identifiers
+ *                   (+ optional event groups to send the events with)
+ *
+ * @return EM_OK if successful.
+ *
+ * @see em_queue_create(), em_eo_create(), em_eo_remove_queue(),
+ *      em_eo_add_queue_sync()
+ */
+em_status_t
+em_eo_add_queue(em_eo_t eo, em_queue_t queue,
+		int num_notif, const em_notif_t notif_tbl[]);
+
+/**
+ * Add a queue to an EO, synchronous (blocking)
+ *
+ * As em_eo_add_queue(), but does not return until the queue is ready to
+ * receive events.
+ *
+ * Note that the function is blocking and will not return until the operation
+ * has completed across all concerned EM cores.
+ * Sync-API calls can block the core for a long (indefinite) time, thus they
+ * should not be used to make runtime changes on real time EM cores - consider
+ * the async variants of the APIs in these cases instead.
+ * While one core is calling a sync-API function, the others must be running the
+ * EM dispatch loop to be able to receive and handle the sync-API request events
+ * sent internally.
+ * Use the sync-APIs mainly to simplify application start-up or teardown.
+ *
+ * @param eo     EO handle
+ * @param queue  Queue handle
+ *
+ * @return EM_OK if successful.
+ *
+ * @see em_queue_create(), em_eo_create(), em_eo_remove_queue()
+ * @see em_eo_add_queue() for an asynchronous version of the API
+ */
+em_status_t
+em_eo_add_queue_sync(em_eo_t eo, em_queue_t queue);
+
+/**
+ * Removes a queue from an EO, asynchronous (non-blocking)
+ *
+ * Disables queue scheduling and removes the queue from the EO. The function
+ * returns immediately, but the operation can be asynchronous and only fully
+ * complete later. The given notification events are sent when the operation has
+ * completed across all cores and no event from this queue is being dispatched
+ * anymore. Use notifications to know when the operation has fully completed
+ * and the queue can safely be deleted.
+ *
+ * @param eo         EO handle
+ * @param queue      Queue handle to remove
+ * @param num_notif  Number of notification events, 0 for no notification
+ * @param notif_tbl  Array of pairs of event and queue identifiers
+ *                   (+ optional event groups to send the events with)
+ *
+ * @return EM_OK if successful.
+ *
+ * @see em_eo_add_queue(), em_eo_remove_queue_sync()
+ */
+em_status_t
+em_eo_remove_queue(em_eo_t eo, em_queue_t queue,
+		   int num_notif, const em_notif_t notif_tbl[]);
+
+/**
+ * Removes a queue from an EO, synchronous (blocking)
+ *
+ * As em_eo_remove_queue(), but will not return until the queue has been
+ * disabled, removed from the EO and no more events are being processed from
+ * the queue.
+ *
+ * Note that the function is blocking and will not return until the operation
+ * has completed across all concerned EM cores.
+ * Sync-API calls can block the core for a long (indefinite) time, thus they
+ * should not be used to make runtime changes on real time EM cores - consider
+ * the async variants of the APIs in these cases instead.
+ * While one core is calling a sync-API function, the others must be running the
+ * EM dispatch loop to be able to receive and handle the sync-API request events
+ * sent internally.
+ * Use the sync-APIs mainly to simplify application start-up or teardown. + * + * @param eo EO handle + * @param queue Queue handle to remove + * + * @return EM_OK if successful. + * + * @see em_eo_remove_queue() for an asynchronous version of the API + */ +em_status_t +em_eo_remove_queue_sync(em_eo_t eo, em_queue_t queue); + +/** + * Removes all queues from an EO, asynchronous (non-blocking) + * + * Like em_eo_remove_queue(), but removes all queues currently associated with + * the EO. + * The argument 'delete_queues' can be used to automatically also delete all + * queues by setting it to EM_TRUE (EM_FALSE otherwise). + * Note: any allocated queue contexts will still need to be handled elsewhere. + * + * @param eo EO handle + * @param delete_queues delete the EO's queues if set to EM_TRUE + * @param num_notif Number of notification events, 0 for no notification + * @param notif_tbl Array of pairs of event and queue identifiers + * (+ optional event groups to send the events with) + * + * @return EM_OK if successful. + * + * @see em_eo_add_queue(), em_eo_remove_queue_sync(), + * em_eo_remove_queue_all_sync() + */ +em_status_t +em_eo_remove_queue_all(em_eo_t eo, int delete_queues, + int num_notif, const em_notif_t notif_tbl[]); + +/** + * Removes all queues from an EO, synchronous (blocking). + * + * As em_eo_remove_queue_all(), but does not return until all queues have + * been removed. + * + * Note that the function is blocking and will not return until the operation + * has completed across all concerned EM cores. + * Sync-API calls can block the core for a long (indefinite) time, thus they + * should not be used to make runtime changes on real time EM cores - consider + * the async variants of the APIs in these cases instead. + * While one core is calling a sync-API function, the others must be running the + * EM dispatch loop to be able to receive and handle the sync-API request events + * sent internally. + * Use the sync-APIs mainly to simplify application start-up or teardown. + * + * @param eo EO handle + * @param delete_queues delete the EO's queues if set to EM_TRUE + * + * @return EM_OK if successful. + * + * + * @see em_eo_remove_queue_all() for an asynchronous version of the API + */ +em_status_t +em_eo_remove_queue_all_sync(em_eo_t eo, int delete_queues); + +/** + * Register an EO specific error handler. + * + * The EO specific error handler is called if an error occurs or em_error() is + * called in the context of the running EO. + * Note, the provided function will override any previously registered + * error handler for the EO in question. + * The global error handler is called if no EO specific error handler is + * registered. + * + * @param eo EO handle + * @param handler New error handler + * + * @return EM_OK if successful. + * + * @see em_register_error_handler(), em_error_handler_t() + */ +em_status_t +em_eo_register_error_handler(em_eo_t eo, em_error_handler_t handler); + +/** + * Unregister an EO specific error handler. + * + * Removes a previously registered EO specific error handler and restores the + * global error handler into use for the EO. + * + * @param eo EO handle + * + * @return EM_OK if successful. + */ +em_status_t +em_eo_unregister_error_handler(em_eo_t eo); + +/** + * Start an Execution Object (EO), asynchronous (non-blocking) + * + * Start and enable a previously created EO. + * The em_eo_start() function will first call the user provided global EO start + * function. 
If that global start function returns EM_OK then events to trigger
+ * the (optional) user provided local start function are sent to all cores.
+ * The em_eo_start() function returns immediately after the global start
+ * returns, which means that the action only fully completes later.
+ * Notifications should be used if the caller needs to know when the EO start
+ * has fully completed. The given notification event(s) will be sent to the
+ * given queue(s) when the start is completed on all cores.
+ *
+ * Local start is not called and event dispatching is not enabled for this EO if
+ * the global start function does not return EM_OK.
+ *
+ * The notification(s) are sent when the global start function returns if a
+ * local start function hasn't been provided.
+ * Use '0' as 'num_notif' if notifications are not needed. Be aware, in this
+ * case, that the EO may not immediately be ready to handle events.
+ *
+ * Note that events sent to scheduled queues from a user provided EO global or
+ * local start function are buffered. The buffered events will be sent into the
+ * queues when the EO start functions have all returned - otherwise it would not
+ * be possible to send events to the EO's own queues as the EO is not yet in a
+ * started state. No buffering is done when sending to queues that are
+ * not scheduled.
+ *
+ * The optional conf-argument can be used to pass application specific
+ * information (e.g. configuration data) to the EO.
+ *
+ * @param eo           EO handle
+ * @param[out] result  Optional pointer to em_status_t, which gets updated to
+ *                     the return value of the actual user provided EO global
+ *                     start function.
+ * @param conf         Optional startup configuration, NULL ok.
+ * @param num_notif    If not 0, defines the number of notification events to
+ *                     send when all cores have returned from the start
+ *                     function(s).
+ * @param notif_tbl    Array of em_notif_t, the optional notification events
+ *                     (array data is copied)
+ *
+ * @return EM_OK if successful.
+ *
+ * @see em_start_func_t(), em_start_local_func_t(), em_eo_stop(),
+ *      em_eo_start_sync()
+ */
+em_status_t
+em_eo_start(em_eo_t eo, em_status_t *result, const em_eo_conf_t *conf,
+	    int num_notif, const em_notif_t notif_tbl[]);
+
+/**
+ * Start Execution Object (EO), synchronous (blocking)
+ *
+ * As em_eo_start(), but will not return until the operation is complete.
+ *
+ * Note that the function is blocking and will not return until the operation
+ * has completed across all concerned EM cores.
+ * Sync-API calls can block the core for a long (indefinite) time, thus they
+ * should not be used to make runtime changes on real time EM cores - consider
+ * the async variants of the APIs in these cases instead.
+ * While one core is calling a sync-API function, the others must be running the
+ * EM dispatch loop to be able to receive and handle the sync-API request events
+ * sent internally.
+ * Use the sync-APIs mainly to simplify application start-up or teardown.
+ *
+ * @param eo           EO handle
+ * @param[out] result  Optional pointer to em_status_t, which gets updated to
+ *                     the return value of the actual user provided EO global
+ *                     start function.
+ * @param conf         Optional startup configuration, NULL ok.
+ *
+ * @return EM_OK if successful.
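+ *
+ * For example (a sketch; 'eo' is a previously created EO and 'report_error()'
+ * is hypothetical application code):
+ * @code
+ *	em_status_t start_fn_ret = EM_OK; // updated by the call below
+ *	em_status_t stat = em_eo_start_sync(eo, &start_fn_ret, NULL);
+ *
+ *	if (unlikely(stat != EM_OK || start_fn_ret != EM_OK))
+ *		report_error();
+ * @endcode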
+ * + * @see em_start_func_t(), em_start_local_func_t(), em_eo_stop() + * @see em_eo_start() for an asynchronous version of the API + */ +em_status_t +em_eo_start_sync(em_eo_t eo, em_status_t *result, const em_eo_conf_t *conf); + +/** + * Stop Execution Object (EO), asynchronous (non-blocking) + * + * Disables event dispatch from all related queues, calls core local stop + * on all cores and finally calls the global stop function of the EO when all + * cores have returned from the (optional) core local stop. + * The call to the global EO stop is asynchronous and only done when all cores + * have completed processing of the receive function and/or core local stop. + * This guarantees no other core is accessing EO data during the EO global stop + * function. + * + * This function returns immediately, but may only fully complete later. If the + * caller needs to know when the EO stop has actually completed, the num_notif + * and notif_tbl should be used. The given notification event(s) will be sent to + * given queue(s) when the stop operation actually completes. + * If such notifications are not needed, use '0' as 'num_notif'. + * + * When the EO has stopped it can be started again with em_eo_start(). + * + * @param eo EO handle + * @param num_notif Number of notification events, 0 for no notification + * @param notif_tbl Array of pairs of event and queue identifiers + * (+ optional event groups to send the events with) + * + * @return EM_OK if successful. + * + * @see em_stop_func_t(), em_stop_local_func_t(), em_eo_start(), + * em_eo_stop_sync() + */ +em_status_t +em_eo_stop(em_eo_t eo, int num_notif, const em_notif_t notif_tbl[]); + +/** + * Stop Execution Object (EO), synchronous (blocking) + * + * As em_eo_stop(), but will not return until the operation is complete. + * + * Note that the function is blocking and will not return until the operation + * has completed across all concerned EM cores. + * Sync-API calls can block the core for a long (indefinite) time, thus they + * should not be used to make runtime changes on real time EM cores - consider + * the async variants of the APIs in these cases instead. + * While one core is calling a sync-API function, the others must be running the + * EM dispatch loop to be able to receive and handle the sync-API request events + * sent internally. + * Use the sync-APIs mainly to simplify application start-up or teardown. + * + * @param eo EO handle + * + * @return EM_OK if successful. + * + * @see em_stop_func_t(), em_stop_local_func_t(), em_eo_start() + * @see em_eo_stop() for an asynchronous version of the API + */ +em_status_t +em_eo_stop_sync(em_eo_t eo); + +/** + * Return the currently active EO + * + * Returns the EO handle associated with the currently running EO function. + * Only valid if called within an EO-context, will return EM_EO_UNDEF otherwise. + * Can be called from the EO-receive or EO-start/stop functions (or subfunctions + * thereof). + * Note that calling em_eo_current() from e.g. an EO-start function that was + * launched from within another EO's receive will return the EO handle of the + * EO being started - i.e. always returns the 'latest' current EO. + * + * @return The current EO or EM_EO_UNDEF if no current EO (or error) + */ +em_eo_t +em_eo_current(void); + +/** + * Get EO specific (application) context. + * + * Returns the EO context pointer that the application has earlier provided via + * em_eo_create(). 
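+ *
+ * For example, inside an EO function (a sketch; 'my_eo_ctx_t' is a
+ * hypothetical application context type given at em_eo_create()):
+ * @code
+ *	my_eo_ctx_t *my_ctx = em_eo_get_context(em_eo_current());
+ * @endcode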
+ *
+ * @param eo  EO for which the context is requested
+ *
+ * @return EO specific context pointer or NULL if no context (or error)
+ */
+void *
+em_eo_get_context(em_eo_t eo);
+
+/**
+ * Return the EO state.
+ *
+ * Returns the current state of the given EO.
+ *
+ * @param eo  EO handle
+ *
+ * @return The current EO state or EM_EO_STATE_UNDEF if never created.
+ */
+em_eo_state_t
+em_eo_get_state(em_eo_t eo);
+
+/**
+ * Initialize EO iteration and return the first EO handle.
+ *
+ * Can be used to initialize the iteration to retrieve all created EOs for
+ * debugging or management purposes. Use em_eo_get_next() after this call until
+ * it returns EM_EO_UNDEF. A new call to em_eo_get_first() resets the iteration,
+ * which is maintained per core (thread). The operation should be completed in
+ * one go before returning from the EO's event receive function (or start/stop).
+ *
+ * The number of EOs (output arg 'num') may not match the amount of EOs actually
+ * returned by iterating using em_eo_get_next() if EOs are added or removed in
+ * parallel by another core. The order of the returned EO handles is undefined.
+ *
+ * @code
+ *	unsigned int num;
+ *	em_eo_t eo = em_eo_get_first(&num);
+ *	while (eo != EM_EO_UNDEF) {
+ *		eo = em_eo_get_next();
+ *	}
+ * @endcode
+ *
+ * @param[out] num  Pointer to an unsigned int to store the amount of EOs into
+ * @return The first EO handle or EM_EO_UNDEF if none exist
+ *
+ * @see em_eo_get_next()
+ */
+em_eo_t
+em_eo_get_first(unsigned int *num);
+
+/**
+ * Return the next EO handle.
+ *
+ * Continues the EO iteration started by em_eo_get_first() and returns the next
+ * EO handle.
+ *
+ * @return The next EO handle or EM_EO_UNDEF if the EO iteration is completed
+ *         (i.e. no more EOs available).
+ *
+ * @see em_eo_get_first()
+ */
+em_eo_t
+em_eo_get_next(void);
+
+/**
+ * Initialize iteration of an EO's queues and return the first queue handle.
+ *
+ * Can be used to initialize the iteration to retrieve all queues associated
+ * with the given EO for debugging or management purposes.
+ * Use em_eo_queue_get_next() after this call until it returns EM_QUEUE_UNDEF.
+ * A new call to em_eo_queue_get_first() resets the iteration, which is
+ * maintained per core (thread). The operation should be started and completed
+ * in one go before returning from the EO's event receive function (or
+ * start/stop).
+ *
+ * The number of queues owned by the EO (output arg 'num') may not match the
+ * amount of queues actually returned by iterating using em_eo_queue_get_next()
+ * if queues are added or removed in parallel by another core. The order of
+ * the returned queue handles is undefined.
+ *
+ * Simplified example:
+ * @code
+ *	unsigned int num;
+ *	em_queue_t q = em_eo_queue_get_first(&num, eo);
+ *	while (q != EM_QUEUE_UNDEF) {
+ *		q = em_eo_queue_get_next();
+ *	}
+ * @endcode
+ *
+ * @param[out] num  Output the current amount of queues associated with the EO
+ * @param eo        EO handle
+ *
+ * @return The first queue handle or EM_QUEUE_UNDEF if none exist or the EO
+ *         is invalid.
+ *
+ * @see em_eo_queue_get_next()
+ **/
+em_queue_t
+em_eo_queue_get_first(unsigned int *num, em_eo_t eo);
+
+/**
+ * Return the EO's next queue handle.
+ *
+ * Continues the queue iteration started by em_eo_queue_get_first() and returns
+ * the next queue handle owned by the EO.
+ *
+ * @return The next queue handle or EM_QUEUE_UNDEF if the queue iteration is
+ *         completed (i.e. no more queues available for this EO).
+ * + * @see em_eo_queue_get_first() + **/ +em_queue_t +em_eo_queue_get_next(void); + +/** + * Convert an EO handle to an unsigned integer + * + * @param eo EO handle to be converted + * @return uint64_t value that can be used to print/display the handle + * + * @note This routine is intended to be used for diagnostic purposes + * to enable applications to e.g. generate a printable value that represents + * an em_eo_t handle. + */ +uint64_t em_eo_to_u64(em_eo_t eo); + +/** + * @} + */ +#ifdef __cplusplus +} +#endif + +#pragma GCC visibility pop +#endif /* EVENT_MACHINE_EO_H_ */ diff --git a/include/event_machine/api/event_machine_event_group.h b/include/event_machine/api/event_machine_event_group.h index 89abc4e4..d8e4d45a 100644 --- a/include/event_machine/api/event_machine_event_group.h +++ b/include/event_machine/api/event_machine_event_group.h @@ -1,477 +1,477 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef EVENT_MACHINE_EVENT_GROUP_H_ -#define EVENT_MACHINE_EVENT_GROUP_H_ - -#pragma GCC visibility push(default) - -/** - * @file - * @defgroup em_event_group Event group - * Event Machine fork-join helper. - * @{ - * - * An event group can be used to trigger a join of parallel operations in the - * form of notification events. The number of parallel operations needs to be - * known in advance by the event group creator, but the separate event handlers - * don't necessarily need to know anything about the other related events. - * An event group is functionally a shared atomic counter decremented when each - * related event has been handled (EO-receive() returns). The notification - * events are automatically sent once the count reaches zero. - * - * There are two separate main usage patterns: - * - * Sender originated (original): - * ---------------------------- - * 1. an event group is allocated with em_event_group_create(). - * - * 2. the number of parallel events and the notifications are set with - * em_event_group_apply(). - * - * 3. 
the (parallel) events are sent normally but using em_send_group() instead - * of em_send(). This tags the event with the given event group. - * - * 4. once received by a core the tag is used to switch core specific current - * event group to the one in the tag. The receiver EO handles the event - * normally (does not see any difference). - * - * 5. as the receive function returns the count of the current event group is - * decremented. If the count reaches zero (last event) the related - * notification event(s) are sent automatically and can trigger the next - * operation for the application. - * - * 6. the sequence can continue from step 2 for a new set of events if the - * event group is to be reused. - * - * Receiver originated (API 1.2): - * ----------------------------- - * 1. an event group is created with em_event_group_create(). - * - * 2. the number of parallel events and the notifications are set with - * em_event_group_apply(). - * - * 3. during the processing of any received event that is not already tagged to - * belong to an event group, em_event_group_assign() can be used to set the - * current event group (a core local value). The rest is then equivalent to - * as if the event was originally sent to an event group. - * - * 4. as the receive function returns the count of the current event group is - * decremented. If the count reaches zero (last event) the related - * notification event(s) are sent automatically and can trigger the next - * operation for the application. - * - * 5. the sequence can continue from step 2 for a new set of events if the - * event group is to be reused. - * - * From an application (EO) point of view, an event group can get activated - * either by entering the EO receive with an event tagged to an event group or - * by explicitly calling em_event_group_assign. The current event group is core - * local and only one event group can be active (current) at a time. - * Assigning a received event that already is tagged to an event group, e.g. - * sent with em_send_group(), is not allowed unless the event group is - * deactivated first with em_event_group_processing_end(). - * The current event group gets deactivated by exiting the EO receive function - * or by explicitly calling em_event_group_processing_end(). Deactivation means - * the count of the event group is decremented and if the count reaches zero - * the notification events are sent. - * The current event group is local to a core (dispatcher) and exists only - * within the EO receive function. - * - * Note, that event groups may only work with events that are to be handled by - * an EO, i.e. SW events. - * - * OpenEM implementation should internally use a generation count or other - * technique to make sure that em_event_group_abort() can stop a problem - * propagation, i.e. after a group is aborted (and applied a new count) any - * potential delayed event(s) from the previous cycle will not cause the new - * count to be decremented. - * The same should be valid for excess group events, i.e. when sending more - * than the applied count. - * To make it possible for the application to properly handle such problems, - * the implementation should pre-check incoming events and call error handler - * before giving the event to an EO. This makes it possible for the application - * to choose whether to drop those events (at the error handler) or let them be - * processed. 
- * - * It is not allowed to use event references with event groups since assigning - * an event that has references to an event group would assign all the - * references to the event group resulting in undefined behaviour. E.g. using - * em_send_group()/em_send_group_multi() to send a reference is wrong. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include - -/** - * Create a new event group for fork-join. - * - * The amount of simultaneous event groups can be limited. - * - * @return The new event group or EM_EVENT_GROUP_UNDEF if no event group is - * available. - * - * @see em_event_group_delete(), em_event_group_apply() - */ -em_event_group_t em_event_group_create(void); - -/** - * Delete (unallocate) an event group. - * - * An event group must not be deleted before it has been completed - * (count reached zero) or aborted. A created but never applied event group - * can be deleted. - * - * @param event_group Event group to delete - * - * @return EM_OK if successful. - * - * @see em_event_group_create(), em_event_group_abort() - */ -em_status_t em_event_group_delete(em_event_group_t event_group); - -/** - * Apply event group configuration. - * - * This function sets the event count and notification parameters for the event - * group. After it returns, events sent or assigned to the event group are - * counted against the current count value. Notification events are sent when - * all (counted) events have been processed (count is decremented at EO receive - * return or by calling em_event_group_processing_end()). A new apply call is - * needed to re-use the event group for another cycle (with a new count and - * notifications). - * - * Notification events can optionally be sent to/tagged with another event - * group but not with the same event group that triggered the notifications, - * see em_notif_t for more. - * - * @attention em_event_group_apply() can only be used on a newly created event - * group or when the previous cycle is completed or successfully aborted. - * Application can use em_event_group_is_ready() to detect whether apply is - * allowed but would normally use a notification to setup a new cycle - * (implementation must make sure that when any of the notifications is - * received the group is ready for new apply). - * - * Apply should only be called once per group cycle. - * - * @param event_group Event group - * @param count Number of events in the group (positive integer) - * @param num_notif Number of notification events to send - * @param notif_tbl Table of notifications (events and target queues) - * - * @return EM_OK if successful. - * - * @see em_event_group_create(), em_send_group(), em_event_group_is_ready(), - * em_notif_t - */ -em_status_t em_event_group_apply(em_event_group_t event_group, int count, - int num_notif, const em_notif_t notif_tbl[]); - -/** - * Increment the current event group count. - * - * Increments the event count of the currently active event group (received or - * assigned event). Enables sending new events into the current event group. - * The event count cannot be decremented and this will fail if there is no - * current event group. - * - * @param count Number of events to add to the event group (positive integer) - * - * @return EM_OK if successful. - * - * @see em_send_group(), em_event_group_apply() - */ -em_status_t em_event_group_increment(int count); - -/** - * Checks if the event group is ready for 'apply'. - * - * Returns EM_TRUE (1) if the given event group is ready, i.e. 
the user can do - * em_event_group_apply() again. A better alternative to this is to use a - * related notification event to re-use the event group (apply can always be - * used when handling a notification event from the event group). - * - * An event group that has been applied a count but no events sent is not - * considered 'ready for apply'. If a change is needed the group has to be - * aborted and then re-applied. - * - * Return value EM_TRUE does not guarantee all notifications are received nor - * handled, but the event group count has reached zero and the event group - * is ready for a new apply. - * - * @param event_group Event group - * - * @return EM_TRUE if the given event group is ready for apply - * - * @see em_event_group_create(), em_event_group_apply() - */ -int em_event_group_is_ready(em_event_group_t event_group); - -/** - * Return the currently active event group. - * - * Returns the current event group or EM_EVENT_GROUP_UNDEF if an event group is - * not active (i.e. never activated or deactivated using - * em_event_group_processing_end()). - * - * Can only be used within an EO receive function. - * - * @return Current event group or EM_EVENT_GROUP_UNDEF - * - * @see em_event_group_create() - */ -em_event_group_t em_event_group_current(void); - -/** - * Send event associated with/tagged to an event group. - * - * Any valid event and destination queue parameters can be used. The event - * group indicates which event group the event is tagged to. The event group - * has to first be created and applied a count. - * One should always send the correct amount of events to an event group, i.e. - * matching the applied count. - * - * Event group is not supported with unscheduled queues. - * - * It is not allowed to use event references with event groups since assigning - * an event that has references to an event group would assign all the - * references to the event group resulting in undefined behaviour. E.g. using - * em_send_group() to send a reference is wrong. - * - * @param event Event to send - * @param queue Destination queue - * @param event_group Event group - * - * @return EM_OK if successful. - * - * @see em_send(), em_event_group_create(), em_event_group_apply(), - * em_event_group_increment() - */ -em_status_t em_send_group(em_event_t event, em_queue_t queue, - em_event_group_t event_group); - -/** - * Send multiple events associated with/tagged to an event group. - * - * This is like em_send_group, but multiple events can be sent with one call - * for potential performance gain. - * The call returns the number of events actually sent. A return value equal to - * 'num' means that all events were sent. A value less than 'num' means the - * events at the end of the given event list were not sent and must be handled - * by the application. - * The function will not modify the given list of events. - * - * Event group is not supported with unscheduled queues. - * - * It is not allowed to use event references with event groups since assigning - * an event that has references to an event group would assign all the - * references to the event group resulting in undefined behaviour. E.g. using - * em_send_group_multi() to send references is wrong. - * - * @param events List of events to send (i.e. 
ptr to array of events) - * @param num Number of events - * @param queue Destination queue - * @param event_group Event group - * - * @return number of events successfully sent (equal to num if all successful) - * - * @see em_send_group() - */ -int em_send_group_multi(const em_event_t events[], int num, em_queue_t queue, - em_event_group_t event_group); - -/** - * Signal early end of processing of the current event group - * - * This is an optional call that can be used to move the implicit event group - * handling (decrementing the count) from exiting event receive function to the - * point of this call - the current event group count is decremented - * immediately and if it reaches zero the notifications are also sent. In that - * case the group will be ready for a new apply after this returns. - * - * This impacts the current event group the same way whether it was activated - * by receiving a tagged event or EO called em_event_group_assign(). - * - * This call does not change potential atomicity or ordering for the current - * event and is a no-operation if called while an event group is not active - * (no current group). - * - * Can only be used within the EO receive function. - */ -void em_event_group_processing_end(void); - -/** - * Assign core local current event group. - * - * The assign functionality can be used to set the core local current event - * group. The event group handling after the assign call is identical to - * the handling of an event group that was originally set by sending an event - * tagged to that event group, i.e. the core local current event group - * is active and will be operated on in a normal way. - * Assign will fail if there already is an active current event group, i.e. - * only one event group can be active at a time (per core). - * - * This needs to be used with care, i.e. match the amount of events applied - * and assigned. - * - * @param event_group An applied event group to assign to - * - * @return EM_OK if assignment was successful - */ -em_status_t em_event_group_assign(em_event_group_t event_group); - -/** - * Abort the ongoing event group. - * - * This is a recovery operation to abort an ongoing event group in case it does - * not get completed. This will reset the group back to a state ready for - * a new apply. Note, that there is a potential race as the group could get - * completed on another thread while executing this (e.g. a delayed event is - * finally received and processed). Implementation will synchronize internal - * state changes, but this call may succeed or fail depending on timing so - * abort should be done with care for recovery purpose only. - * - * Notification events related to the ongoing (to be aborted) cycle can be - * managed as follows - * 1) save possible related notifications using em_event_group_get_notif() - * 2) call em_event_group_abort() - * 3) IF em_event_group_abort() returns EM_OK the operation was successfully - * completed meaning the earlier notifications will not be sent thus the - * saved notifications can be freed or re-used. Otherwise the call was made - * too late and the saved notifications must not be touched as they are to - * be sent. - * - * This means the synchronization point is em_event_group_abort(), not - * em_event_group_get_notif() which might return notifications that will still - * be sent. - * - * @attention Related notification events will not be automatically freed in - * any case and must be handled by the application. 
- * - * @param event_group Event group to abort and reset - * - * @return EM_OK if the call was made early enough to cleanly abort, i.e. - * before the last event was processed. EM_OK also means the - * notifications will not be sent. - */ -em_status_t em_event_group_abort(em_event_group_t event_group); - -/** - * Return notification events currently related to an applied event group. - * - * This returns the current notifications or none (0) if they were already sent - * (event group completed). - * - * @attention This is not a synchronization point, which means - * em_event_group_get_notif() could return notifications which - * are just going to be sent and thus should not be touched. - * - * @param event_group Event group - * @param max_notif Maximum number of notifications to return - * @param[out] notif_tbl Table for notifications to fill - * - * @return Number of returned notifications - * - * @see em_event_group_apply(), em_event_group_abort() - */ -int em_event_group_get_notif(em_event_group_t event_group, - int max_notif, em_notif_t notif_tbl[]); - -/** - * Initialize event group iteration and return the first event group handle. - * - * Can be used to initialize the iteration to retrieve all created event groups - * for debugging or management purposes. Use em_event_group_get_next() after - * this call until it returns EM_EVENT_GROUP_UNDEF. - * A new call to em_event_group_get_first() resets the iteration, which is - * maintained per core (thread). The operation should be completed in one go - * before returning from the EO's event receive function (or start/stop). - * - * The number of event groups (output arg 'num') may not match the amount of - * event groups actually returned by iterating using em_event_group_get_next() - * if event groups are added or removed in parallel by another core. The order - * of the returned event group handles is undefined. - * - * @code - * unsigned int num; - * em_event_group_t eg = em_event_group_get_first(&num); - * while (eg != EM_EVENT_GROUP_UNDEF) { - * eg = em_event_group_get_next(); - * } - * @endcode - * - * @param[out] num Pointer to an unsigned int to store the amount of - * event groups into - * @return The first event group handle or EM_EVENT_GROUP_UNDEF if none exist - * - * @see em_event_group_get_next() - **/ -em_event_group_t -em_event_group_get_first(unsigned int *num); - -/** - * Return the next event group handle. - * - * Continues the event group iteration started by em_event_group_get_first() and - * returns the next event group handle. - * - * @return The next event group handle or EM_EVENT_GROUP_UNDEF if the event - * group iteration is completed (i.e. no more event groups available). - * - * @see em_event_group_get_first() - **/ -em_event_group_t -em_event_group_get_next(void); - -/** - * Convert an event group handle to an unsigned integer - * - * @param event_group Event group handle to be converted - * @return uint64_t value that can be used to print/display the handle - * - * @note This routine is intended to be used for diagnostic purposes - * to enable applications to e.g. generate a printable value that represents - * an em_event_group_t handle. - */ -uint64_t em_event_group_to_u64(em_event_group_t event_group); - -/** - * @} - */ -#ifdef __cplusplus -} -#endif - -#pragma GCC visibility pop -#endif /* EVENT_MACHINE_EVENT_GROUP_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef EVENT_MACHINE_EVENT_GROUP_H_ +#define EVENT_MACHINE_EVENT_GROUP_H_ + +#pragma GCC visibility push(default) + +/** + * @file + * @defgroup em_event_group Event group + * Event Machine fork-join helper. + * @{ + * + * An event group can be used to trigger a join of parallel operations in the + * form of notification events. The number of parallel operations needs to be + * known in advance by the event group creator, but the separate event handlers + * don't necessarily need to know anything about the other related events. + * An event group is functionally a shared atomic counter decremented when each + * related event has been handled (EO-receive() returns). The notification + * events are automatically sent once the count reaches zero. + * + * There are two separate main usage patterns: + * + * Sender originated (original): + * ---------------------------- + * 1. an event group is allocated with em_event_group_create(). + * + * 2. the number of parallel events and the notifications are set with + * em_event_group_apply(). + * + * 3. the (parallel) events are sent normally but using em_send_group() instead + * of em_send(). This tags the event with the given event group. + * + * 4. once received by a core the tag is used to switch core specific current + * event group to the one in the tag. The receiver EO handles the event + * normally (does not see any difference). + * + * 5. as the receive function returns the count of the current event group is + * decremented. If the count reaches zero (last event) the related + * notification event(s) are sent automatically and can trigger the next + * operation for the application. + * + * 6. the sequence can continue from step 2 for a new set of events if the + * event group is to be reused. + * + * Receiver originated (API 1.2): + * ----------------------------- + * 1. an event group is created with em_event_group_create(). + * + * 2. the number of parallel events and the notifications are set with + * em_event_group_apply(). + * + * 3. 
during the processing of any received event that is not already tagged to
+ *    belong to an event group, em_event_group_assign() can be used to set the
+ *    current event group (a core local value). The rest is then equivalent to
+ *    the case where the event was originally sent to an event group.
+ *
+ * 4. as the receive function returns the count of the current event group is
+ *    decremented. If the count reaches zero (last event) the related
+ *    notification event(s) are sent automatically and can trigger the next
+ *    operation for the application.
+ *
+ * 5. the sequence can continue from step 2 for a new set of events if the
+ *    event group is to be reused.
+ *
+ * From an application (EO) point of view, an event group can get activated
+ * either by entering the EO receive with an event tagged to an event group or
+ * by explicitly calling em_event_group_assign(). The current event group is
+ * core local and only one event group can be active (current) at a time.
+ * Assigning a received event that already is tagged to an event group, e.g.
+ * sent with em_send_group(), is not allowed unless the event group is
+ * deactivated first with em_event_group_processing_end().
+ * The current event group gets deactivated by exiting the EO receive function
+ * or by explicitly calling em_event_group_processing_end(). Deactivation means
+ * the count of the event group is decremented and if the count reaches zero
+ * the notification events are sent.
+ * The current event group is local to a core (dispatcher) and exists only
+ * within the EO receive function.
+ *
+ * Note, that event groups may only work with events that are to be handled by
+ * an EO, i.e. SW events.
+ *
+ * The OpenEM implementation should internally use a generation count or other
+ * technique to make sure that em_event_group_abort() can stop a problem
+ * propagation, i.e. after a group is aborted (and applied a new count) any
+ * potential delayed event(s) from the previous cycle will not cause the new
+ * count to be decremented.
+ * The same should be valid for excess group events, i.e. when sending more
+ * than the applied count.
+ * To make it possible for the application to properly handle such problems,
+ * the implementation should pre-check incoming events and call the error
+ * handler before giving the event to an EO. This makes it possible for the
+ * application to choose whether to drop those events (at the error handler)
+ * or let them be processed.
+ *
+ * It is not allowed to use event references with event groups since assigning
+ * an event that has references to an event group would assign all the
+ * references to the event group resulting in undefined behaviour. E.g. using
+ * em_send_group()/em_send_group_multi() to send a reference is wrong.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <event_machine/api/event_machine_types.h>
+#include <event_machine/platform/event_machine_hw_types.h>
+
+/**
+ * Create a new event group for fork-join.
+ *
+ * The amount of simultaneous event groups can be limited.
+ *
+ * @return The new event group or EM_EVENT_GROUP_UNDEF if no event group is
+ *	available.
+ *
+ * @see em_event_group_delete(), em_event_group_apply()
+ */
+em_event_group_t em_event_group_create(void);
+
+/**
+ * Delete (unallocate) an event group.
+ *
+ * An event group must not be deleted before it has been completed
+ * (count reached zero) or aborted. A created but never applied event group
+ * can be deleted.
+ *
+ * @param event_group Event group to delete
+ *
+ * @return EM_OK if successful.
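+ *
+ * A minimal sketch (assuming 'eg' is an event group whose current cycle has
+ * completed, or one that was created but never applied):
+ * @code
+ *	em_status_t stat = em_event_group_delete(eg);
+ *
+ *	if (stat != EM_OK) {
+ *		// group still active: abort the ongoing cycle, then retry
+ *		if (em_event_group_abort(eg) == EM_OK)
+ *			stat = em_event_group_delete(eg);
+ *	}
+ * @endcode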
+ * + * @see em_event_group_create(), em_event_group_abort() + */ +em_status_t em_event_group_delete(em_event_group_t event_group); + +/** + * Apply event group configuration. + * + * This function sets the event count and notification parameters for the event + * group. After it returns, events sent or assigned to the event group are + * counted against the current count value. Notification events are sent when + * all (counted) events have been processed (count is decremented at EO receive + * return or by calling em_event_group_processing_end()). A new apply call is + * needed to reuse the event group for another cycle (with a new count and + * notifications). + * + * Notification events can optionally be sent to/tagged with another event + * group but not with the same event group that triggered the notifications, + * see em_notif_t for more. + * + * @attention em_event_group_apply() can only be used on a newly created event + * group or when the previous cycle is completed or successfully aborted. + * Application can use em_event_group_is_ready() to detect whether apply is + * allowed but would normally use a notification to setup a new cycle + * (implementation must make sure that when any of the notifications is + * received the group is ready for new apply). + * + * Apply should only be called once per group cycle. + * + * @param event_group Event group + * @param count Number of events in the group (positive integer) + * @param num_notif Number of notification events to send + * @param notif_tbl Table of notifications (events and target queues) + * + * @return EM_OK if successful. + * + * @see em_event_group_create(), em_send_group(), em_event_group_is_ready(), + * em_notif_t + */ +em_status_t em_event_group_apply(em_event_group_t event_group, int count, + int num_notif, const em_notif_t notif_tbl[]); + +/** + * Increment the current event group count. + * + * Increments the event count of the currently active event group (received or + * assigned event). Enables sending new events into the current event group. + * The event count cannot be decremented and this will fail if there is no + * current event group. + * + * @param count Number of events to add to the event group (positive integer) + * + * @return EM_OK if successful. + * + * @see em_send_group(), em_event_group_apply() + */ +em_status_t em_event_group_increment(int count); + +/** + * Checks if the event group is ready for 'apply'. + * + * Returns EM_TRUE (1) if the given event group is ready, i.e. the user can do + * em_event_group_apply() again. A better alternative to this is to use a + * related notification event to reuse the event group (apply can always be + * used when handling a notification event from the event group). + * + * An event group that has been applied a count but no events sent is not + * considered 'ready for apply'. If a change is needed the group has to be + * aborted and then re-applied. + * + * Return value EM_TRUE does not guarantee all notifications are received nor + * handled, but the event group count has reached zero and the event group + * is ready for a new apply. + * + * @param event_group Event group + * + * @return EM_TRUE if the given event group is ready for apply + * + * @see em_event_group_create(), em_event_group_apply() + */ +int em_event_group_is_ready(em_event_group_t event_group); + +/** + * Return the currently active event group. + * + * Returns the current event group or EM_EVENT_GROUP_UNDEF if an event group is + * not active (i.e. 
never activated or deactivated using
+ * em_event_group_processing_end()).
+ *
+ * Can only be used within an EO receive function.
+ *
+ * @return Current event group or EM_EVENT_GROUP_UNDEF
+ *
+ * @see em_event_group_create()
+ */
+em_event_group_t em_event_group_current(void);
+
+/**
+ * Send an event associated with/tagged to an event group.
+ *
+ * Any valid event and destination queue parameters can be used. The event
+ * group indicates which event group the event is tagged to. The event group
+ * has to first be created and applied a count.
+ * One should always send the correct amount of events to an event group, i.e.
+ * matching the applied count.
+ *
+ * Event group is not supported with unscheduled queues.
+ *
+ * It is not allowed to use event references with event groups since assigning
+ * an event that has references to an event group would assign all the
+ * references to the event group resulting in undefined behaviour. E.g. using
+ * em_send_group() to send a reference is wrong.
+ *
+ * @param event Event to send
+ * @param queue Destination queue
+ * @param event_group Event group
+ *
+ * @return EM_OK if successful.
+ *
+ * @see em_send(), em_event_group_create(), em_event_group_apply(),
+ *	em_event_group_increment()
+ */
+em_status_t em_send_group(em_event_t event, em_queue_t queue,
+			  em_event_group_t event_group);
+
+/**
+ * Send multiple events associated with/tagged to an event group.
+ *
+ * This is like em_send_group(), but multiple events can be sent with one call
+ * for potential performance gain.
+ * The call returns the number of events actually sent. A return value equal to
+ * 'num' means that all events were sent. A value less than 'num' means the
+ * events at the end of the given event list were not sent and must be handled
+ * by the application.
+ * The function will not modify the given list of events.
+ *
+ * Event group is not supported with unscheduled queues.
+ *
+ * It is not allowed to use event references with event groups since assigning
+ * an event that has references to an event group would assign all the
+ * references to the event group resulting in undefined behaviour. E.g. using
+ * em_send_group_multi() to send references is wrong.
+ *
+ * @param events List of events to send (i.e. ptr to array of events)
+ * @param num Number of events
+ * @param queue Destination queue
+ * @param event_group Event group
+ *
+ * @return number of events successfully sent (equal to num if all successful)
+ *
+ * @see em_send_group()
+ */
+int em_send_group_multi(const em_event_t events[], int num, em_queue_t queue,
+			em_event_group_t event_group);
+
+/**
+ * Signal early end of processing of the current event group
+ *
+ * This is an optional call that can be used to move the implicit event group
+ * handling (decrementing the count) from exiting the event receive function to
+ * the point of this call - the current event group count is decremented
+ * immediately and if it reaches zero the notifications are also sent. In that
+ * case the group will be ready for a new apply after this returns.
+ *
+ * This impacts the current event group the same way whether it was activated
+ * by receiving a tagged event or by the EO calling em_event_group_assign().
+ *
+ * This call does not change potential atomicity or ordering for the current
+ * event and is a no-operation if called while an event group is not active
+ * (no current group).
+ *
+ * Can only be used within the EO receive function.
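+ *
+ * A minimal sketch (assuming this code runs inside an EO receive function
+ * while an event group is active):
+ * @code
+ *	// event group related work is done: decrement the group count now
+ *	// instead of at receive function return
+ *	em_event_group_processing_end();
+ *
+ *	// ...continue with work unrelated to the event group...
+ * @endcode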
+ */ +void em_event_group_processing_end(void); + +/** + * Assign core local current event group. + * + * The assign functionality can be used to set the core local current event + * group. The event group handling after the assign call is identical to + * the handling of an event group that was originally set by sending an event + * tagged to that event group, i.e. the core local current event group + * is active and will be operated on in a normal way. + * Assign will fail if there already is an active current event group, i.e. + * only one event group can be active at a time (per core). + * + * This needs to be used with care, i.e. match the amount of events applied + * and assigned. + * + * @param event_group An applied event group to assign to + * + * @return EM_OK if assignment was successful + */ +em_status_t em_event_group_assign(em_event_group_t event_group); + +/** + * Abort the ongoing event group. + * + * This is a recovery operation to abort an ongoing event group in case it does + * not get completed. This will reset the group back to a state ready for + * a new apply. Note, that there is a potential race as the group could get + * completed on another thread while executing this (e.g. a delayed event is + * finally received and processed). Implementation will synchronize internal + * state changes, but this call may succeed or fail depending on timing so + * abort should be done with care for recovery purpose only. + * + * Notification events related to the ongoing (to be aborted) cycle can be + * managed as follows + * 1) save possible related notifications using em_event_group_get_notif() + * 2) call em_event_group_abort() + * 3) IF em_event_group_abort() returns EM_OK the operation was successfully + * completed meaning the earlier notifications will not be sent thus the + * saved notifications can be freed or reused. Otherwise the call was made + * too late and the saved notifications must not be touched as they are to + * be sent. + * + * This means the synchronization point is em_event_group_abort(), not + * em_event_group_get_notif() which might return notifications that will still + * be sent. + * + * @attention Related notification events will not be automatically freed in + * any case and must be handled by the application. + * + * @param event_group Event group to abort and reset + * + * @return EM_OK if the call was made early enough to cleanly abort, i.e. + * before the last event was processed. EM_OK also means the + * notifications will not be sent. + */ +em_status_t em_event_group_abort(em_event_group_t event_group); + +/** + * Return notification events currently related to an applied event group. + * + * This returns the current notifications or none (0) if they were already sent + * (event group completed). + * + * @attention This is not a synchronization point, which means + * em_event_group_get_notif() could return notifications which + * are just going to be sent and thus should not be touched. + * + * @param event_group Event group + * @param max_notif Maximum number of notifications to return + * @param[out] notif_tbl Table for notifications to fill + * + * @return Number of returned notifications + * + * @see em_event_group_apply(), em_event_group_abort() + */ +int em_event_group_get_notif(em_event_group_t event_group, + int max_notif, em_notif_t notif_tbl[]); + +/** + * Initialize event group iteration and return the first event group handle. 
+ * + * Can be used to initialize the iteration to retrieve all created event groups + * for debugging or management purposes. Use em_event_group_get_next() after + * this call until it returns EM_EVENT_GROUP_UNDEF. + * A new call to em_event_group_get_first() resets the iteration, which is + * maintained per core (thread). The operation should be completed in one go + * before returning from the EO's event receive function (or start/stop). + * + * The number of event groups (output arg 'num') may not match the amount of + * event groups actually returned by iterating using em_event_group_get_next() + * if event groups are added or removed in parallel by another core. The order + * of the returned event group handles is undefined. + * + * @code + * unsigned int num; + * em_event_group_t eg = em_event_group_get_first(&num); + * while (eg != EM_EVENT_GROUP_UNDEF) { + * eg = em_event_group_get_next(); + * } + * @endcode + * + * @param[out] num Pointer to an unsigned int to store the amount of + * event groups into + * @return The first event group handle or EM_EVENT_GROUP_UNDEF if none exist + * + * @see em_event_group_get_next() + **/ +em_event_group_t +em_event_group_get_first(unsigned int *num); + +/** + * Return the next event group handle. + * + * Continues the event group iteration started by em_event_group_get_first() and + * returns the next event group handle. + * + * @return The next event group handle or EM_EVENT_GROUP_UNDEF if the event + * group iteration is completed (i.e. no more event groups available). + * + * @see em_event_group_get_first() + **/ +em_event_group_t +em_event_group_get_next(void); + +/** + * Convert an event group handle to an unsigned integer + * + * @param event_group Event group handle to be converted + * @return uint64_t value that can be used to print/display the handle + * + * @note This routine is intended to be used for diagnostic purposes + * to enable applications to e.g. generate a printable value that represents + * an em_event_group_t handle. + */ +uint64_t em_event_group_to_u64(em_event_group_t event_group); + +/** + * @} + */ +#ifdef __cplusplus +} +#endif + +#pragma GCC visibility pop +#endif /* EVENT_MACHINE_EVENT_GROUP_H_ */ diff --git a/include/event_machine/api/event_machine_queue_group.h b/include/event_machine/api/event_machine_queue_group.h index f87d60dc..3a276b3a 100644 --- a/include/event_machine/api/event_machine_queue_group.h +++ b/include/event_machine/api/event_machine_queue_group.h @@ -1,409 +1,409 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef EVENT_MACHINE_QUEUE_GROUP_H_ -#define EVENT_MACHINE_QUEUE_GROUP_H_ - -#pragma GCC visibility push(default) - -/** - * @file - * @defgroup em_queue_group Queue group - * Operations on queue groups - * - * A queue group is basically a set of cores (threads) within an EM instance - * allowed to receive events from a queue belonging to that queue group. - * - * @{ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include - -/** - * Create a new queue group to control queue to core mapping, - * asynchronous (non-blocking) - * - * Allocates a new queue group handle with a given core mask. - * Cores added to the queue group can be changed later with - * em_queue_group_modify(). - * - * This operation may be asynchronous, i.e. the creation may complete well after - * this function has returned. Provide notification events, if the application - * needs to know about the actual completion. EM will send notifications when - * the operation has completed. Note that using a queue group before the - * creation has completed may result in undefined behaviour. - * - * The core mask is visible through em_queue_group_get_mask() only after the - * create operation has completed. - * - * Note, that the operation can also happen one core at a time, so an - * intermediate mask may be active momentarily. - * - * Only manipulate the core mask with the access macros defined in - * event_machine_hw_specific.h as the implementation underneath may change. - * - * The given name is copied up to the maximum length of EM_QUEUE_GROUP_NAME_LEN. - * Duplicate names are allowed, but find will then only return the first match. - * The name "default" is reserved for EM_QUEUE_GROUP_DEFAULT. - * - * EM has a default group EM_QUEUE_GROUP_DEFAULT containing all cores running - * this EM instance. It's named "default". - * - * Some systems may have a low number of queue groups available. - * - * @attention Only call em_queue_create() after em_queue_group_create() has - * completed - use notifications to synchronize. Alternatively use - * em_queue_group_create_sync() to be able to create the queue - * directly after creating the queue group in the source code. - * - * @param name Queue group name (optional, NULL ok) - * @param mask Core mask for the queue group - * @param num_notif Number of entries in notif_tbl (use 0 for no notification) - * @param notif_tbl Array of notifications to send as the operation completes - * - * @return Queue group or EM_QUEUE_GROUP_UNDEF on error. 
- * - * @see em_queue_group_find(), em_queue_group_modify(), em_queue_group_delete(), - * em_queue_group_create_sync() - */ -em_queue_group_t -em_queue_group_create(const char *name, const em_core_mask_t *mask, - int num_notif, const em_notif_t notif_tbl[]); - -/** - * Create a new queue group to control queue to core mapping, - * synchronous (blocking). - * - * As em_queue_group_create(), but will not return until the operation is - * complete. - * - * Note that the function is blocking and will not return until the operation - * has completed across all concerned EM cores. - * Sync-API calls can block the core for a long (indefinite) time, thus they - * should not be used to make runtime changes on real time EM cores - consider - * the async variants of the APIs in these cases instead. - * While one core is calling a sync-API function, the others must be running the - * EM dispatch loop to be able to receive and handle the sync-API request events - * sent internally. - * Use the sync-APIs mainly to simplify application start-up or teardown. - * - * @param name Queue group name (optional, NULL ok) - * @param mask Core mask for the queue group - * - * @return Queue group or EM_QUEUE_GROUP_UNDEF on error. - * - * @see em_queue_group_create() for an asynchronous version of the API - */ -em_queue_group_t -em_queue_group_create_sync(const char *name, const em_core_mask_t *mask); - -/** - * Delete the queue group, asynchronous (non-blocking) - * - * Removes all cores from the queue group and free's the handle for re-use. - * All queues in the queue group must be deleted with em_queue_delete() before - * deleting the queue group. - * - * @param queue_group Queue group to delete - * @param num_notif Number of entries in notif_tbl (0 for no notification) - * @param notif_tbl Array of notifications to send as the operation completes - * - * @return EM_OK if successful. - * - * @see em_queue_group_create(), em_queue_group_modify(), em_queue_delete(), - * em_queue_group_delete_sync() - */ -em_status_t -em_queue_group_delete(em_queue_group_t queue_group, - int num_notif, const em_notif_t notif_tbl[]); - -/** - * Delete the queue group, synchronous (blocking). - * - * As em_queue_group_delete(), but will not return until the operation is - * complete. - * - * Note that the function is blocking and will not return until the operation - * has completed across all concerned EM cores. - * Sync-API calls can block the core for a long (indefinite) time, thus they - * should not be used to make runtime changes on real time EM cores - consider - * the async variants of the APIs in these cases instead. - * While one core is calling a sync-API function, the others must be running the - * EM dispatch loop to be able to receive and handle the sync-API request events - * sent internally. - * Use the sync-APIs mainly to simplify application start-up or teardown. - * - * @param queue_group Queue group to delete - * - * @return EM_OK if successful. - * - * @see em_queue_group_delete() for an asynchronous version of the API - */ -em_status_t -em_queue_group_delete_sync(em_queue_group_t queue_group); - -/** - * Modify the core mask of an existing queue group, asynchronous (non-blocking) - * - * The function compares the new core mask to the current mask and changes the - * core mapping for the given queue group accordingly. - * - * This operation may be asynchronous, i.e. the change may complete well after - * this function has returned. 
Provide notification events, if the application - * needs to know about the actual completion. EM will send notifications when - * the operation has completed. - * - * The new core mask is visible through em_queue_group_get_mask() only after - * the modify operation has completed. - * - * Note, that depending on the system, the change can also happen one core at - * a time, so an intermediate mask may be active momentarily. - * - * Only manipulate core mask with the access macros defined in - * event_machine_hw_specific.h as the implementation underneath may change. - * - * @param queue_group Queue group to modify - * @param new_mask New core mask - * @param num_notif Number of entries in notif_tbl (0 for no notification) - * @param notif_tbl Array of notifications to send as the operation completes - * - * @return EM_OK if successful. - * - * @see em_queue_group_create(), em_queue_group_find(), em_queue_group_delete() - * em_queue_group_get_mask(), em_queue_group_modify_sync() - */ -em_status_t -em_queue_group_modify(em_queue_group_t queue_group, - const em_core_mask_t *new_mask, - int num_notif, const em_notif_t notif_tbl[]); - -/** - * Modify core mask of an existing queue group, synchronous (blocking). - * - * As em_queue_group_modify(), but will not return until the operation is - * complete. - * - * Note that the function is blocking and will not return until the operation - * has completed across all concerned EM cores. - * Sync-API calls can block the core for a long (indefinite) time, thus they - * should not be used to make runtime changes on real time EM cores - consider - * the async variants of the APIs in these cases instead. - * While one core is calling a sync-API function, the others must be running the - * EM dispatch loop to be able to receive and handle the sync-API request events - * sent internally. - * Use the sync-APIs mainly to simplify application start-up or teardown. - * - * @param queue_group Queue group to modify - * @param new_mask New core mask - * - * @return EM_OK if successful. - * - * @see em_queue_group_modify() for an asynchronous version of the API - */ -em_status_t -em_queue_group_modify_sync(em_queue_group_t queue_group, - const em_core_mask_t *new_mask); - -/** - * Finds a queue group by name. - * - * Finds a queue group by the given name (exact match). An empty string will not - * match anything. The search is case sensitive. If there are duplicate names, - * this will return the first match only. - * - * @param name Name of the queue qroup to find - * - * @return Queue group or EM_QUEUE_GROUP_UNDEF if not found - * - * @see em_queue_group_create() - */ -em_queue_group_t -em_queue_group_find(const char *name); - -/** - * Get the current core mask for a queue group. - * - * This returns the situation at the moment of the inquiry. The result may not - * be up-to-date if another core is modifying the queue group at the same time. - * The application may need to synchronize group modifications. - * - * @param queue_group Queue group - * @param mask Core mask for the queue group - * - * @return EM_OK if successful. - * - * @see em_queue_group_create(), em_queue_group_modify() - */ -em_status_t -em_queue_group_get_mask(em_queue_group_t queue_group, em_core_mask_t *mask); - -/** - * Get the name of a queue group. - * - * A copy of the name string (up to 'maxlen' characters) is written to the user - * given buffer. The string is always null terminated, even if the given buffer - * length is less than the name length. 
- * - * The function returns '0' and writes an empty string if the queue group has - * no name. - * - * @param queue_group Queue group id - * @param[out] name Destination buffer - * @param maxlen Maximum length (including the terminating '\0') - * - * @return Number of characters written (excludes the terminating '\0'). - */ -size_t -em_queue_group_get_name(em_queue_group_t queue_group, - char *name, size_t maxlen); - -/** - * Initialize queue group iteration and return the first queue group handle. - * - * Can be used to initialize the iteration to retrieve all created queue groups - * for debugging or management purposes. Use em_queue_group_get_next() after - * this call until it returns EM_QUEUE_GROUP_UNDEF. - * A new call to em_queue_group_get_first() resets the iteration, which is - * maintained per core (thread). The operation should be completed in one go - * before returning from the EO's event receive function (or start/stop). - * - * The number of queue groups (output arg 'num') may not match the amount of - * queue groups actually returned by iterating using em_event_group_get_next() - * if queue groups are added or removed in parallel by another core. The order - * of the returned queue group handles is undefined. - * - * @code - * unsigned int num; - * em_queue_group_t qg = em_queue_group_get_first(&num); - * while (qg != EM_QUEUE_GROUP_UNDEF) { - * qg = em_queue_group_get_next(); - * } - * @endcode - * - * @param[out] num Pointer to an unsigned int to store the amount of - * queue groups into - * @return The first queue group handle or EM_QUEUE_GROUP_UNDEF if none exist - * - * @see em_queue_group_get_next() - **/ -em_queue_group_t -em_queue_group_get_first(unsigned int *num); - -/** - * Continues the queue group iteration started by em_queue_group_get_first() and - * returns the next queue group handle. - * - * @return The next queue group handle or EM_QUEUE_GROUP_UNDEF if the queue - * group iteration is completed (i.e. no more queue groups available). - * - * @see em_queue_group_get_first() - **/ -em_queue_group_t -em_queue_group_get_next(void); - -/** - * Initialize iteration of a queue group's queues and return the first - * queue handle. - * - * Can be used to initialize the iteration to retrieve all queues associated - * with the given queue group for debugging or management purposes. - * Use em_queue_group_queue_get_next() after this call until it returns - * EM_QUEUE_UNDEF. - * A new call to em_queue_group_queue_get_first() resets the iteration, which is - * maintained per core (thread). The operation should be started and completed - * in one go before returning from the EO's event receive function (or - * start/stop). - * - * The number of queues in the queue group (output arg 'num') may not match the - * amount of queues actually returned by iterating using - * em_queue_group_queue_get_next() if queues are added or removed in parallel by - * another core. The order of the returned queue handles is undefined. - * - * Simplified example: - * @code - * unsigned int num; - * em_queue_t q = em_queue_group_queue_get_first(&num, queue_group); - * while (q != EM_QUEUE_UNDEF) { - * q = em_queue_group_queue_get_next(); - * } - * @endcode - * - * @param[out] num Pointer to an unsigned int to store the amount of - * queue groups into. - * @param queue_group Queue group handle - * - * @return The first queue handle or EM_QUEUE_UNDEF if none exist or the - * queue group is invalid. 
- *
- * @see em_queue_group_queue_get_next()
- **/
-em_queue_t
-em_queue_group_queue_get_first(unsigned int *num, em_queue_group_t queue_group);
-
-/**
- * Return the queue group's next queue handle.
- *
- * Continues the queue iteration started by em_queue_group_queue_get_first() and
- * returns the next queue handle in the queue group.
- *
- * @return The next queue handle or EM_QUEUE_UNDEF if the queue iteration is
- *	completed (i.e. no more queues available for this queue group).
- *
- * @see em_queue_group_queue_get_first()
- **/
-em_queue_t
-em_queue_group_queue_get_next(void);
-
-/**
- * Convert a queue_group handle to an unsigned integer
- *
- * @param queue_group queue_group handle to be converted
- * @return uint64_t value that can be used to print/display the handle
- *
- * @note This routine is intended to be used for diagnostic purposes
- * to enable applications to e.g. generate a printable value that represents
- * an em_queue_group_t handle.
- */
-uint64_t em_queue_group_to_u64(em_queue_group_t queue_group);
-
-/**
- * @}
- */
-#ifdef __cplusplus
-}
-#endif
-
-#pragma GCC visibility pop
-#endif /* EVENT_MACHINE_QUEUE_GROUP_H_ */
+/*
+ * Copyright (c) 2015, Nokia Solutions and Networks
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of the copyright holder nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef EVENT_MACHINE_QUEUE_GROUP_H_
+#define EVENT_MACHINE_QUEUE_GROUP_H_
+
+#pragma GCC visibility push(default)
+
+/**
+ * @file
+ * @defgroup em_queue_group Queue group
+ * Operations on queue groups
+ *
+ * A queue group is basically a set of cores (threads) within an EM instance
+ * allowed to receive events from a queue belonging to that queue group.
+ *
+ * @{
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <event_machine/api/event_machine_types.h>
+#include <event_machine/platform/event_machine_hw_types.h>
+
+/**
+ * Create a new queue group to control queue-to-core mapping,
+ * asynchronous (non-blocking)
+ *
+ * Allocates a new queue group handle with a given core mask.
+ * Cores added to the queue group can be changed later with
+ * em_queue_group_modify().
+ *
+ * This operation may be asynchronous, i.e. 
the creation may complete well after
+ * this function has returned. Provide notification events, if the application
+ * needs to know about the actual completion. EM will send notifications when
+ * the operation has completed. Note that using a queue group before the
+ * creation has completed may result in undefined behaviour.
+ *
+ * The core mask is visible through em_queue_group_get_mask() only after the
+ * create operation has completed.
+ *
+ * Note, that the operation can also happen one core at a time, so an
+ * intermediate mask may be active momentarily.
+ *
+ * Only manipulate the core mask with the access macros defined in
+ * event_machine_hw_specific.h as the implementation underneath may change.
+ *
+ * The given name is copied up to the maximum length of EM_QUEUE_GROUP_NAME_LEN.
+ * Duplicate names are allowed, but find will then only return the first match.
+ * The name "default" is reserved for EM_QUEUE_GROUP_DEFAULT.
+ *
+ * EM has a default group EM_QUEUE_GROUP_DEFAULT containing all cores running
+ * this EM instance. It's named "default".
+ *
+ * Some systems may have a low number of queue groups available.
+ *
+ * @attention Only call em_queue_create() after em_queue_group_create() has
+ *	      completed - use notifications to synchronize. Alternatively use
+ *	      em_queue_group_create_sync() to be able to create the queue
+ *	      directly after creating the queue group in the source code.
+ *
+ * @param name Queue group name (optional, NULL ok)
+ * @param mask Core mask for the queue group
+ * @param num_notif Number of entries in notif_tbl (use 0 for no notification)
+ * @param notif_tbl Array of notifications to send as the operation completes
+ *
+ * @return Queue group or EM_QUEUE_GROUP_UNDEF on error.
+ *
+ * @see em_queue_group_find(), em_queue_group_modify(), em_queue_group_delete(),
+ *	em_queue_group_create_sync()
+ */
+em_queue_group_t
+em_queue_group_create(const char *name, const em_core_mask_t *mask,
+		      int num_notif, const em_notif_t notif_tbl[]);
+
+/**
+ * Create a new queue group to control queue-to-core mapping,
+ * synchronous (blocking).
+ *
+ * As em_queue_group_create(), but will not return until the operation is
+ * complete.
+ *
+ * Note that the function is blocking and will not return until the operation
+ * has completed across all concerned EM cores.
+ * Sync-API calls can block the core for a long (indefinite) time, thus they
+ * should not be used to make runtime changes on real time EM cores - consider
+ * the async variants of the APIs in these cases instead.
+ * While one core is calling a sync-API function, the others must be running the
+ * EM dispatch loop to be able to receive and handle the sync-API request events
+ * sent internally.
+ * Use the sync-APIs mainly to simplify application start-up or teardown.
+ *
+ * @param name Queue group name (optional, NULL ok)
+ * @param mask Core mask for the queue group
+ *
+ * @return Queue group or EM_QUEUE_GROUP_UNDEF on error.
+ *
+ * @see em_queue_group_create() for an asynchronous version of the API
+ */
+em_queue_group_t
+em_queue_group_create_sync(const char *name, const em_core_mask_t *mask);
+
+/**
+ * Delete the queue group, asynchronous (non-blocking)
+ *
+ * Removes all cores from the queue group and frees the handle for reuse.
+ * All queues in the queue group must be deleted with em_queue_delete() before
+ * deleting the queue group.
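+ *
+ * A sketch of an asynchronous delete (assuming all queues of 'qgrp' are
+ * already deleted and the application has prepared 'notif_event' and
+ * 'notif_queue' for the completion notification):
+ * @code
+ *	em_notif_t notif_tbl[1];
+ *
+ *	notif_tbl[0].event = notif_event;
+ *	notif_tbl[0].queue = notif_queue;
+ *	notif_tbl[0].egroup = EM_EVENT_GROUP_UNDEF;
+ *
+ *	em_status_t stat = em_queue_group_delete(qgrp, 1, notif_tbl);
+ * @endcode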
+ * + * @param queue_group Queue group to delete + * @param num_notif Number of entries in notif_tbl (0 for no notification) + * @param notif_tbl Array of notifications to send as the operation completes + * + * @return EM_OK if successful. + * + * @see em_queue_group_create(), em_queue_group_modify(), em_queue_delete(), + * em_queue_group_delete_sync() + */ +em_status_t +em_queue_group_delete(em_queue_group_t queue_group, + int num_notif, const em_notif_t notif_tbl[]); + +/** + * Delete the queue group, synchronous (blocking). + * + * As em_queue_group_delete(), but will not return until the operation is + * complete. + * + * Note that the function is blocking and will not return until the operation + * has completed across all concerned EM cores. + * Sync-API calls can block the core for a long (indefinite) time, thus they + * should not be used to make runtime changes on real time EM cores - consider + * the async variants of the APIs in these cases instead. + * While one core is calling a sync-API function, the others must be running the + * EM dispatch loop to be able to receive and handle the sync-API request events + * sent internally. + * Use the sync-APIs mainly to simplify application start-up or teardown. + * + * @param queue_group Queue group to delete + * + * @return EM_OK if successful. + * + * @see em_queue_group_delete() for an asynchronous version of the API + */ +em_status_t +em_queue_group_delete_sync(em_queue_group_t queue_group); + +/** + * Modify the core mask of an existing queue group, asynchronous (non-blocking) + * + * The function compares the new core mask to the current mask and changes the + * core mapping for the given queue group accordingly. + * + * This operation may be asynchronous, i.e. the change may complete well after + * this function has returned. Provide notification events, if the application + * needs to know about the actual completion. EM will send notifications when + * the operation has completed. + * + * The new core mask is visible through em_queue_group_get_mask() only after + * the modify operation has completed. + * + * Note, that depending on the system, the change can also happen one core at + * a time, so an intermediate mask may be active momentarily. + * + * Only manipulate core mask with the access macros defined in + * event_machine_hw_specific.h as the implementation underneath may change. + * + * @param queue_group Queue group to modify + * @param new_mask New core mask + * @param num_notif Number of entries in notif_tbl (0 for no notification) + * @param notif_tbl Array of notifications to send as the operation completes + * + * @return EM_OK if successful. + * + * @see em_queue_group_create(), em_queue_group_find(), em_queue_group_delete() + * em_queue_group_get_mask(), em_queue_group_modify_sync() + */ +em_status_t +em_queue_group_modify(em_queue_group_t queue_group, + const em_core_mask_t *new_mask, + int num_notif, const em_notif_t notif_tbl[]); + +/** + * Modify core mask of an existing queue group, synchronous (blocking). + * + * As em_queue_group_modify(), but will not return until the operation is + * complete. + * + * Note that the function is blocking and will not return until the operation + * has completed across all concerned EM cores. + * Sync-API calls can block the core for a long (indefinite) time, thus they + * should not be used to make runtime changes on real time EM cores - consider + * the async variants of the APIs in these cases instead. 
+ * While one core is calling a sync-API function, the others must be running the
+ * EM dispatch loop to be able to receive and handle the sync-API request events
+ * sent internally.
+ * Use the sync-APIs mainly to simplify application start-up or teardown.
+ *
+ * @param queue_group Queue group to modify
+ * @param new_mask New core mask
+ *
+ * @return EM_OK if successful.
+ *
+ * @see em_queue_group_modify() for an asynchronous version of the API
+ */
+em_status_t
+em_queue_group_modify_sync(em_queue_group_t queue_group,
+			   const em_core_mask_t *new_mask);
+
+/**
+ * Finds a queue group by name.
+ *
+ * Finds a queue group by the given name (exact match). An empty string will not
+ * match anything. The search is case sensitive. If there are duplicate names,
+ * this will return the first match only.
+ *
+ * @param name Name of the queue group to find
+ *
+ * @return Queue group or EM_QUEUE_GROUP_UNDEF if not found
+ *
+ * @see em_queue_group_create()
+ */
+em_queue_group_t
+em_queue_group_find(const char *name);
+
+/**
+ * Get the current core mask for a queue group.
+ *
+ * This returns the situation at the moment of the inquiry. The result may not
+ * be up-to-date if another core is modifying the queue group at the same time.
+ * The application may need to synchronize group modifications.
+ *
+ * @param queue_group Queue group
+ * @param mask Core mask for the queue group
+ *
+ * @return EM_OK if successful.
+ *
+ * @see em_queue_group_create(), em_queue_group_modify()
+ */
+em_status_t
+em_queue_group_get_mask(em_queue_group_t queue_group, em_core_mask_t *mask);
+
+/**
+ * Get the name of a queue group.
+ *
+ * A copy of the name string (up to 'maxlen' characters) is written to the user
+ * given buffer. The string is always null terminated, even if the given buffer
+ * length is less than the name length.
+ *
+ * The function returns '0' and writes an empty string if the queue group has
+ * no name.
+ *
+ * @param queue_group Queue group id
+ * @param[out] name Destination buffer
+ * @param maxlen Maximum length (including the terminating '\0')
+ *
+ * @return Number of characters written (excludes the terminating '\0').
+ */
+size_t
+em_queue_group_get_name(em_queue_group_t queue_group,
+			char *name, size_t maxlen);
+
+/**
+ * Initialize queue group iteration and return the first queue group handle.
+ *
+ * Can be used to initialize the iteration to retrieve all created queue groups
+ * for debugging or management purposes. Use em_queue_group_get_next() after
+ * this call until it returns EM_QUEUE_GROUP_UNDEF.
+ * A new call to em_queue_group_get_first() resets the iteration, which is
+ * maintained per core (thread). The operation should be completed in one go
+ * before returning from the EO's event receive function (or start/stop).
+ *
+ * The number of queue groups (output arg 'num') may not match the amount of
+ * queue groups actually returned by iterating using em_queue_group_get_next()
+ * if queue groups are added or removed in parallel by another core. The order
+ * of the returned queue group handles is undefined.
+ *
+ * @code
+ *	unsigned int num;
+ *	em_queue_group_t qg = em_queue_group_get_first(&num);
+ *	while (qg != EM_QUEUE_GROUP_UNDEF) {
+ *		qg = em_queue_group_get_next();
+ *	}
+ * @endcode
+ *
+ * @param[out] num Pointer to an unsigned int to store the amount of
+ *	queue groups into
+ * @return The first queue group handle or EM_QUEUE_GROUP_UNDEF if none exist
+ *
+ * @see em_queue_group_get_next()
+ **/
+em_queue_group_t
+em_queue_group_get_first(unsigned int *num);
+
+/**
+ * Continues the queue group iteration started by em_queue_group_get_first() and
+ * returns the next queue group handle.
+ *
+ * @return The next queue group handle or EM_QUEUE_GROUP_UNDEF if the queue
+ *	group iteration is completed (i.e. no more queue groups available).
+ *
+ * @see em_queue_group_get_first()
+ **/
+em_queue_group_t
+em_queue_group_get_next(void);
+
+/**
+ * Initialize iteration of a queue group's queues and return the first
+ * queue handle.
+ *
+ * Can be used to initialize the iteration to retrieve all queues associated
+ * with the given queue group for debugging or management purposes.
+ * Use em_queue_group_queue_get_next() after this call until it returns
+ * EM_QUEUE_UNDEF.
+ * A new call to em_queue_group_queue_get_first() resets the iteration, which is
+ * maintained per core (thread). The operation should be started and completed
+ * in one go before returning from the EO's event receive function (or
+ * start/stop).
+ *
+ * The number of queues in the queue group (output arg 'num') may not match the
+ * amount of queues actually returned by iterating using
+ * em_queue_group_queue_get_next() if queues are added or removed in parallel by
+ * another core. The order of the returned queue handles is undefined.
+ *
+ * Simplified example:
+ * @code
+ *	unsigned int num;
+ *	em_queue_t q = em_queue_group_queue_get_first(&num, queue_group);
+ *	while (q != EM_QUEUE_UNDEF) {
+ *		q = em_queue_group_queue_get_next();
+ *	}
+ * @endcode
+ *
+ * @param[out] num Pointer to an unsigned int to store the amount of
+ *	queues into.
+ * @param queue_group Queue group handle
+ *
+ * @return The first queue handle or EM_QUEUE_UNDEF if none exist or the
+ *	queue group is invalid.
+ *
+ * @see em_queue_group_queue_get_next()
+ **/
+em_queue_t
+em_queue_group_queue_get_first(unsigned int *num, em_queue_group_t queue_group);
+
+/**
+ * Return the queue group's next queue handle.
+ *
+ * Continues the queue iteration started by em_queue_group_queue_get_first() and
+ * returns the next queue handle in the queue group.
+ *
+ * @return The next queue handle or EM_QUEUE_UNDEF if the queue iteration is
+ *	completed (i.e. no more queues available for this queue group).
+ *
+ * @see em_queue_group_queue_get_first()
+ **/
+em_queue_t
+em_queue_group_queue_get_next(void);
+
+/**
+ * Convert a queue_group handle to an unsigned integer
+ *
+ * @param queue_group queue_group handle to be converted
+ * @return uint64_t value that can be used to print/display the handle
+ *
+ * @note This routine is intended to be used for diagnostic purposes
+ * to enable applications to e.g. generate a printable value that represents
+ * an em_queue_group_t handle.
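+ *
+ * For example (a sketch, assuming stdio.h and inttypes.h are included):
+ * @code
+ *	printf("Queue group handle: %" PRIx64 "\n",
+ *	       em_queue_group_to_u64(queue_group));
+ * @endcode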
+ */ +uint64_t em_queue_group_to_u64(em_queue_group_t queue_group); + +/** + * @} + */ +#ifdef __cplusplus +} +#endif + +#pragma GCC visibility pop +#endif /* EVENT_MACHINE_QUEUE_GROUP_H_ */ diff --git a/include/event_machine/api/event_machine_timer.h b/include/event_machine/api/event_machine_timer.h index 1c004b4b..be9f0751 100644 --- a/include/event_machine/api/event_machine_timer.h +++ b/include/event_machine/api/event_machine_timer.h @@ -1,1129 +1,1147 @@ -/* - * Copyright (c) 2016-2024, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef EVENT_MACHINE_TIMER_H_ -#define EVENT_MACHINE_TIMER_H_ - -#pragma GCC visibility push(default) - -/** - * @file - * Event Machine timer - * @defgroup em_timer Event timer - * Event Machine timer - * @{ - * - * The timer API can be used to request an event to be sent to a specified - * queue at a specified time once (one-shot) or at regular intervals (periodic). - * A timer needs to be created first - it represents a collection of timeouts - * with certain attributes (e.g. timeout resolution and maximum period). - * A timer can be mapped to a HW resource on an SoC, thus the number of timers, - * capabilities and time bases are system specific. Typically only a few - * timers are supported. - * The application can specify required capabilities when a timer is created. - * The creation will fail if the implementation cannot fulfill the required - * values. Timers are typically created once at system startup. - * - * A timer is a shared resource with proper synchronization for concurrent - * multi-thread use. It is possible to exclude all multi-thread protections if a - * timer is used exclusively by a single thread (for potential performance - * gains). This is done by setting EM_TIMER_FLAG_PRIVATE when creating a timer. - * Setting this flag means that the application must ensure that only a single - * thread is using the timer (this also includes the receiver of periodic - * timeouts due to the ack-functionality). 
This private-mode is not necessarily - * implemented on all systems, in which case the flag is ignored as it will not - * cause any functional difference. - * - * Timeouts (tmo) can be created once a timer exists. Creating a timeout - * allocates the resources needed to serve the timeout, but does not arm it. - * This makes it possible to pre-create timeout(s) and set the expiry - * later at runtime. This can improve performance but also minimizes the - * possibility that the runtime call setting the expiry would fail, as resources - * have already been reserved beforehand. - * - * A pending timeout can be cancelled. Note that there is no way to cancel an - * expired timeout for which the event has already been sent but not yet - * received by the application. Canceling in this case will return an error - * to enable the application to detect the situation. For a periodic timer, - * a cancel will stop further timeouts, but may not be able to prevent the - * latest event from being received. An active timeout cannot be altered without - * canceling it first. - * - * A timeout can be re-used after it has been received or successfully - * cancelled. Timeouts need to be deleted after use. Deletion frees the - * resources reserved during creation. - * - * The timeout value is an abstract system and timer dependent tick count. - * It is assumed that the tick count increases with a static frequency. - * The frequency can be inquired at runtime for time calculations, e.g. tick - * frequency divided by 1000 gives ticks for 1ms. Tick frequency is at least - * equal to the resolution, but can also be higher (implementation can quantize - * ticks to any underlying implementation). Supported resolution can also be - * inquired. - * A clock source can be specified when creating a timer. It defines the time - * base of the timer for systems with multiple sources implemented (optional). - * EM_TIMER_CLKSRC_DEFAULT is a portable value that implements a basic - * monotonic time, that will not wrap back to zero in any reasonable uptime. - * - * The major event types EM_EVENT_TYPE_SW, EM_EVENT_TYPE_PACKET and - * EM_EVENT_TYPE_TIMER can be used as a timeout indication. The type - * EM_EVENT_TYPE_TIMER is alternative to EM_EVENT_TYPE_SW and works the same - * way. Additionally for ring timer only, the type EM_EVENT_TYPE_TIMER_IND - * is used. This is a special indication event without visible payload. - * - * A periodic timer requires the application to acknowledge each received - * timeout event after it has been processed. The acknowledgment activates the - * next timeout and compensates for the processing delay to keep the original - * interval. This creates a flow control mechanism and also protects the event - * handling from races if the same event is reused every time, the next - * timeout will not be sent before the previous has been acknowledged. - * The event to be received for each periodic timeout can also be different as - * the next event is given by the application with the acknowledge. - * The target queue cannot be modified after the timeout has been created. - * - * If the acknowledgment of a periodic timeout is done too late (after the next - * period has already passed), the default action is to skip the missed timeout - * slot(s) and arm for the next valid slot. If the application never wants to - * skip a missed timeout it can set the flag EM_TMO_FLAG_NOSKIP when creating a - * timeout. 
This causes each acknowledgment to schedule an immediate timeout - * event until all the missed time slots have been served. This keeps the number - * of timeouts as expected but may cause an event storm if a long processing - * delay has occurred. - * - * The timeout handle is needed when acknowledging a periodic timeout event. - * Because any event can be used for the timeout, the application must itself - * provide a way to derive the timeout handle from the received timeout event. - * A typical way is to include the tmo handle within the timeout event. - * Application also needs to have a mechanism to detect which event is a - * periodic timeout to be able to call acknowledge. - * - * If the timeout tick value given to timeout start points to the past or is too - * close to current time then error code EM_ERR_TOONEAR is returned. In this - * case EM will not call error handler to let application decide whether - * it is an error or if it will try again with updated target time. - * - * There is an alternative periodic ring timer. As it uses different abstraction - * it is created and started via separate ring specific APIs. It has three main - * differencies to the regular periodic timeouts: - * 1. Only a pre-defined read-only event type can be used and is provided - * by the timer (EM_EVENT_TYPE_TIMER_IND). - * 2. Flow control is not supported. Some implementations may have it, - * but the specification does not quarantee any so the user needs to be - * prepared to see the same event enqueued multiple times if handling of - * the received timeouts is not fast enough - * 2. A limited set of period times are supported per timer (base rate or - * an integer multiple of it only) - * - * Ring timers can be abstracted as a clock face ticking the pointer forward. - * One cycle around is the base rate (minimum rate). The same timeout can be - * inserted into multiple locations evenly spread within the clock face thus - * multiplying the base rate. The starting offset can be adjusted only up to - * one timeout period. - * Depending on platform, this mode may provide better integration with HW and - * thus have less runtime overhead. However, as it exposes a potential queue - * overflow and a race hazard (race avoidable by using atomic queue as target), - * the regular periodic timer is recommended as a default. - * - * Example usage - * @code - * - * // This would typically be done at application init. - * // Accept all defaults but change the name - * em_timer_attr_t attr; - * em_timer_attr_init(&attr); - * strncpy(attr.name, "myTimer", EM_TIMER_NAME_LEN); - * em_timer_t tmr = em_timer_create(&attr); - * if(tmr == EM_TIMER_UNDEF) { - * // handle error here or via error handler - * } - * - * // At runtime - create a timeout resource. - * // Can be done in advance to save time if the target queue is known. - * em_tmo_t tmo = em_tmo_create(tmr, EM_TIMER_FLAG_ONESHOT, target_queue); - * if(tmo == EM_TMO_UNDEF) { - * // no such timer or out of resources - * // handle error here or via error handler - * } - * - * // Get the timer tick frequency - * uint64_t hz = em_timer_get_freq(tmr); - * - * // Activate a 10ms timeout from now. - * // Very unlikely to fail with valid arguments. - * if (em_tmo_set_rel(tmo, hz / 100, my_tmo_event) != EM_OK) { - * // handle error here or via error handler - * } - * - * @endcode - * - */ -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** Deprecated - * Major EM Timer API version. Marks possibly backwards incompatible changes. 
- * EM timer is now part of EM API. Use EM_API_VERSION_MAJOR instead. - */ -#define EM_TIMER_API_VERSION_MAJOR EM_API_VERSION_MAJOR -/** Deprecated - * Minor EM Timer API version. Marks possibly backwards incompatible changes. - * EM Timer is now part of EM API. Use EM_API_VERSION_MINOR instead. - */ -#define EM_TIMER_API_VERSION_MINOR EM_API_VERSION_MINOR - -/** - * @typedef em_timer_t - * System specific type for a timer handle. - */ - -/** - * @typedef em_tmo_t - * System specific type for a timeout handle. - */ - -/** - * @typedef em_timer_flag_t - * System specific type for timer flags. - * This is system specific, but all implementations must define - * EM_TIMER_FLAG_DEFAULT and EM_TIMER_FLAG_PRIVATE, of which the latter is used - * to skip API synchronization for single threaded apps. - * Flags can be combined by bitwise OR. - */ - -/** - * @typedef em_tmo_flag_t - * System specific enum type for timeout flags. - * This is system specific, but all implementations must define - * EM_TMO_FLAG_ONESHOT, EM_TMO_FLAG_PERIODIC and EM_TMO_FLAG_NOSKIP. - * Flags can be combined by bitwise OR. - */ - -/** - * @typedef em_timer_clksrc_t - * System specific enum type for timer clock source. - * This is system specific, but all implementations must define - * EM_TIMER_CLKSRC_DEFAULT. - */ - -/** - * Visible state of a timeout - */ -typedef enum em_tmo_state_t { - EM_TMO_STATE_UNKNOWN = 0, - EM_TMO_STATE_IDLE = 1, /**< just created or canceled */ - EM_TMO_STATE_ACTIVE = 2, /**< armed */ - EM_TMO_STATE_INACTIVE = 3 /**< oneshot expired */ -} em_tmo_state_t; - -/** - * Type returned by em_tmo_get_type() - */ -typedef enum em_tmo_type_t { - EM_TMO_TYPE_NONE = 0, /**< unknown or not timer-related event */ - EM_TMO_TYPE_ONESHOT = 1, /**< event is oneshot timeout indication */ - EM_TMO_TYPE_PERIODIC = 2, /**< event is periodic timeout indication */ -} em_tmo_type_t; - -/** - * The timer tick has HW and timer specific meaning, but the type is always a - * 64-bit integer and is normally assumed to be monotonic and not to wrap - * around. Exceptions with exotic extra timers should be clearly documented. - */ -typedef uint64_t em_timer_tick_t; - -/** - * Fractional 64-bit unsigned value for timer frequency. - * - */ -typedef struct em_fract_u64_t { - /** Int */ - uint64_t integer; - - /** Numerator. Set 0 for integers */ - uint64_t numer; - - /** Denominator */ - uint64_t denom; -} em_fract_u64_t; - -/** - * Type for timer resolution parameters - * - * This structure is used to group timer resolution parameters that may - * affect each other. - * All time values are ns. - * - * @note This is used both as capability and configuration. When used as configuration - * either res_ns or res_hz must be 0 (for em_timer_create). - * @see em_timer_capability, em_timer_create - */ -typedef struct em_timer_res_param_t { - /** Clock source (system specific) */ - em_timer_clksrc_t clk_src; - /** resolution, ns */ - uint64_t res_ns; - /** resolution, hz */ - uint64_t res_hz; - /** minimum timeout, ns */ - uint64_t min_tmo; - /** maximum timeout, ns */ - uint64_t max_tmo; -} em_timer_res_param_t; - -/** - * Timer ring timing parameters. - * - */ -typedef struct em_timer_ring_param_t { - /** Clock source (system specific) */ - em_timer_clksrc_t clk_src; - /** Base rate, i.e. minimum period rate */ - em_fract_u64_t base_hz; - /** Maximum base rate multiplier needed. 
1 for single rate = base_hz */ - uint64_t max_mul; - /** Resolution */ - uint64_t res_ns; -} em_timer_ring_param_t; - -/** - * Structure used to create a timer or inquire its configuration later. - * - * This needs to be initialized with em_timer_attr_init(), which fills default - * values to each field. After that the values can be modified as needed. - * Values are considered a requirement, e.g. setting 'resparam.res_ns' to 1000(ns) - * requires at least 1us resolution. The timer creation will fail if the implementation - * cannot support such resolution (like only goes down to 1500ns). - * The implementation is free to provide better than requested, but not worse. - * - * To know the implementation specific limits use em_timer_capability and em_timer_res_capability. - * - * When creating the alternative periodic ring timer, this needs to be initialized - * with em_timer_ring_attr_init instead. EM_TIMER_FLAG_RING will be set by - * em_timer_ring_attr_init so it does not need to be manually set. - * - * @see em_timer_attr_init, em_timer_ring_attr_init, em_timer_capability - */ -typedef struct em_timer_attr_t { - /** Resolution parameters. Set when not creating periodic ring. - * This gets cleared by em_timer_ring_attr_init - */ - em_timer_res_param_t resparam; - /** Maximum simultaneous timeouts */ - uint32_t num_tmo; - /** Extra flags. A set flag is a requirement */ - em_timer_flag_t flags; - /** Optional name for this timer */ - char name[EM_TIMER_NAME_LEN]; - - /** - * used when creating alternative periodic ring timer. - * Cleared by em_timer_attr_init - */ - em_timer_ring_param_t ringparam; - - /** - * Internal check - don't touch! - * - * EM will verify that em_timer_attr_init() has been called before - * creating a timer - */ - uint32_t __internal_check; -} em_timer_attr_t; - -/** - * Timeout statistics counters - * - * Some fields relate to periodic timeout only (0 on one-shots) and vice versa. - * New fields may be added later at the end. - */ -typedef struct em_tmo_stats_t { - /** number of periodic ack() calls */ - uint64_t num_acks; - /** number of delayed periodic ack() calls. 0 with ring timer */ - uint64_t num_late_ack; - /** number of skipped periodic timeslots due to late ack. 0 with ring timer */ - uint64_t num_period_skips; -} em_tmo_stats_t; - -/** - * Timer capability info - */ -typedef struct em_timer_capability_t { - /** Number of supported timers of all types */ - uint32_t max_timers; - /** Maximum number of simultaneous timeouts. 0 means only limited by memory */ - uint32_t max_num_tmo; - /** Highest supported resolution and related limits for a timeout */ - em_timer_res_param_t max_res; - /** Longest supported timeout and related resolution */ - em_timer_res_param_t max_tmo; - - /** alternate periodic ring */ - struct { - /** Maximum ring timers */ - uint32_t max_rings; - /** Maximum simultaneous ring timeouts */ - uint32_t max_num_tmo; - /** Minimum base_hz */ - em_fract_u64_t min_base_hz; - /** Minimum base_hz */ - em_fract_u64_t max_base_hz; - } ring; - -} em_timer_capability_t; - -/** - * tmo optional extra arguments - * - */ -typedef struct em_tmo_args_t { - /** can be used with ring timer, see em_tmo_get_userptr */ - void *userptr; -} em_tmo_args_t; - -/** - * Initialize em_timer_attr_t - * - * Initializes em_timer_attr_t to system specific default values. - * After initialization user can adjust the values as needed before - * calling em_timer_create. em_timer_capability() and/or em_timer_res_capability() - * can optionally be used to find valid values. 
- * - * Always initialize em_timer_attr_t with em_timer_attr_init before any use. - * - * This function will not trigger errorhandler calls internally. - * - * Example for all defaults - * @code - * em_timer_attr_t tmr_attr; - * em_timer_attr_init(&tmr_attr); - * em_timer_t tmr = em_timer_create(&tmr_attr); - * @endcode - * - * @param tmr_attr Pointer to em_timer_attr_t to be initialized - * - * @see em_timer_capability, em_timer_create - */ -void em_timer_attr_init(em_timer_attr_t *tmr_attr); - -/** - * Initialize em_timer_ring_attr_t - * - * Initializes em_timer_ring_attr_t according to given values. - * After successful return the attributes can be given to em_timer_ring_create. - * Note, that if the implementation cannot use exact given combination it may - * update the ring_attr values, but always to meet or exceed given value. - * User can read the new values to determine if they were modified. - * Error is returned if given values cannot be met. - * - * Before creating the ring timer other values like num_tmo and name can be - * adjusted as needed. Also if non-integer frequency is needed the base_hz - * fractional part can be adjusted before timer_ring_create. - * - * This function will not trigger errorhandler calls. - * - * @param [out] ring_attr Pointer to em_timer_attr_t to be initialized - * @param clk_src Clock source to use (system specific or portable - * EM_TIMER_CLKSRC_DEFAULT) - * @param base_hz Base rate of the ring (minimum rate i.e. longest period) - * @param max_mul Maximum multiplier (maximum rate = base_hz * max_mul) - * @param res_ns Required resolution of the timing or 0 to accept default - * - * @return EM_OK if the given clk_src and other values are supported - * - * @see em_timer_ring_capability, em_timer_ring_create - */ -em_status_t em_timer_ring_attr_init(em_timer_attr_t *ring_attr, - em_timer_clksrc_t clk_src, - uint64_t base_hz, - uint64_t max_mul, - uint64_t res_ns); - -/** - * Inquire timer capabilities - * - * Returns timer capabilities for the given clock source, which is also written - * to both 'capa->max_res.clk_src' and 'capa->max_tmo.clk_src'. - * For resolution both 'res_ns' and 'res_hz' are filled. - * - * This function will not trigger errorhandler calls internally. - * - * @param capa pointer to em_timer_capability_t to be updated - * (does not need to be initialized) - * @param clk_src Clock source to use for timer - * (EM_TIMER_CLKSRC_DEFAULT for system specific default) - * @return EM_OK if the given clk_src is supported (capa updated) - * - * @see em_timer_capability_t, em_timer_res_capability - */ -em_status_t em_timer_capability(em_timer_capability_t *capa, em_timer_clksrc_t clk_src); - -/** - * Inquire timer capabilities for a specific resolution or maximum timeout - * - * Returns timer capabilities by given resolution or maximum timeout. - * Set one of resolution (res.res_ns) or maximum timeout (res.max_tmo) to required value - * and the other to zero and this will fill the other fields with valid limits. - * Error is returned if the given value is not supported. - * The given clk_src is used to set the values and also written to 'res->clk_src'. - * Both 'res_ns' and 'res_hz' are filled, so if this is passed to em_timer_create, - * one of those must be set to 0. 
- * - * Example for external clock maximum resolution - * @code - * em_timer_attr_t *tmr_attr; - * em_timer_capability_t capa; - * - * em_timer_attr_init(&tmr_attr); - * if (em_timer_capability(&capa, EM_TIMER_CLKSRC_EXT) != EM_OK) { - * // external clock not supported - * } - * tmr_attr.resparam = capa.max_res; - * tmr_attr.resparam.res_hz = 0; - * tmr = em_timer_create(&tmr_attr); - * @endcode - * - * This function will not trigger errorhandler calls internally. - * - * @param res Pointer to em_timer_res_param_t with one field set - * @param clk_src Clock source to use for timer - * (EM_TIMER_CLKSRC_DEFAULT for system specific default) - * @return EM_OK if the input value is supported (res updated) - * - * @see em_timer_capability - */ -em_status_t em_timer_res_capability(em_timer_res_param_t *res, em_timer_clksrc_t clk_src); - -/** - * @brief Check periodic ring timer capability. - * - * Returns ring timer capability from given input values. On input parameter - * ring must be initialized with the required values. res_ns can be 0, - * which gets replaced by the system default. - * During the call values are updated. If this returns EM_OK then the combination - * of given values are all supported (or exceeded e.g. better resolution), - * otherwise values are updated with the closest supported. - * - * As em_timer_ring_attr_init only takes integer base_hz, this can also be used - * to verify valid values for modified fractional frequencies to avoid - * errorhandler call from timer_ring_create(). - * - * This function will not trigger errorhandler calls. - * - * @param ring [in,out] timer ring parameters to check - * - * @retval EM_OK Parameter combination is supported - * @retval EM_ERR_NOT_SUPPORTED Parameters not supported, values updated to closest - * @retval (other error) Unsupported arguments - */ -em_status_t em_timer_ring_capability(em_timer_ring_param_t *ring); - -/** - * Create and start a timer resource - * - * Required attributes are given via tmr_attr. The given structure must be - * initialized with em_timer_attr_init before setting any field. - * - * Timer resolution can be given as time 'res_ns' or frequency 'res_hz'. User - * must choose which one to use by setting the other one to 0. - * - * To use all defaults initialize tmr_attr with em_timer_attr_init() and pass it - * as is to em_timer_create(). - * - * @note NULL is no longer supported, must give pointer to initialized em_timer_attr_t - * - * @param tmr_attr Timer parameters to use, pointer to initialized em_timer_attr_t - * - * @return Timer handle on success or EM_TIMER_UNDEF on error - * - * @see em_timer_attr_init, em_timer_capability - */ -em_timer_t em_timer_create(const em_timer_attr_t *tmr_attr); - -/** - * Create and start a timer ring (alternative periodic timer) - * - * Required attributes are given via ring_attr, which must have been initialized - * with em_timer_ring_attr_init and optionally adjusted for the required timing - * constraints. - * - * A periodic ring timer is different and will only send EM_EVENT_TYPE_TIMER_IND - * events, which are automatically provided and cannot be modified. These events - * can be allocated only via timer APIs. - * - * Example for 1ms ... 
125us periodic ring timer (base 1000 hz, multiplier up to 8): - * @code - * em_timer_ring_attr_t attr; - * if (em_timer_ring_attr_init(&attr, EM_TIMER_CLKSRC_DEFAULT, 1000, 8, 0) != EM_OK) { - * // given values not supported - * } - * - * em_timer_t tmr = em_timer_ring_create(&attr); - * if (tmr == EM_TIMER_UNDEF) { - * // handle error here or via error handler - * } - * @endcode - * - * @param ring_attr Timer ring parameters to use - * - * @return Timer handle on success or EM_TIMER_UNDEF on error - * - * @see em_timer_ring_attr_init - */ -em_timer_t em_timer_ring_create(const em_timer_attr_t *ring_attr); - -/** - * Stop and delete a timer - * - * Delete a timer, frees all resources. - * All timeouts for this timer must have been deleted first. - * - * @param tmr Timer handle - * - * @return EM_OK on success - */ -em_status_t em_timer_delete(em_timer_t tmr); - -/** - * Returns current tick value of the given timer - * - * This can be used for calculating absolute timeouts. - * - * @param tmr Timer handle - * - * @return Current time in timer specific ticks or 0 on non-existing timer - */ -em_timer_tick_t em_timer_current_tick(em_timer_t tmr); - -/** - * Allocate a new timeout - * - * Create a new timeout. Allocates the necessary internal resources from the - * given timer and prepares for em_tmo_set_abs/rel/periodic(). - * - * Scheduled queues are always supported. LOCAL or OUTPUT queues can not be - * used as timeout targets. Support for unscheduled queues is implementation - * specific. - * - * Flags are used to select functionality: - * - EM_TMO_FLAG_ONESHOT creates a one-shot timeout and - * - EM_TMO_FLAG_PERIODIC creates a periodic timeout. - * The flag EM_TMO_FLAG_NOSKIP can, in the periodic case, be 'OR':d into the - * flags to make the timeout acknowledgment never skip a missed timeout (the - * default is to skip missed time slots). - * - * If used timer is timer ring the NOSKIP flag is ignored. - * - * @param tmr Timer handle - * @param flags Functionality flags - * @param queue Target queue where the timeout event should be delivered - * - * @return Timeout handle on success or EM_TMO_UNDEF on failure - */ -em_tmo_t em_tmo_create(em_timer_t tmr, em_tmo_flag_t flags, em_queue_t queue); - -/** - * Allocate a new timeout with extra arguments - * - * Like em_tmo_create but with additional argument. This can be used with any - * timer type, but e.g. the userptr argument is only used with ring timers - * using events of type EM_EVENT_TYPE_TIMER_IND that can carry userptr. - * - * @param tmr Timer handle - * @param flags Functionality flags - * @param queue Target queue where the timeout event should be delivered - * @param args Optional pointer holding extra arguments e.g. userptr for - * ring timers. NULL ok. - * - * @return Timeout handle on success or EM_TMO_UNDEF on failure - * @see em_tmo_create - */ -em_tmo_t em_tmo_create_arg(em_timer_t tmr, em_tmo_flag_t flags, em_queue_t queue, - em_tmo_args_t *args); - -/** - * Free a timeout - * - * Free (destroy) a timeout. - * The user provided timeout event for an active timeout will be returned via - * cur_event and the timeout is cancelled. The timeout event for an expired, but - * not yet received timeout will not be returned. It is the responsibility of - * the application to handle that case (event will still be received). - * - * After and during this call the tmo handle is not valid anymore and must not - * be used. With periodic timeout means em_tmo_ack must also not be called when - * tmo is deleted. 
- * - * @param tmo Timeout handle - * @param[out] cur_event Current event for an active timeout - * - * @return EM_OK on success - */ -em_status_t em_tmo_delete(em_tmo_t tmo, em_event_t *cur_event); - -/** - * Activate a oneshot timeout with absolute time. - * - * Activates oneshot timeout to expire at specific absolute time. The given - * timeout event will be sent to the queue given to em_tmo_create() when the - * timeout expires. - * - * It is not possible to send timeouts with an event group, but the application - * can assign the event group when receiving the timeout event, see - * em_event_group_assign(). - * - * The timeout event should not be accessed after it has been given to the - * timer, similar to sending an event. - * - * Even if not guaranteed, the implementation should make sure that this call - * can fail only in exceptional situations (em_tmo_create() should pre- - * allocate needed resources). - * - * Allowed minimum and maximum timeout can be inquired with - * em_timer_res_capability. - * - * An active timeout can not be modified. The timeout needs to be canceled and - * then set again with new arguments. - * - * An inactive timeout can be re-used by calling em_tmo_set_abs/rel() again - * after the previous timeout was received or was cancelled successfully. - * - * This function is for activating oneshot timeouts only. To activate - * a periodic timer use em_tmo_set_periodic() instead. - * - * @param tmo Timeout handle - * @param ticks_abs Expiration time in absolute timer specific ticks - * @param tmo_ev Timeout event - * - * @retval EM_OK success (event taken) - * @retval EM_ERR_TOONEAR failure, tick value is in past or too close to - * current time. Errorhandler not called, event - * not taken - * @retval (other_codes) failure, event not taken - * - * @see em_timer_res_capability - */ -em_status_t em_tmo_set_abs(em_tmo_t tmo, em_timer_tick_t ticks_abs, - em_event_t tmo_ev); - -/** - * Activate a timeout with relative time. - * - * Similar to em_tmo_set_abs(), but instead of an absolute time uses timeout - * value relative to the moment of the call. - * - * This function is for activating oneshot timeouts only. To activate - * a periodic timer use em_tmo_set_periodic() instead. - * - * @param tmo Timeout handle - * @param ticks_rel Expiration time in relative timer specific ticks - * @param tmo_ev Timeout event handle - * - * @retval EM_OK success (event taken) - * @retval EM_ERR_TOONEAR failure, tick value is too low. Errorhandler not - * called, event not taken - * @retval (other_codes) failure, event not taken - * - * @deprecated Do not use for periodic timeouts - * - * @see em_tmo_set_abs, em_tmo_set_periodic - */ -em_status_t em_tmo_set_rel(em_tmo_t tmo, em_timer_tick_t ticks_rel, - em_event_t tmo_ev); - -/** - * Activate a periodic timeout - * - * Used to activate periodic timeouts. The first period can be different from - * the repetitive period by providing an absolute start time e.g. the first period - * starts from that moment. Use 0 as start time if the period can start from the - * moment of the call (relative). - * - * The timeout event will be sent to the queue given to em_tmo_create() when the - * first timeout expires. Receiver then need to call em_tmo_ack() to allow - * sending next event. - * - * This function can only be used with periodic timeouts (created with flag - * EM_TMO_FLAG_PERIODIC). 
- * - * @param tmo Timeout handle - * @param start_abs Absolute start time (or 0 for period starting at call time) - * @param period Period in timer specific ticks - * @param tmo_ev Timeout event handle - * - * @retval EM_OK success (event taken) - * @retval EM_ERR_TOONEAR failure, tick value is in past or too close to - * current time. Errorhandler not called, event - * not taken - * @retval (other_codes) failure, event not taken - * - * @see em_tmo_ack - */ -em_status_t em_tmo_set_periodic(em_tmo_t tmo, - em_timer_tick_t start_abs, - em_timer_tick_t period, - em_event_t tmo_ev); - -/** - * Activate a periodic timeout on a periodic ring timer - * - * Use start_abs value 0 to start the timer relative to current time. To adjust - * the offset of timeouts an absolute tick can also be given, but the maximum - * distance from current time can only be up to one period. - * Periodic rate of the timeout event is base_hz (given when creating the timer) - * multiplied by the given multiplier. For example 1000Hz base_hz with multiplier - * of 8 will give 125us period. - * - * Timeout event of type EM_EVENT_TYPE_TIMER_IND is automatically allocated if - * not provided and will be sent to the queue given to em_tmo_create() when the - * timeout expires. User then needs to call em_timer_ack like with normal - * periodic timeout. With ring timer however there is no guaranteed flow control, - * new events may be sent even before user has called ack. This means the same - * event may be in the input queue multiple times if the application can not - * keep up the period rate. - * If the destination queue is not atomic the same event can then also be - * concurrently received by multiple cores. This is a race hazard to prepare for. - * Additionally the used event can not change via em_tmo_ack, the received event - * must always be returned. - * - * The last argument tmo_ev is normally EM_EVENT_UNDEF for a new timeout start. - * Then the implementation will use pre-allocated event. Exception is re-use of - * canceled ring timeout event (when ack returns EM_ERR_CANCELED the event stays - * with user and can be re-used). Such event can be recycled here to avoid extra - * free and alloc. - * - * This function can only be used with periodic timeouts with a ring timer. - * The timeout indication event is read-only and can be accessed only via - * accessor APIs. - * - * @param tmo Timeout handle - * @param start_abs Absolute start time (or 0 for period starting at call time) - * @param multiplier Rate multiplier (period rate = multiplier * timer base_hz) - * @param tmo_ev Event of type EM_EVENT_TYPE_TIMER_IND to re-use. - * Normally EM_EVENT_UNDEF. - * - * @retval EM_OK success - * @retval EM_ERR_TOONEAR failure, start tick value is past or too close - * to current time or multiplier is too high - * @retval EM_ERR_TOOFAR failure, start tick value exceeds one period - * @retval (other_codes) failure - * - * @see em_tmo_get_user_ptr, em_tmo_get_type, em_timer_create_ring - */ -em_status_t em_tmo_set_periodic_ring(em_tmo_t tmo, - em_timer_tick_t start_abs, - uint64_t multiplier, - em_event_t tmo_ev); - -/** - * Cancel a timeout - * - * Cancels a timeout preventing future expiration. Returns the timeout event - * in case the timeout was not expired. A timeout that has already expired or - * just about to cannot be cancelled and the timeout event will be delivered to - * the destination queue. In this case cancel will return an error as it was - * too late to cancel. 
Errorhandler is not called if failure is due to expired - * timeout only. - * - * Periodic timeout: cancel may fail if attempted too close to the next period. - * This can be considered normal and indicates that one more timeout will be - * received. In this case errorhandler is not called, error status - * EM_ERR_TOONEAR returned and no event returned. When em_tmo_ack is then - * called on the canceled timeout event receive it will return EM_ERR_CANCELED - * to indicate this is the last event coming for this timeout. - * - * @param tmo Timeout handle - * @param[out] cur_event Event handle pointer to return the pending - * timeout event or EM_EVENT_UNDEF if cancel fails - * (e.g. called too late) - * - * @retval EM_OK success, event returned - * @retval EM_ERR_TOONEAR already expired (too late to cancel). - * Errorhandler not called - * @retval (other_codes) failure - * - * @see em_tmo_set_abs, em_tmo_set_rel, em_tmo_set_periodic, em_tmo_set_periodic_ring - */ -em_status_t em_tmo_cancel(em_tmo_t tmo, em_event_t *cur_event); - -/** - * Acknowledge a periodic timeout - * - * All received periodic timeout events must be acknowledged with em_tmo_ack(). - * No further timeout event(s) will be sent before the user has acknowledged - * the previous one unless a ring timer is used. - * - * Timeout acknowledgment is usually done at the end of the EO-receive function - * to prevent race conditions (e.g. if the same event is re-used for the next - * timeout period also). The implementation will adjust for the processing delay - * so that the time slot will not drift over time. - * - * If em_tmo_ack() is called too late, e.g. the next period(s) is already - * passed, the implementation by default will skip all the missed time slots and - * arm for the next future one keeping the original start offset. Application - * can alter this behaviour with the flag EM_TMO_FLAG_NOSKIP when creating a - * timeout. Then no past timeout is skipped and each late acknowledgment will - * immediately trigger sending the next timeout event until current time has - * been reached. - * Note that using EM_TMO_FLAG_NOSKIP may result in an event storm if a large - * number of timeouts have been unacknowledged for a longer time (limited by - * application response latency). Timing problems will not call errorhandler. - * - * If the timer has been canceled, but the cancel happened too late for the - * current period the timeout will be delivered. If application then calls - * em_tmo_ack it returns EM_ERR_CANCELED and does not call errorhandler. This is - * to signal it was the last timeout coming for that tmo. - * - * Application may re-use the same received timeout event or provide a new one - * for the next timeout. With ring timer the received event must be returned. - * - * The given event should not be touched after calling this function until it - * has been received again or after the timeout is successfully cancelled and - * event returned. - * - * Periodic timeout will stop if em_tmo_ack() returns an error other than - * timing related. The implementation will call errorhandler in this case - * unless timer was canceled, so the exception can be handled also there. - * - * em_tmo_ack() can only be used with periodic timeouts. 
- * - * @param tmo Timeout handle - * @param next_tmo_ev Next timeout event handle (can be the received one) - * - * @retval EM_OK success (event taken) - * @retval EM_ERR_CANCELED timer has been cancelled, no more coming, not taken - * @retval (other_codes) failure, event not taken - */ -em_status_t em_tmo_ack(em_tmo_t tmo, em_event_t next_tmo_ev); - -/** - * Get a list of currently active timers. - * - * The timer handles returned via 'tmr_list' can be used for further timer - * queries or to destroy existing timers. - * - * The return value always reflects the actual number of timers in the - * EM instance but the output parameter 'tmr_list' is only written up to the - * given 'max' length. - * - * Note that the return value (number of timers) can be greater than the given - * 'max'. It is the user's responsibility to check the return value against the - * given 'max'. - * - * To only get the current number of active timers, without any timer handles - * output, use the following: num_timers = em_timer_get_all(NULL, 0); - * - * @param[out] tmr_list Pointer to an array of timer handles. - * Use NULL if only interested in the return value. - * @param max Max number of handles that can be written into tmr_list - * 'max' is ignored if 'tmr_list' is NULL. - * - * @return The number of active timers - */ -int em_timer_get_all(em_timer_t *tmr_list, int max); - -/** - * Get timer attributes - * - * Returns the actual capabilities of the given timer. - * - * @param tmr Timer handle - * @param [out] tmr_attr Pointer to em_timer_attr_t to fill - * - * @return EM_OK on success - */ -em_status_t em_timer_get_attr(em_timer_t tmr, em_timer_attr_t *tmr_attr); - -/** - * Returns the timer frequency, i.e. ticks per second for the given timer. - * - * Can be used to convert real time to timer specific tick. - * - * @param tmr Timer handle - * - * @return ticks per second (Hz), or 0 for non-existing timer - */ -uint64_t em_timer_get_freq(em_timer_t tmr); - -/** - * Convert timer tick to ns - * - * @param tmr Valid timer handle - * @param ticks Timer specific ticks to convert - * - * @return converted amount in ns - */ -uint64_t em_timer_tick_to_ns(em_timer_t tmr, em_timer_tick_t ticks); - -/** - * Convert ns to timer tick - * - * @param tmr Valid timer handle - * @param ns ns value to convert - * - * @return converted amount in timer ticks - */ -em_timer_tick_t em_timer_ns_to_tick(em_timer_t tmr, uint64_t ns); - -/** - * Returns the current state of the given timeout. - * - * Note that the returned state may change at any time if the timeout expires - * or is manipulated by other threads. - * - * @param tmo Timeout handle - * - * @return current timeout state (EM_TMO_STATE_UNKNOWN on error) - * - * @see em_tmo_state_t - */ -em_tmo_state_t em_tmo_get_state(em_tmo_t tmo); - -/** - * Returns the statistic counters for a timeout. - * - * Returns a snapshot of the current counters of the given timeout. - * Statistics can be accessed while the timeout is valid, i.e. tmo created but - * not deleted. - * - * Counter support is optional. If counters are not supported the function - * returns EM_ERR_NOT_IMPLEMENTED. - * A quick way to detect whether counters are supported is to call the function - * with stat=NULL and check the return value. 
- * - * @param tmo Timeout handle - * @param [out] stat Pointer to em_tmo_stats_t to receive the values (NULL ok) - * - * @return EM_OK on success - */ -em_status_t em_tmo_get_stats(em_tmo_t tmo, em_tmo_stats_t *stat); - -/** - * Ask if given event is currently used as timeout indication. - * - * This can be used with any valid event handle to ask if it is used as a - * timeout indication event. - * Events are updated for tmo type when going through timer API. - * @note As a received event is owned by the application and not necessarily - * passing through timer API anymore this type will not reset until event is - * freed, re-used as another timeout or explicitly reset by setting the reset - * argument to true. This reset should be done if re-using the received tmo event - * for something else than timeout to avoid expired value being returned in case - * someone later calls tmo_get_type. - * - * Successful timeout cancel (event returned) will reset the event type to - * EM_TMO_TYPE_NONE. - * - * @note The reset argument is ignored if the given event is of type - * EM_EVENT_TYPE_TIMER_IND. - * - * The related tmo handle can also be retrieved via parameter tmo. This - * can be useful to call em_tmo_ack() for periodic timeouts: - * @code - * em_tmo_t tmo; - * - * if (em_tmo_get_type(event, &tmo, false) == EM_TMO_TYPE_PERIODIC) - * retval = em_tmo_ack(tmo, event); - * @endcode - * - * @param event event handle to check - * @param [out] tmo pointer to em_tmo_t to receive related tmo handle (NULL ok) - * @param reset set true to reset tmo type to EM_TMO_TYPE_NONE for non-timer re-use - * - * @return type of timeout use or EM_TMO_TYPE_NONE if event is not related to a timeout - * @see em_tmo_type_t - */ -em_tmo_type_t em_tmo_get_type(em_event_t event, em_tmo_t *tmo, bool reset); - -/** - * Returns the optional user pointer for a periodic ring timeout - * - * Can only be used with an event received as a timeout for a periodic ring, - * i.e. EM_EVENT_TYPE_TIMER_IND only. Other event types will return NULL. - * - * @param event Event received as timeout - * @param [out] tmo Optionally returns associated tmo handle. NULL ok. - * - * @return A pointer given when creating the associated tmo or - * NULL if the event is not ring timeout - */ -void *em_tmo_get_userptr(em_event_t event, em_tmo_t *tmo); - -/** - * Returns the associated timer handle from a timeout handle - * - * Associated timer handle is returned from a valid timeout. Can be used to for - * instance read the current timer tick without having the timer handle: - * @code - * em_timer_tick_t tick = em_timer_current_tick(em_tmo_get_timer(tmo)); - * @endcode - * - * @param tmo valid timeout handle - * - * @return associated timer handle or EM_TIMER_UNDEF if tmo is not valid - * - */ -em_timer_t em_tmo_get_timer(em_tmo_t tmo); - -/** - * Convert a timer handle to an unsigned integer - * - * @param timer timer handle to be converted - * @return uint64_t value that can be used to print/display the handle - * - * @note This routine is intended to be used for diagnostic purposes - * to enable applications to e.g. generate a printable value that represents - * an em_timer_t handle. - */ -uint64_t em_timer_to_u64(em_timer_t timer); - -/** - * Convert a timeout handle to an unsigned integer - * - * @param tmo timeout handle to be converted - * @return uint64_t value that can be used to print/display the handle - * - * @note This routine is intended to be used for diagnostic purposes - * to enable applications to e.g. 
generate a printable value that represents - * an em_tmo_t handle. - */ -uint64_t em_tmo_to_u64(em_tmo_t tmo); - -/** - * @} - */ -#ifdef __cplusplus -} -#endif - -#pragma GCC visibility pop -#endif /* EVENT_MACHINE_TIMER_H_ */ +/* + * Copyright (c) 2016-2024, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT_MACHINE_TIMER_H_ +#define EVENT_MACHINE_TIMER_H_ + +#pragma GCC visibility push(default) + +/** + * @file + * Event Machine timer + * @defgroup em_timer Event timer + * Event Machine timer + * @{ + * + * The timer API can be used to request an event to be sent to a specified + * queue at a specified time once (one-shot) or at regular intervals (periodic). + * A timer needs to be created first - it represents a collection of timeouts + * with certain attributes (e.g. timeout resolution and maximum period). + * A timer can be mapped to a HW resource on an SoC, thus the number of timers, + * capabilities and time bases are system specific. Typically only a few timers + * are supported. + * The application can specify required capabilities when a timer is created. + * The creation will fail if the implementation cannot fulfill the required + * values. Timers are typically created once at system startup. + * + * A timer is a shared resource with proper synchronization for concurrent + * multi-thread use. It is possible to exclude all multi-thread protections if a + * timer is used exclusively by a single thread (for potential performance + * gains). This is done by setting EM_TIMER_FLAG_PRIVATE when creating a timer. + * Setting this flag means that the application must ensure that only a single + * thread is using the timer (this also includes the receiver of periodic + * timeouts due to the ack-functionality). This private-mode is not necessarily + * implemented on all systems, in which case the flag is ignored as it will not + * cause any functional difference. + * + * Timeouts (tmo) can be created once a timer exists. 
Creating a timeout + * allocates the resources needed to serve the timeout, but does not arm it. + * This makes it possible to pre-create timeout(s) and set the expiry + * later at runtime. This can improve performance but also minimizes the + * possibility that the runtime call setting the expiry would fail, as resources + * have already been reserved beforehand. + * + * A pending timeout can be cancelled. Note that there is no way to cancel an + * expired timeout for which the event has already been sent but not yet + * received by the application. Canceling in this case will return an error + * to enable the application to detect the situation. For a periodic timer, + * a cancel will stop further timeouts, but may not be able to prevent the + * latest event from being received. An active timeout cannot be altered without + * canceling it first. + * + * A timeout can be reused after the timeout event has been received or when + * successfully cancelled. Timeouts need to be deleted after use. Deletion frees + * the resources reserved during creation. + * + * The timeout value is an abstract system and timer dependent tick count. + * It is assumed that the tick count increases with a static frequency. + * The frequency can be inquired at runtime for time calculations, e.g. tick + * frequency divided by 1000 gives ticks for 1ms. The tick frequency is at least + * equal to the resolution, but can also be higher (implementation can quantize + * ticks to any underlying implementation). The supported resolution can also be + * inquired. + * A clock source can be specified when creating a timer. It defines the time + * base of the timer for systems with multiple sources implemented (optional). + * EM_TIMER_CLKSRC_DEFAULT is a portable value that implements a basic + * monotonic time, that will not wrap back to zero in any reasonable uptime. + * + * Events with major event types EM_EVENT_TYPE_SW, EM_EVENT_TYPE_PACKET and + * EM_EVENT_TYPE_TIMER can be used as timeout events to indicate expiry. The + * type EM_EVENT_TYPE_TIMER is an alternative to EM_EVENT_TYPE_SW and works the + * same way. Additionally, for periodic ring timer only, the type + * EM_EVENT_TYPE_TIMER_IND is used. This is a special timeout indication event + * without visible payload. + * + * Regular periodic timeouts: + * (i.e. NOT periodic ring timer timeouts, see differences further down) + * A periodic timer requires the application to acknowledge each received + * timeout event after it has been processed. The acknowledgment activates the + * next timeout and compensates for the processing delay to keep the original + * interval. This creates a flow control mechanism and also protects the event + * handling from races if the same event is reused every time - the next + * timeout will not be sent before the previous has been acknowledged. + * The event to be received for each periodic timeout can also be different as + * the next event is given by the application via the acknowledgment. + * The target queue cannot be modified after the timeout has been created. + * + * If the acknowledgment of a periodic timeout is done too late (after the next + * period has already passed), the default action is to skip the missed timeout + * slot(s) and arm for the next valid slot. If the application never wants to + * skip a missed timeout it can set the flag EM_TMO_FLAG_NOSKIP when creating a + * timeout. This causes each acknowledgment to schedule an immediate timeout + * event until all the missed time slots have been served. 
This keeps the number
+ * of timeouts as expected but may cause an event storm if a long processing
+ * delay has occurred.
+ *
+ * The timeout handle is needed when acknowledging a periodic timeout event.
+ * Because any event can be used for the timeout, the application must itself
+ * provide a way to derive the timeout handle from the received timeout event.
+ * A typical way is to include the tmo handle within the timeout event.
+ * The application also needs to have a mechanism to detect which event is a
+ * periodic timeout to be able to acknowledge it via em_tmo_ack().
+ *
+ * If the requested timeout tick value for a timeout is in the past or is too
+ * close to the current time then the error code EM_ERR_TOONEAR is returned.
+ * In this case EM will not call the error handler - instead EM lets the
+ * application decide whether to treat the situation as an error or to try again
+ * with an updated target time.
+ *
+ * Periodic ring timer:
+ * There is also an alternative periodic ring timer. It uses a different
+ * abstraction and is created and started via separate ring specific APIs.
+ * It has three main differences to the regular periodic timeouts:
+ * 1. Only a pre-defined read-only event type can be used and is provided
+ *    by the timer (EM_EVENT_TYPE_TIMER_IND).
+ * 2. Flow control is not supported. Some implementations may have it,
+ *    but the specification does not guarantee any, so the user needs to be
+ *    prepared to see the same event enqueued multiple times if handling of
+ *    the received timeouts is not fast enough.
+ * 3. A limited set of period times are supported per timer (the base rate or
+ *    an integer multiple thereof).
+ *
+ * Ring timers can be thought of as a clock face ticking the pointer forward.
+ * One cycle around is the base rate (minimum rate). The same timeout can be
+ * inserted into multiple locations evenly spread within the clock face thus
+ * multiplying the base rate. The starting offset can be adjusted only up to
+ * one timeout period.
+ * Depending on platform, this mode may provide better integration with HW and
+ * thus have less runtime overhead. However, as it exposes a potential queue
+ * overflow and a race hazard (the race is avoidable by using an atomic queue
+ * as the target), regular periodic timeouts are recommended as a default.
+ *
+ * Example usage
+ * @code
+ *
+ *	// This would typically be done at application init.
+ *	// Accept all defaults but change the name
+ *	em_timer_attr_t attr;
+ *	em_timer_attr_init(&attr);
+ *	strncpy(attr.name, "myTimer", EM_TIMER_NAME_LEN);
+ *	em_timer_t tmr = em_timer_create(&attr);
+ *	if (tmr == EM_TIMER_UNDEF) {
+ *		// handle error here or via the error handler
+ *	}
+ *
+ *	// At runtime - create a timeout resource.
+ *	// Can be done in advance to save time if the target queue is known.
+ *	em_tmo_t tmo = em_tmo_create(tmr, EM_TMO_FLAG_ONESHOT, target_queue);
+ *	if (tmo == EM_TMO_UNDEF) {
+ *		// no such timer or out of resources
+ *		// handle error here or via error handler
+ *	}
+ *
+ *	// Get the timer tick frequency
+ *	uint64_t hz = em_timer_get_freq(tmr);
+ *
+ *	// Activate a 10ms timeout from now.
+ *	// Very unlikely to fail with valid arguments.
+ *	if (em_tmo_set_rel(tmo, hz / 100, my_tmo_event) != EM_OK) {
+ *		// handle error here or via error handler
+ *	}
+ *
+ * @endcode
+ *
+ */
+#include
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @typedef em_timer_t
+ * System specific type for a timer handle.
+ */
+
+/**
+ * @typedef em_tmo_t
+ * System specific type for a timeout handle.
+ */
+
+/**
+ * @typedef em_timer_flag_t
+ * System specific type for timer flags.
+ * This is system specific, but all implementations must define
+ * EM_TIMER_FLAG_DEFAULT and EM_TIMER_FLAG_PRIVATE, of which the latter is used
+ * to skip API synchronization for single threaded apps.
+ * Flags can be combined by bitwise OR.
+ */
+
+/**
+ * @typedef em_tmo_flag_t
+ * System specific enum type for timeout flags.
+ * This is system specific, but all implementations must define
+ * EM_TMO_FLAG_ONESHOT, EM_TMO_FLAG_PERIODIC and EM_TMO_FLAG_NOSKIP.
+ * Flags can be combined by bitwise OR.
+ */
+
+/**
+ * @typedef em_timer_clksrc_t
+ * System specific enum type for timer clock source.
+ * This is system specific, but all implementations must define
+ * EM_TIMER_CLKSRC_DEFAULT.
+ */
+
+/**
+ * Visible state of a timeout
+ */
+typedef enum em_tmo_state_t {
+	EM_TMO_STATE_UNKNOWN  = 0,
+	EM_TMO_STATE_IDLE     = 1, /**< just created or canceled */
+	EM_TMO_STATE_ACTIVE   = 2, /**< armed */
+	EM_TMO_STATE_INACTIVE = 3  /**< unused state */
+} em_tmo_state_t;
+
+/**
+ * Type returned by em_tmo_get_type()
+ */
+typedef enum em_tmo_type_t {
+	EM_TMO_TYPE_NONE     = 0, /**< unknown or not a timer-related event */
+	EM_TMO_TYPE_ONESHOT  = 1, /**< event is a oneshot timeout indication */
+	EM_TMO_TYPE_PERIODIC = 2, /**< event is a periodic timeout indication */
+} em_tmo_type_t;
+
+/**
+ * The timer tick has HW and timer specific meaning, but the type is always a
+ * 64-bit integer and is normally assumed to be monotonic and not to wrap
+ * around. Exceptions with exotic extra timers should be clearly documented.
+ */
+typedef uint64_t em_timer_tick_t;
+
+/**
+ * Fractional 64-bit unsigned value for timer frequency.
+ */
+typedef struct em_fract_u64_t {
+	/** Int */
+	uint64_t integer;
+
+	/** Numerator. Set 0 for integers */
+	uint64_t numer;
+
+	/** Denominator */
+	uint64_t denom;
+} em_fract_u64_t;
+
+/**
+ * Type for timer resolution parameters.
+ *
+ * This structure is used to group timer resolution parameters that may affect
+ * each other. All time values are in nanoseconds (ns).
+ *
+ * @note This type is used both as capability and configuration. When used as
+ * configuration, either res_ns or res_hz must be 0 (for em_timer_create()).
+ * @see em_timer_capability(), em_timer_create()
+ */
+typedef struct em_timer_res_param_t {
+	/** Clock source (system specific) */
+	em_timer_clksrc_t clk_src;
+	/** resolution, ns */
+	uint64_t res_ns;
+	/** resolution, hz */
+	uint64_t res_hz;
+	/** minimum timeout, ns */
+	uint64_t min_tmo;
+	/** maximum timeout, ns */
+	uint64_t max_tmo;
+} em_timer_res_param_t;
+
+/**
+ * Periodic timer ring timing parameters.
+ */
+typedef struct em_timer_ring_param_t {
+	/** Clock source (system specific) */
+	em_timer_clksrc_t clk_src;
+	/** Base rate, i.e. minimum period rate */
+	em_fract_u64_t base_hz;
+	/** Maximum base rate multiplier needed. 1 for single rate = base_hz */
+	uint64_t max_mul;
+	/** Resolution */
+	uint64_t res_ns;
+} em_timer_ring_param_t;
+
+/**
+ * EM timer attributes.
+ *
+ * The type is used when creating a timer or inquiring its configuration later.
+ *
+ * This needs to be initialized with em_timer_attr_init(), which fills default
+ * values to each field. After that the values can be modified as needed.
+ * Values set are considered a requirement, e.g. setting 'resparam.res_ns' to
+ * 1000(ns) requires the timer to have at least 1us resolution.
The timer
+ * creation will fail if the implementation cannot support such a resolution
+ * (e.g. if it only goes down to 1500ns).
+ * The implementation is free to provide better than requested, but not worse.
+ *
+ * To know the implementation specific limits, use em_timer_capability() and
+ * em_timer_res_capability().
+ *
+ * When creating the alternative periodic ring timer, this type needs to be
+ * initialized with em_timer_ring_attr_init() instead. EM_TIMER_FLAG_RING will
+ * be set by em_timer_ring_attr_init() so it does not need to be manually set.
+ *
+ * @see em_timer_attr_init(), em_timer_create(),
+ *      em_timer_ring_attr_init(), em_timer_ring_create()
+ */
+typedef struct em_timer_attr_t {
+	/**
+	 * Resolution parameters for em_timer_create().
+	 * Used when creating normal one shot or periodic timers, but not when
+	 * creating periodic ring timers (see ringparam below instead).
+	 * (cleared by em_timer_ring_attr_init() when using a ring timer)
+	 */
+	em_timer_res_param_t resparam;
+
+	/** Maximum simultaneous timeouts */
+	uint32_t num_tmo;
+	/** Extra flags. A set flag is a requirement */
+	em_timer_flag_t flags;
+	/** Optional name for this timer */
+	char name[EM_TIMER_NAME_LEN];
+
+	/**
+	 * Parameters specifically for em_timer_ring_create().
+	 * Used when creating an alternative periodic ring timer.
+	 * (cleared by em_timer_attr_init() since not needed in that case)
+	 */
+	em_timer_ring_param_t ringparam;
+
+	/**
+	 * Internal check - don't touch!
+	 *
+	 * EM will verify that em_timer_attr_init() has been called before
+	 * creating a timer
+	 */
+	uint32_t __internal_check;
+} em_timer_attr_t;
+
+/**
+ * Timeout statistics counters
+ *
+ * Some fields relate to periodic timeout only (0 on one-shots) and vice versa.
+ * New fields may be added later at the end.
+ */
+typedef struct em_tmo_stats_t {
+	/** number of periodic ack() calls */
+	uint64_t num_acks;
+	/** number of delayed periodic ack() calls. 0 with ring timer */
+	uint64_t num_late_ack;
+	/** number of skipped periodic timeslots due to late ack. 0 with ring timer */
+	uint64_t num_period_skips;
+} em_tmo_stats_t;
+
+/**
+ * Timer capability info
+ */
+typedef struct em_timer_capability_t {
+	/** Number of supported timers of all types */
+	uint32_t max_timers;
+	/** Maximum number of simultaneous timeouts. 0 means only limited by memory */
+	uint32_t max_num_tmo;
+	/** Highest supported resolution and related limits for a timeout */
+	em_timer_res_param_t max_res;
+	/** Longest supported timeout and related resolution */
+	em_timer_res_param_t max_tmo;
+
+	/** alternate periodic ring */
+	struct {
+		/** Maximum ring timers */
+		uint32_t max_rings;
+		/** Maximum simultaneous ring timeouts */
+		uint32_t max_num_tmo;
+		/** Minimum base_hz */
+		em_fract_u64_t min_base_hz;
+		/** Maximum base_hz */
+		em_fract_u64_t max_base_hz;
+	} ring;
+
+} em_timer_capability_t;
+
+/**
+ * tmo optional extra arguments
+ */
+typedef struct em_tmo_args_t {
+	/** can be used with ring timer, see em_tmo_get_userptr */
+	void *userptr;
+} em_tmo_args_t;
+
+/**
+ * Initialize em_timer_attr_t for normal timers (i.e. NOT periodic ring timers).
+ *
+ * Initializes em_timer_attr_t to system specific default values.
+ * After initialization, the user can adjust the values as needed before
+ * calling em_timer_create(). The functions em_timer_capability() and/or
+ * em_timer_res_capability() can optionally be used to find valid values.
+ *
+ * Always initialize em_timer_attr_t with em_timer_attr_init() before use.
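+ *
+ * For example, to request a specific resolution instead of the defaults
+ * (a sketch - the 10us resolution and timeout count below are illustrative
+ * values only, support is system specific):
+ * @code
+ *	em_timer_attr_t tmr_attr;
+ *
+ *	em_timer_attr_init(&tmr_attr);
+ *	tmr_attr.resparam.res_ns = 10000; // request at least 10us resolution
+ *	tmr_attr.resparam.res_hz = 0;     // must be 0 when res_ns is used
+ *	tmr_attr.num_tmo = 1000;          // up to 1000 simultaneous timeouts
+ *	em_timer_t tmr = em_timer_create(&tmr_attr);
+ *	if (tmr == EM_TIMER_UNDEF) {
+ *		// handle error here or via error handler
+ *	}
+ * @endcode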
+ *
+ * This function will not trigger EM error handler calls internally.
+ *
+ * Example for all defaults:
+ * @code
+ *	em_timer_attr_t tmr_attr;
+ *	em_timer_attr_init(&tmr_attr);
+ *	em_timer_t tmr = em_timer_create(&tmr_attr);
+ * @endcode
+ *
+ * @param tmr_attr  Pointer to em_timer_attr_t to be initialized
+ *
+ * @see em_timer_capability(), em_timer_create()
+ */
+void em_timer_attr_init(em_timer_attr_t *tmr_attr);
+
+/**
+ * Initialize em_timer_attr_t for periodic ring timers.
+ *
+ * Initializes the given em_timer_attr_t according to the given values.
+ * After a successful return, the attributes can be given to em_timer_ring_create().
+ * Note that if the implementation cannot use the exact given combination, it may
+ * update the ring_attr values, but always to meet or exceed the given values.
+ * The user can read the new values to determine if they were modified.
+ * An error is returned if the given values cannot be met.
+ *
+ * Before creating the ring timer, other values like num_tmo and name can be
+ * adjusted as needed. Also, if a non-integer frequency is needed, the base_hz
+ * fractional part can be adjusted before em_timer_ring_create().
+ *
+ * This function will not trigger error handler calls.
+ *
+ * @param[out] ring_attr  Pointer to em_timer_attr_t to be initialized
+ * @param      clk_src    Clock source to use (system specific or portable
+ *                        EM_TIMER_CLKSRC_DEFAULT)
+ * @param      base_hz    Base rate of the ring (minimum rate, i.e. longest period)
+ * @param      max_mul    Maximum multiplier (maximum rate = base_hz * max_mul)
+ * @param      res_ns     Required resolution of the timer or 0 to accept the default
+ *
+ * @return EM_OK if the given clk_src and other values are supported
+ *
+ * @see em_timer_ring_capability(), em_timer_ring_create()
+ */
+em_status_t em_timer_ring_attr_init(em_timer_attr_t *ring_attr,
+				    em_timer_clksrc_t clk_src,
+				    uint64_t base_hz,
+				    uint64_t max_mul,
+				    uint64_t res_ns);
+
+/**
+ * Inquire timer capabilities
+ *
+ * Returns timer capabilities for the given clock source, which is also written
+ * to both 'capa->max_res.clk_src' and 'capa->max_tmo.clk_src'.
+ * For resolution, both 'res_ns' and 'res_hz' are filled.
+ *
+ * This function will not trigger error handler calls internally.
+ *
+ * @param[out] capa     Pointer to em_timer_capability_t to be updated
+ *                      (does not need to be initialized)
+ * @param      clk_src  Clock source to use for the timer
+ *                      (EM_TIMER_CLKSRC_DEFAULT for the system specific default)
+ *
+ * @return EM_OK if the given clk_src is supported (capa updated)
+ *
+ * @see em_timer_capability_t, em_timer_res_capability()
+ */
+em_status_t em_timer_capability(em_timer_capability_t *capa, em_timer_clksrc_t clk_src);
+
+/**
+ * Inquire timer capabilities for a specific resolution or maximum timeout
+ *
+ * Returns timer capabilities by the given resolution or maximum timeout.
+ * Set either the resolution (res.res_ns) or the maximum timeout (res.max_tmo)
+ * to the required value and the other to zero, and the function will fill the
+ * other fields with valid limits.
+ * An error is returned if the given value is not supported.
+ * The given clk_src is used to set the values and is also written to 'res->clk_src'.
+ * Both 'res_ns' and 'res_hz' are filled, so if the result is passed further to
+ * em_timer_create(), one of those must be set to 0.
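+ *
+ * A minimal usage sketch (the 10us resolution value is only illustrative,
+ * the actually supported values are implementation specific):
+ * @code
+ *	em_timer_res_param_t res_param = {0}; // zeroed: only res_ns will be set
+ *
+ *	res_param.res_ns = 10000; // require at least 10us resolution
+ *	if (em_timer_res_capability(&res_param, EM_TIMER_CLKSRC_DEFAULT) == EM_OK) {
+ *		// res_param.min_tmo and res_param.max_tmo now hold valid limits
+ *	}
+ * @endcode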
+ *
+ * Example for the external clock maximum resolution:
+ * @code
+ *	em_timer_attr_t tmr_attr;
+ *	em_timer_capability_t capa;
+ *
+ *	em_timer_attr_init(&tmr_attr);
+ *	if (em_timer_capability(&capa, EM_TIMER_CLKSRC_EXT) != EM_OK) {
+ *		// external clock not supported
+ *	}
+ *	tmr_attr.resparam = capa.max_res;
+ *	tmr_attr.resparam.res_hz = 0;
+ *	em_timer_t tmr = em_timer_create(&tmr_attr);
+ * @endcode
+ *
+ * This function will not trigger error handler calls internally.
+ *
+ * @param[in,out] res      Pointer to em_timer_res_param_t with one field set
+ * @param         clk_src  Clock source to use for the timer
+ *                         (EM_TIMER_CLKSRC_DEFAULT for the system specific default)
+ *
+ * @return EM_OK if the input value is supported (res updated)
+ *
+ * @see em_timer_capability()
+ */
+em_status_t em_timer_res_capability(em_timer_res_param_t *res, em_timer_clksrc_t clk_src);
+
+/**
+ * @brief Check periodic ring timer capability.
+ *
+ * Returns the ring timer capability based on the given input values.
+ * The parameter 'ring' must be initialized with the values required by the user.
+ * The ring.res_ns can be 0 and gets replaced by the system default.
+ * The values are updated during the call. If EM_OK is returned, then the given
+ * combination of values is supported (or exceeded, e.g. better resolution),
+ * otherwise the values are updated with the closest supported ones.
+ *
+ * As em_timer_ring_attr_init() only takes an integer base_hz, this function can
+ * also be used to verify valid values for modified fractional frequencies to
+ * avoid error handler calls from em_timer_ring_create().
+ *
+ * This function will not trigger error handler calls.
+ *
+ * @param[in,out] ring  Timer ring parameters to check
+ *
+ * @retval EM_OK                 Parameter combination is supported
+ * @retval EM_ERR_NOT_SUPPORTED  Parameters not supported, values updated to the closest
+ * @retval (other error)         Unsupported arguments
+ */
+em_status_t em_timer_ring_capability(em_timer_ring_param_t *ring);
+
+/**
+ * Create and start a timer resource
+ *
+ * Required attributes are given via tmr_attr. The given structure must be
+ * initialized with em_timer_attr_init() before setting any field.
+ *
+ * Timer resolution can be given as time 'res_ns' or frequency 'res_hz'.
+ * The user must choose which one to use by setting the other one to 0.
+ *
+ * To use all defaults, initialize tmr_attr with em_timer_attr_init() and pass
+ * it as is to em_timer_create().
+ *
+ * @param tmr_attr  Timer parameters to use, pointer to an initialized em_timer_attr_t
+ *
+ * @note NULL is no longer supported, the pointer must point to an initialized
+ *       em_timer_attr_t
+ *
+ * @return Timer handle on success or EM_TIMER_UNDEF on error
+ *
+ * @see em_timer_attr_init(), em_timer_capability()
+ */
+em_timer_t em_timer_create(const em_timer_attr_t *tmr_attr);
+
+/**
+ * Create and start a periodic timer ring (alternative periodic timer)
+ *
+ * The required attributes are given via ring_attr, which must have been
+ * initialized with em_timer_ring_attr_init() and optionally adjusted for the
+ * required timing constraints.
+ *
+ * A periodic ring timer is a bit different: it will only send
+ * EM_EVENT_TYPE_TIMER_IND timeout events, which are automatically provided and
+ * cannot be modified. These events can be allocated only via the timer APIs.
+ *
+ * Example for a 1ms ... 125us periodic ring timer (base 1000 Hz, multiplier up to 8):
+ * @code
+ *	em_timer_attr_t attr;
+ *
+ *	if (em_timer_ring_attr_init(&attr, EM_TIMER_CLKSRC_DEFAULT, 1000, 8, 0) != EM_OK) {
+ *		// given values not supported
+ *	}
+ *
+ *	em_timer_t tmr = em_timer_ring_create(&attr);
+ *	if (tmr == EM_TIMER_UNDEF) {
+ *		// handle error here or via the error handler
+ *	}
+ * @endcode
+ *
+ * @param ring_attr  Timer ring parameters to use
+ *
+ * @return Timer handle on success or EM_TIMER_UNDEF on error
+ *
+ * @see em_timer_ring_attr_init()
+ */
+em_timer_t em_timer_ring_create(const em_timer_attr_t *ring_attr);
+
+/**
+ * Stop and delete a timer
+ *
+ * Deletes a timer and frees all of its resources.
+ * All timeouts for this timer must have been canceled and deleted first.
+ *
+ * @param tmr  Timer handle
+ *
+ * @return EM_OK on success
+ */
+em_status_t em_timer_delete(em_timer_t tmr);
+
+/**
+ * Return the current tick value of the given timer
+ *
+ * This can be used for calculating absolute timeouts.
+ *
+ * @param tmr  Timer handle
+ *
+ * @return Current time in timer specific ticks or 0 for a non-existing timer
+ */
+em_timer_tick_t em_timer_current_tick(em_timer_t tmr);
+
+/**
+ * Allocate a new timeout
+ *
+ * Creates a new timeout. Allocates the necessary internal resources from the
+ * given timer and prepares for em_tmo_set_abs/rel/periodic().
+ *
+ * Scheduled queues are always supported as timeout event destinations. LOCAL or
+ * OUTPUT queues cannot be used as timeout targets. Support for unscheduled
+ * queues is implementation specific.
+ *
+ * Flags are used to select the functionality:
+ * - EM_TMO_FLAG_ONESHOT creates a one-shot timeout and
+ * - EM_TMO_FLAG_PERIODIC creates a periodic timeout.
+ * In the periodic case, the flag EM_TMO_FLAG_NOSKIP can be OR'ed into the
+ * flags to make the timeout acknowledgment never skip a missed timeout (the
+ * default is to skip missed time slots).
+ *
+ * The NOSKIP flag is ignored if the used timer is a periodic ring timer.
+ *
+ * @param tmr    Timer handle
+ * @param flags  Functionality flags
+ * @param queue  Target queue where the timeout event should be delivered
+ *
+ * @return Timeout handle on success or EM_TMO_UNDEF on failure
+ */
+em_tmo_t em_tmo_create(em_timer_t tmr, em_tmo_flag_t flags, em_queue_t queue);
+
+/**
+ * Allocate a new timeout with extra arguments
+ *
+ * Similar to em_tmo_create() but with an additional 'args' pointer. This API
+ * can be used with any timer type, but 'args->userptr' is only meaningful for
+ * ring timers using events of type EM_EVENT_TYPE_TIMER_IND that can carry a
+ * 'userptr'.
+ *
+ * @param tmr    Timer handle
+ * @param flags  Functionality flags
+ * @param queue  Target queue where the timeout event should be delivered
+ * @param args   Optional pointer holding extra arguments, e.g. a userptr for
+ *               ring timers. NULL ok.
+ *
+ * @return Timeout handle on success or EM_TMO_UNDEF on failure
+ *
+ * @see em_tmo_create()
+ */
+em_tmo_t em_tmo_create_arg(em_timer_t tmr, em_tmo_flag_t flags, em_queue_t queue,
+			   em_tmo_args_t *args);
+
+/**
+ * Delete a timeout
+ *
+ * The deleted timeout must be inactive, i.e. it must be successfully canceled
+ * or the last timeout event must have been received (following a too-late
+ * cancel).
+ * A periodic or a periodic ring timeout can be deleted after a successful
+ * cancel or after em_tmo_ack() returned EM_ERR_CANCELED. This indicates that
+ * the acknowledged timeout is canceled and that it was the last timeout event
+ * coming for that periodic timeout.
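+ *
+ * A minimal cancel-and-delete sketch for a oneshot timeout (illustrative only,
+ * error handling omitted):
+ * @code
+ *	em_event_t ev = EM_EVENT_UNDEF;
+ *
+ *	if (em_tmo_cancel(tmo, &ev) == EM_OK) {
+ *		// cancel succeeded: the pending timeout event was returned
+ *		// and is owned by the application again
+ *		em_free(ev);
+ *		em_tmo_delete(tmo);
+ *	}
+ *	// otherwise the timeout event is already on its way and the tmo can
+ *	// only be deleted after that last event has been received
+ * @endcode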
+ *
+ * During and after this call, the tmo handle is no longer valid and must not
+ * be used with or passed to other timer APIs.
+ *
+ * @param tmo  Timeout handle
+ *
+ * @return EM_OK on success
+ */
+em_status_t em_tmo_delete(em_tmo_t tmo);
+
+/**
+ * Activate a oneshot timeout with an absolute time.
+ *
+ * Activates a oneshot timeout to expire at a specific absolute time. The given
+ * timeout event will be sent to the queue given to em_tmo_create() when the
+ * timeout expires.
+ *
+ * It is not possible to send timeouts with an event group, but the application
+ * can assign the event group when receiving the timeout event, see
+ * em_event_group_assign().
+ *
+ * The timeout event should not be accessed after it has been given to the
+ * timer, similar to sending an event.
+ *
+ * Even if not guaranteed, the implementation should make sure that this call
+ * can fail only in exceptional situations (em_tmo_create() should pre-allocate
+ * the needed resources).
+ *
+ * The allowed minimum and maximum timeouts can be inquired with
+ * em_timer_res_capability().
+ *
+ * An active timeout cannot be modified. The timeout needs to be canceled and
+ * then set again with new arguments.
+ *
+ * An inactive timeout can be reused by calling em_tmo_set_abs/rel() again. The
+ * timeout becomes inactive after the oneshot timeout event has been received
+ * or after it has been successfully canceled.
+ *
+ * This function is for activating oneshot timeouts only. To activate
+ * periodic timeouts, use em_tmo_set_periodic() (or em_tmo_set_periodic_ring()).
+ *
+ * @param tmo        Timeout handle
+ * @param ticks_abs  Expiration time in absolute timer specific ticks
+ * @param tmo_ev     Timeout event
+ *
+ * @retval EM_OK           Success, event taken.
+ * @retval EM_ERR_TOONEAR  Failure, the tick value is in the past or too close
+ *                         to the current time.
+ *                         Error handler not called, event not taken.
+ * @retval (other_codes)   Failure, event not taken.
+ *
+ * @see em_timer_res_capability()
+ */
+em_status_t em_tmo_set_abs(em_tmo_t tmo, em_timer_tick_t ticks_abs,
+			   em_event_t tmo_ev);
+
+/**
+ * Activate a timeout with a relative time.
+ *
+ * Similar to em_tmo_set_abs(), but instead of an absolute time uses a timeout
+ * value relative to the moment of the call.
+ *
+ * This function is for activating oneshot timeouts only. To activate
+ * periodic timeouts, use em_tmo_set_periodic() (or em_tmo_set_periodic_ring()).
+ *
+ * @param tmo        Timeout handle
+ * @param ticks_rel  Expiration time in relative timer specific ticks
+ * @param tmo_ev     Timeout event handle
+ *
+ * @retval EM_OK           Success, event taken.
+ * @retval EM_ERR_TOONEAR  Failure, the tick value is too low.
+ *                         Error handler not called, event not taken.
+ * @retval (other_codes)   Failure, event not taken.
+ *
+ * @see em_tmo_set_abs(), em_tmo_set_periodic()
+ */
+em_status_t em_tmo_set_rel(em_tmo_t tmo, em_timer_tick_t ticks_rel,
+			   em_event_t tmo_ev);
+
+/**
+ * Activate a periodic timeout
+ *
+ * Used to activate periodic timeouts. The first period can be different from
+ * the repetitive period by providing an absolute start time.
+ * Set 'start_abs' to 0 if the repetitive period can start from the moment of
+ * the call.
+ *
+ * The timeout event will be sent to the queue given to em_tmo_create() when the
+ * first timeout expires. The receiver then needs to call em_tmo_ack() to allow
+ * the timer to send the next event for the following period.
+ *
+ * This function can only be used with periodic timeouts (created with the flag
+ * EM_TMO_FLAG_PERIODIC).
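+ *
+ * A minimal activation sketch (assumes 'tmr', 'queue' and the event 'ev'
+ * have been created earlier; the 1ms period is only an example):
+ * @code
+ *	em_tmo_t tmo = em_tmo_create(tmr, EM_TMO_FLAG_PERIODIC, queue);
+ *	em_timer_tick_t period = em_timer_ns_to_tick(tmr, 1000000ULL); // 1ms
+ *
+ *	if (em_tmo_set_periodic(tmo, 0, period, ev) != EM_OK) {
+ *		// handle error, error checks omitted above for brevity
+ *	}
+ * @endcode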
+ *
+ * @param tmo        Timeout handle
+ * @param start_abs  Absolute start time (or 0 for a period starting at the
+ *                   time of the call)
+ * @param period     Period in timer specific ticks
+ * @param tmo_ev     Timeout event handle
+ *
+ * @retval EM_OK           Success, event taken.
+ * @retval EM_ERR_TOONEAR  Failure, the tick value is in the past or too close
+ *                         to the current time.
+ *                         Error handler not called, event not taken.
+ * @retval (other_codes)   Failure, event not taken.
+ *
+ * @see em_tmo_ack()
+ */
+em_status_t em_tmo_set_periodic(em_tmo_t tmo,
+				em_timer_tick_t start_abs,
+				em_timer_tick_t period,
+				em_event_t tmo_ev);
+
+/**
+ * Activate a periodic timeout on a periodic ring timer
+ *
+ * Use the 'start_abs' value 0 to start the timer relative to the current time.
+ * To adjust the offset of the timeouts, an absolute tick can also be given, but
+ * the maximum distance from the current time can only be up to one period.
+ * The periodic rate of the timeout event is 'base_hz' (given when creating the
+ * timer) multiplied by the given 'multiplier'. For example, a 1000Hz 'base_hz'
+ * with a 'multiplier' of 8 will give a 125us period.
+ *
+ * A timeout event of type EM_EVENT_TYPE_TIMER_IND is automatically allocated,
+ * if not provided, and will be sent to the queue given to em_tmo_create() when
+ * the timeout expires. The user needs to call em_tmo_ack() when receiving the
+ * timeout event, similar to a regular periodic timeout. However, with a
+ * ring timer there is no guaranteed flow control - new events may be sent even
+ * before the user has called em_tmo_ack(). This means that the same event may
+ * be in the input queue multiple times if the application cannot keep up with
+ * the period rate. If the destination queue is not atomic, the same event can
+ * also be concurrently received by multiple cores. This is a race hazard the
+ * user must prepare for. Additionally, the used timeout event cannot be changed
+ * via em_tmo_ack(); the actual received event must always be passed to it.
+ *
+ * The last argument 'tmo_ev' is normally 'EM_EVENT_UNDEF' when activating a new
+ * periodic ring timeout. The implementation will in this case use a
+ * pre-allocated event. The exception case concerns the reuse of a canceled ring
+ * timeout event (when em_tmo_ack() returns 'EM_ERR_CANCELED', the event stays
+ * with the user and can be reused). Such an event can be recycled via 'tmo_ev'
+ * to avoid an extra event free and alloc during reactivation.
+ *
+ * This function can only be used with periodic timeouts from a ring timer.
+ * The timeout indication event is read-only and can be accessed only via the
+ * accessor APIs.
+ *
+ * @param tmo         Timeout handle
+ * @param start_abs   Absolute start time (or 0 for a period starting at the
+ *                    time of the call)
+ * @param multiplier  Rate multiplier (period rate = multiplier * timer base_hz)
+ * @param tmo_ev      Event of type EM_EVENT_TYPE_TIMER_IND to reuse.
+ *                    Normally EM_EVENT_UNDEF.
+ *
+ * @retval EM_OK           Success
+ * @retval EM_ERR_TOONEAR  Failure, the start tick value is in the past or too
+ *                         close to the current time, or the multiplier is too high.
+ * @retval EM_ERR_TOOFAR   Failure, the start tick value exceeds one period.
+ * @retval (other_codes)   Failure
+ *
+ * @see em_tmo_get_userptr(), em_tmo_get_type(), em_timer_ring_create()
+ */
+em_status_t em_tmo_set_periodic_ring(em_tmo_t tmo,
+				     em_timer_tick_t start_abs,
+				     uint64_t multiplier,
+				     em_event_t tmo_ev);
+
+/**
+ * Cancel a timeout
+ *
+ * Cancels a timeout, preventing future expiration. Returns the timeout event
+ * if the timeout has not yet expired.
+ * A timeout that has already expired, or is just about to, is too late to be
+ * canceled and the timeout event will be delivered to the destination queue.
+ * In this case the error 'EM_ERR_TOONEAR' is returned - no EM error handler is
+ * called.
+ *
+ * Periodic timeout: a cancel may fail if attempted too close to the next
+ * period. This can be considered normal and indicates that at least one more
+ * timeout event will be delivered to the user. In this case, the error
+ * 'EM_ERR_TOONEAR' is returned and no valid event is output. The EM error
+ * handler is not called in this scenario.
+ * The user calls em_tmo_ack() for each received periodic timeout event. The
+ * em_tmo_ack() function returns 'EM_ERR_CANCELED' for the last timeout event
+ * from the canceled periodic timeout to let the user know that it is now OK to
+ * e.g. delete the timeout.
+ *
+ * @param      tmo        Timeout handle
+ * @param[out] cur_event  Event handle pointer to return the pending
+ *                        timeout event for a successful cancel or
+ *                        EM_EVENT_UNDEF if the cancel fails (e.g. called too late)
+ *
+ * @retval EM_OK           Cancel successful, timeout event returned.
+ * @retval EM_ERR_TOONEAR  Timeout already expired, too late to cancel.
+ *                         The EM error handler is not called.
+ * @retval (other_codes)   Failure
+ *
+ * @see em_tmo_set_abs(), em_tmo_set_rel(), em_tmo_set_periodic(),
+ *      em_tmo_set_periodic_ring()
+ * @see em_tmo_ack() for periodic timeouts
+ */
+em_status_t em_tmo_cancel(em_tmo_t tmo, em_event_t *cur_event);
+
+/**
+ * Acknowledge a periodic timeout
+ *
+ * All received periodic timeout events must be acknowledged with em_tmo_ack().
+ * No further timeout event(s) will be sent before the user has acknowledged
+ * the previous one, unless a ring timer is used.
+ *
+ * Timeout acknowledgment is usually done at the end of the EO-receive function
+ * to prevent race conditions (e.g. if the same event is also reused for the
+ * next timeout period). The implementation will adjust for the processing delay
+ * so that the time slot will not drift over time.
+ *
+ * If em_tmo_ack() is called too late, e.g. the next period(s) has already
+ * passed, the implementation will by default skip all the missed time slots and
+ * arm for the next future one, keeping the original start offset. The
+ * application can alter this behaviour with the flag 'EM_TMO_FLAG_NOSKIP' when
+ * creating a timeout: no past timeout will be skipped and each late
+ * acknowledgment will immediately trigger sending the next timeout event until
+ * the current time has been reached.
+ * Note that using 'EM_TMO_FLAG_NOSKIP' may result in an event storm if a large
+ * number of timeouts have been unacknowledged for a longer time (limited by the
+ * application response latency). Timing problems will not call the EM error
+ * handler.
+ *
+ * If the timeout has been canceled, but the cancel happened too late for the
+ * current period, the timeout event will still be delivered. The em_tmo_ack()
+ * call for this event will return 'EM_ERR_CANCELED' and does not call the error
+ * handler. This error code signals that the timeout event was the last one
+ * coming for that, now canceled, timeout.
+ *
+ * The application may reuse the same received timeout event or provide a new
+ * one for the next timeout via 'next_tmo_ev'. With a periodic ring timer, the
+ * actual received event must always be passed via 'next_tmo_ev'.
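+ *
+ * A typical sketch at the end of an EO-receive function (reusing the received
+ * event for the next period; 'tmo' e.g. obtained via em_tmo_get_type()):
+ * @code
+ *	em_status_t stat = em_tmo_ack(tmo, event);
+ *
+ *	if (stat == EM_ERR_CANCELED) {
+ *		// last event of a canceled tmo: the event stays with the user
+ *		// and the tmo can now be deleted
+ *	}
+ * @endcode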
+ *
+ * The given event should not be touched after calling this function until it
+ * has been received again, or until the timeout is successfully canceled and
+ * the event returned.
+ *
+ * A regular periodic timeout (i.e. not a ring one) will stop if em_tmo_ack()
+ * returns an error that is not timing related. Unless the timeout was
+ * canceled, the implementation will call the EM error handler in this case
+ * (the error/exception can also be handled there).
+ *
+ * em_tmo_ack() can only be used with periodic timeouts.
+ *
+ * @param tmo          Timeout handle
+ * @param next_tmo_ev  Next timeout event handle.
+ *                     Can be the received one for regular periodic timeouts.
+ *                     Must be the received one for periodic ring timeouts.
+ *
+ * @retval EM_OK            Success, event taken.
+ * @retval EM_ERR_CANCELED  Timeout canceled, this was the last event - no
+ *                          further timeout events coming, event not taken.
+ * @retval (other_codes)    Failure, event not taken.
+ */
+em_status_t em_tmo_ack(em_tmo_t tmo, em_event_t next_tmo_ev);
+
+/**
+ * Get a list of currently active timers.
+ *
+ * The timer handles returned via 'tmr_list' can be used for further timer
+ * queries or to destroy existing timers.
+ *
+ * The return value always reflects the actual number of timers in the
+ * EM instance, but the output parameter 'tmr_list' is only written up to the
+ * given 'max' length.
+ *
+ * Note that the return value (number of timers) can be greater than the given
+ * 'max'. It is the user's responsibility to check the return value against the
+ * given 'max'.
+ *
+ * To only get the current number of active timers, without any timer handles
+ * output, use the following: num_timers = em_timer_get_all(NULL, 0);
+ *
+ * @param[out] tmr_list  Pointer to an array of timer handles.
+ *                       Use NULL if only interested in the return value.
+ * @param      max       Max number of handles that can be written into
+ *                       'tmr_list'. 'max' is ignored if 'tmr_list' is NULL.
+ *
+ * @return The number of active timers
+ */
+int em_timer_get_all(em_timer_t *tmr_list, int max);
+
+/**
+ * Get timer attributes
+ *
+ * Returns the actual attributes of the given timer.
+ *
+ * @param      tmr       Timer handle
+ * @param[out] tmr_attr  Pointer to em_timer_attr_t to fill
+ *
+ * @return EM_OK on success
+ */
+em_status_t em_timer_get_attr(em_timer_t tmr, em_timer_attr_t *tmr_attr);
+
+/**
+ * Returns the timer frequency, i.e. ticks per second, for the given timer.
+ *
+ * Can be used to convert real time to timer specific ticks.
+ *
+ * @param tmr  Timer handle
+ *
+ * @return Ticks per second (Hz), or 0 for a non-existing timer
+ */
+uint64_t em_timer_get_freq(em_timer_t tmr);
+
+/**
+ * Convert timer ticks to nanoseconds (ns)
+ *
+ * @param tmr    Valid timer handle
+ * @param ticks  Timer specific ticks to convert
+ *
+ * @return The converted amount in ns
+ */
+uint64_t em_timer_tick_to_ns(em_timer_t tmr, em_timer_tick_t ticks);
+
+/**
+ * Convert nanoseconds (ns) to timer ticks
+ *
+ * @param tmr  Valid timer handle
+ * @param ns   ns value to convert
+ *
+ * @return The converted amount in timer ticks
+ */
+em_timer_tick_t em_timer_ns_to_tick(em_timer_t tmr, uint64_t ns);
+
+/**
+ * Returns the current state of the given timeout.
+ *
+ * Note that the returned state may change at any time if the timeout expires
+ * or is manipulated by other threads.
+ *
+ * @param tmo  Timeout handle
+ *
+ * @return The current timeout state (EM_TMO_STATE_UNKNOWN on error)
+ *
+ * @see em_tmo_state_t
+ */
+em_tmo_state_t em_tmo_get_state(em_tmo_t tmo);
+
+/**
+ * Returns the statistic counters for a timeout.
+ *
+ * Returns a snapshot of the current counters of the given timeout.
+ * Statistics can be accessed while the timeout is valid, i.e. the tmo has been
+ * created but not deleted.
+ *
+ * Counter support is optional. If counters are not supported, the function
+ * returns 'EM_ERR_NOT_IMPLEMENTED'.
+ * A quick way to detect whether counters are supported is to call the function
+ * with 'stat=NULL' and check the return value.
+ *
+ * @param      tmo   Timeout handle
+ * @param[out] stat  Pointer to em_tmo_stats_t to receive the values (NULL ok)
+ *
+ * @return EM_OK on success
+ */
+em_status_t em_tmo_get_stats(em_tmo_t tmo, em_tmo_stats_t *stat);
+
+/**
+ * Ask if the given event is currently used as a timeout indication event.
+ *
+ * This function can be used with any valid event handle to ask if it is used as
+ * a timeout indication event.
+ * The tmo type of an event is updated when the event passes through the timer
+ * API.
+ *
+ * @note Because a received timeout event is owned by the application, and does
+ * not necessarily pass through the timer API anymore, this type will not be
+ * reset until the event is freed, reused as another timeout or explicitly reset
+ * by setting the 'reset' argument to true. This reset should be done when
+ * re-using the received timeout event for something other than a timeout to
+ * avoid wrong interpretations.
+ *
+ * A successful timeout cancel (event returned) will reset the event type to
+ * 'EM_TMO_TYPE_NONE'.
+ *
+ * @note The 'reset' argument is ignored if the given event is of type
+ * 'EM_EVENT_TYPE_TIMER_IND'.
+ *
+ * The related tmo handle can be retrieved via the 'tmo' argument. This
+ * can be useful when calling em_tmo_ack() for periodic timeouts:
+ * @code
+ *	em_tmo_t tmo;
+ *
+ *	if (em_tmo_get_type(event, &tmo, false) == EM_TMO_TYPE_PERIODIC)
+ *		retval = em_tmo_ack(tmo, event);
+ * @endcode
+ *
+ * @param      event  Event handle to check.
+ * @param[out] tmo    em_tmo_t pointer to output the related tmo handle.
+ *                    Use NULL if not interested in the tmo handle.
+ * @param      reset  Set to 'true' to reset the event's tmo type to
+ *                    'EM_TMO_TYPE_NONE' to e.g. enable non-timer related reuse
+ *                    of the event.
+ *
+ * @return The type of the timeout or 'EM_TMO_TYPE_NONE' if the event is not
+ *         related to a timeout
+ *
+ * @see em_tmo_type_t
+ */
+em_tmo_type_t em_tmo_get_type(em_event_t event, em_tmo_t *tmo, bool reset);
+
+/**
+ * Returns the optional user pointer for a periodic ring timeout.
+ *
+ * Can only be used with an event received as a timeout event for a periodic
+ * ring, i.e. for events of type 'EM_EVENT_TYPE_TIMER_IND' only. Other event
+ * types will return NULL.
+ *
+ * @param      event  Event received as a timeout
+ * @param[out] tmo    Optionally returns the associated tmo handle. NULL ok.
+ *
+ * @return The pointer given when creating the associated tmo or
+ *         NULL if the event is not a ring timeout event.
+ */
+void *em_tmo_get_userptr(em_event_t event, em_tmo_t *tmo);
+
+/**
+ * Returns the associated timer handle from a timeout handle
+ *
+ * The associated timer handle is returned for a valid timeout. Can be used to
+ * e.g. read the current timer tick without having the timer handle:
+ * @code
+ *	em_timer_tick_t tick = em_timer_current_tick(em_tmo_get_timer(tmo));
+ * @endcode
+ *
+ * @param tmo  Valid timeout handle
+ *
+ * @return The associated timer handle or
+ *         'EM_TIMER_UNDEF' if the tmo is not valid
+ */
+em_timer_t em_tmo_get_timer(em_tmo_t tmo);
+
+/**
+ * Convert a timer handle to an unsigned integer.
+ *
+ * @param timer  Timer handle to be converted.
+ * @return A 'uint64_t' value that can be used to print/display the handle + * + * @note This routine is intended to be used for diagnostic purposes + * to enable applications to e.g. generate a printable value that represents + * an em_timer_t handle. + */ +uint64_t em_timer_to_u64(em_timer_t timer); + +/** + * Convert a timeout handle to an unsigned integer. + * + * @param tmo Timeout handle to be converted. + * @return A 'uint64_t' value that can be used to print/display the handle. + * + * @note This routine is intended to be used for diagnostic purposes + * to enable applications to e.g. generate a printable value that represents + * an em_tmo_t handle. + */ +uint64_t em_tmo_to_u64(em_tmo_t tmo); + +/** + * @} + */ +#ifdef __cplusplus +} +#endif + +#pragma GCC visibility pop +#endif /* EVENT_MACHINE_TIMER_H_ */ diff --git a/include/event_machine/platform/event_machine_init.h b/include/event_machine/platform/event_machine_init.h index 13169dfd..7ee6a810 100644 --- a/include/event_machine/platform/event_machine_init.h +++ b/include/event_machine/platform/event_machine_init.h @@ -1,320 +1,328 @@ -/* - * Copyright (c) 2018, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef EVENT_MACHINE_INIT_H_ -#define EVENT_MACHINE_INIT_H_ - -#pragma GCC visibility push(default) - -/** - * @file - * @defgroup init Initialization and termination - * Event Machine initialization and termination - * @{ - * - * The Event Machine must be initialized before use. One core that will be part - * of EM calls em_init(). Additionally, after the user has set up the threads, - * or processes and pinned those to HW-cores, each participating core, i.e. - * EM-core, needs to run em_init_core(). Only now is an EM-core ready to use the - * other EM API functions and can finally enter the dispatch-loop via - * em_dispath() on each core that should handle events. - * - * The EM termination sequence runs in the opposite order: each core needs to - * call em_term_core() before one last call to em_term(). 
- * - * The 'em_conf_t' type given to em_init() and em_term() is HW/platform specific - * and is defined in event_machine_hw_types.h - * - * Do not include this from the application, event_machine.h will - * do it for you. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Event Machine run-time configuration options given at startup to em_init() - * - * The 'em_conf_t' struct should be initialized with em_conf_init() before use. - * This initialization provides better backwards compatibility since all options - * will be set to default values. - * The user must further set the needed configuration and call em_init(): - * - * @code - * em_conf_t conf; - * em_conf_init(&conf); // init with default values - * conf.thread_per_core = 1; - * ... - * conf.core_count = N; - * conf.phys_mask = set N bits; // use em_core_mask_...() functions to set - * ... - * ret = em_init(&conf); // on one core - * ... - * ret = em_init_core(); // on each of the 'conf.core_count' cores - * @endcode - * - * Content is copied into EM by em_init(). - * - * @note Several EM options are configured through compile-time defines. - * Run-time options allow using the same EM-lib with different configs. - * Also see the overridable EM runtime config file values, - * default file: config/em-odp.config - * - * @see em_conf_init(), em_init() - */ -typedef struct { - /** - * EM device id - use different device ids for each EM instance or - * remote EM device that need to communicate with each other. - * Default value is 0. - */ - uint16_t device_id; - - /** - * Event Timer: enable=1, disable=0. - * Default value is 0 (disable). - */ - int event_timer; - - /** - * RunMode: EM run with one thread per core. - * Set 'true' to select thread-per-core mode. - * This is the recommended mode, but the user must explicitly set it to - * enable. Default value is 0. - * @note The user must set either 'thread_per_core' or - * 'process_per_core' but not both. - */ - int thread_per_core; - - /** - * RunMode: EM run with one process per core. - * Set 'true' to select process-per-core mode. Default value is 0. - * @note The user must set either 'thread_per_core' or - * 'process_per_core' but not both. - */ - int process_per_core; - - /** - * Number of EM-cores (== number of EM-threads or EM-processes). - * The 'core_count' must match the number of bits set in 'phys_mask'. - * EM-cores will be enumerated from 0 to 'core_count-1' regardless of - * the actual physical core ids. - * Default value is 0 and needs to be changed by the user. - */ - int core_count; - - /** - * Physical core mask, exactly listing the physical CPU cores to be used - * by EM (this is a physical core mask even though the 'em_core_mask_t' - * type is used). - * Default value is all-0 and needs to be changed by the user. - * @note EM otherwise operates on logical cores, i.e. enumerated - * contiguously from 0 to 'core_count-1' and a logical - * EM core mask has 'core_count' consecutively set bits. - * Example - physical mask vs. corresponding EM core mask: - * .core_count = 8 - * .physmask: 0xf0f0 (binary: 1111 0000 1111 0000 - 8 set bits) - * = 8 phys-cores (phys-cores 4-7,12-15) - * ==> EM-mask: 0x00ff (0000 0000 1111 1111 binary) - 8 EM cores - * = 8 EM-cores (EM-cores 0-7) - */ - em_core_mask_t phys_mask; - - /** - * Pool configuration for the EM default pool (EM_POOL_DEFAULT). - * Default value is set by em_pool_cfg_init() and needs to be changed - * by the user. 
- * - * Note that if the default pool configuration is also given in the - * config file through option 'startup_pools', it will override this - * default pool configuration. - */ - em_pool_cfg_t default_pool_cfg; - - /** - * EM log functions. - * Default values are NULL and causes EM to use internal default - * log-functions. - */ - struct { - /** EM log function, user overridable, variable number of args*/ - em_log_func_t log_fn; - /** EM log function, user overridable, va_list */ - em_vlog_func_t vlog_fn; - } log; - - /** EM event/pkt input related functions and config */ - struct { - /** - * User provided function, called from within the EM-dispatch - * loop, mainly for polling various input sources for events or - * pkts and then enqueue them into EM. - * Set to 'NULL' if not needed (default). - */ - em_input_poll_func_t input_poll_fn; - /** - * EM core mask to control which EM-cores (0 to 'core_count-1') - * input_poll_fn() will be called on. - * The provided mask has to be equal or a subset of the - * EM core mask with all 'core_count' bits set. - * A zero mask means execution on _all_ EM cores (default). - */ - em_core_mask_t input_poll_mask; - } input; - - /** EM event/pkt output related functions and config */ - struct { - /** - * User provided function, called from within the EM-dispatch - * loop, mainly for 'periodical' draining of buffered output to - * make sure events/pkts are eventually sent out even if the - * rate is low or stops for a while. - * Set to 'NULL' if not needed (default). - */ - em_output_drain_func_t output_drain_fn; - /** - * EM core mask to control which EM-cores (0 to 'core_count-1') - * output_drain_fn() will be called on. - * The provided mask has to be equal or a subset of the - * EM core mask with all 'core_count' bits set. - * A zero mask means execution on _all_ EM cores (default). - */ - em_core_mask_t output_drain_mask; - } output; - - /** - * User provided API callback hooks. - * Set only the needed hooks to avoid performance degradation. - * Only used if EM_API_HOOKS_ENABLE != 0 - */ - em_api_hooks_t api_hooks; - - /** - * User provided idle callback hooks. - * Set only the needed hooks to avoid performance degradation. - * Only used if EM_IDLE_HOOKS_ENABLE != 0 - */ - em_idle_hooks_t idle_hooks; -} em_conf_t; - -/** - * Initialize configuration parameters for em_init() - * - * Initialize em_conf_t to default values for all fields. - * After initialization, the user further needs to set the mandatory fields of - * 'em_conf_t' before calling em_init(). - * Always initialize 'conf' first with em_conf_init(&conf) to - * ensure backwards compatibility with potentially added new options. - * - * @param conf Address of the em_conf_t to be initialized - * - * @see em_init() - */ -void em_conf_init(em_conf_t *conf); - -/** - * Initialize the Event Machine. - * - * Must be called once at startup. Additionally each EM-core needs to call the - * em_init_core() function before using any further EM API functions/resources. - * - * @param conf EM runtime config options, - * HW/platform specific: see event_machine_hw_types.h - * - * @return EM_OK if successful. - * - * @see em_init_core() for EM-core specific init after em_init(). - */ -em_status_t em_init(const em_conf_t *conf); - -/** - * Initialize an EM-core. - * - * Must be called once by each EM-core (= process, thread or bare metal core). - * EM queues, EOs, queue groups etc. can be created after a successful return - * from this function. 
- * Note, the thread or process must be pinned to a unique physical core before - * running em_init_core(). - * - * @return EM_OK if successful. - * - * @see em_init() - */ -em_status_t em_init_core(void); - -/** - * Terminate the Event Machine. - * - * Called once at exit. Additionally, before the one call to em_term(), - * each EM-core needs to call the em_term_core() function to free up local - * resources. - * - * @param conf EM runtime config options - * - * @return EM_OK if successful. - * - * @see em_term_core() for EM-core specific termination before em_term(). - */ -em_status_t em_term(const em_conf_t *conf); - -/** - * Terminate an EM-core. - * - * Called by each EM-core (= process, thread or bare metal core) - * before one call to em_term(). - * - * @return EM_OK if successful. - * - * @see em_term() - */ -em_status_t em_term_core(void); - -/** - * Return the EM device-id for this instance. - * - * This is a convenience function that returns the EM device-id given by the - * user to em_init() via the em_conf_t::device_id field. - * - * The function should only be called after a successful EM initialization. - * - * @return the device-id of this EM instance. - */ -uint16_t em_device_id(void); - -/** - * @} - */ -#ifdef __cplusplus -} -#endif - -#pragma GCC visibility pop -#endif /* EVENT_MACHINE_INIT_H_ */ +/* + * Copyright (c) 2018, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef EVENT_MACHINE_INIT_H_ +#define EVENT_MACHINE_INIT_H_ + +#pragma GCC visibility push(default) + +/** + * @file + * @defgroup init Initialization and termination + * Event Machine initialization and termination + * @{ + * + * The Event Machine must be initialized before use. One core that will be part + * of EM calls em_init(). Additionally, after the user has set up the threads, + * or processes and pinned those to HW-cores, each participating core, i.e. + * EM-core, needs to run em_init_core(). 
Only now is an EM-core ready to use the other EM API functions and can
+ * finally enter the dispatch-loop via em_dispatch() on each core that should
+ * handle events.
+ *
+ * The EM termination sequence runs in the opposite order: each core needs to
+ * call em_term_core() before one last call to em_term().
+ *
+ * The 'em_conf_t' type given to em_init() and em_term() is HW/platform specific
+ * and is defined in event_machine_hw_types.h.
+ *
+ * Do not include this from the application, event_machine.h will
+ * do it for you.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Event Machine run-time configuration options given at startup to em_init()
+ *
+ * The 'em_conf_t' struct should be initialized with em_conf_init() before use.
+ * This initialization provides better backwards compatibility since all options
+ * will be set to default values.
+ * The user must further set the needed configuration and call em_init():
+ *
+ * @code
+ *	em_conf_t conf;
+ *	em_conf_init(&conf); // init with default values
+ *	conf.thread_per_core = 1;
+ *	...
+ *	conf.core_count = N;
+ *	conf.phys_mask = set N bits; // use em_core_mask_...() functions to set
+ *	...
+ *	ret = em_init(&conf); // on one core
+ *	...
+ *	ret = em_init_core(); // on each of the 'conf.core_count' cores
+ * @endcode
+ *
+ * Content is copied into EM by em_init().
+ *
+ * @note Several EM options are configured through compile-time defines.
+ *       Run-time options allow using the same EM-lib with different configs.
+ *       Also see the overridable EM runtime config file values,
+ *       default file: config/em-odp.config
+ *
+ * @see em_conf_init(), em_init()
+ */
+typedef struct {
+	/**
+	 * EM device id - use different device ids for each EM instance or
+	 * remote EM device that needs to communicate with the others.
+	 * Default value is 0.
+	 */
+	uint16_t device_id;
+
+	/**
+	 * Event Timer: enable=1, disable=0.
+	 * Default value is 0 (disabled).
+	 */
+	int event_timer;
+
+	/**
+	 * RunMode: EM runs with one thread per core.
+	 * Set 'true' to select thread-per-core mode.
+	 * This is the recommended mode, but the user must explicitly set it to
+	 * enable it. Default value is 0.
+	 * @note The user must set either 'thread_per_core' or
+	 *       'process_per_core' but not both.
+	 */
+	int thread_per_core;
+
+	/**
+	 * RunMode: EM runs with one process per core.
+	 * Set 'true' to select process-per-core mode. Default value is 0.
+	 * @note The user must set either 'thread_per_core' or
+	 *       'process_per_core' but not both.
+	 */
+	int process_per_core;
+
+	/**
+	 * Number of EM-cores (== number of EM-threads or EM-processes).
+	 * The 'core_count' must match the number of bits set in 'phys_mask'.
+	 * EM-cores will be enumerated from 0 to 'core_count-1' regardless of
+	 * the actual physical core ids.
+	 * Default value is 0 and needs to be changed by the user.
+	 */
+	int core_count;
+
+	/**
+	 * Physical core mask, exactly listing the physical CPU cores to be used
+	 * by EM (this is a physical core mask even though the 'em_core_mask_t'
+	 * type is used).
+	 * Default value is all-0 and needs to be changed by the user.
+	 * @note EM otherwise operates on logical cores, i.e. enumerated
+	 *       contiguously from 0 to 'core_count-1' and a logical
+	 *       EM core mask has 'core_count' consecutively set bits.
+	 *       Example - physical mask vs. corresponding EM core mask:
+	 *       .core_count = 8
+	 *       .physmask: 0xf0f0 (binary: 1111 0000 1111 0000 - 8 set bits)
+	 *       = 8 phys-cores (phys-cores 4-7, 12-15)
+	 *       ==> EM-mask: 0x00ff (binary: 0000 0000 1111 1111) - 8 EM cores
+	 *       = 8 EM-cores (EM-cores 0-7)
+	 */
+	em_core_mask_t phys_mask;
+
+	/**
+	 * Pool configuration for the EM default pool (EM_POOL_DEFAULT).
+	 * Default value is set by em_pool_cfg_init() and needs to be changed
+	 * by the user.
+	 *
+	 * Note that if the default pool configuration is also given in the
+	 * config file through the option 'startup_pools', it will override this
+	 * default pool configuration.
+	 */
+	em_pool_cfg_t default_pool_cfg;
+
+	/**
+	 * EM log functions.
+	 * Default values are NULL and cause EM to use the internal default
+	 * log-functions.
+	 */
+	struct {
+		/** EM log function, user overridable, variable number of args */
+		em_log_func_t log_fn;
+		/** EM log function, user overridable, va_list */
+		em_vlog_func_t vlog_fn;
+	} log;
+
+	/** EM event/pkt input related functions and config */
+	struct {
+		/**
+		 * User provided function, called from within the EM-dispatch
+		 * loop, mainly for polling various input sources for events or
+		 * pkts and then enqueueing them into EM.
+		 * Set to 'NULL' if not needed (default).
+		 */
+		em_input_poll_func_t input_poll_fn;
+		/**
+		 * EM core mask to control which EM-cores (0 to 'core_count-1')
+		 * input_poll_fn() will be called on.
+		 * The provided mask has to be equal to or a subset of the
+		 * EM core mask with all 'core_count' bits set.
+		 * A zero mask means execution on _all_ EM cores (default).
+		 */
+		em_core_mask_t input_poll_mask;
+	} input;
+
+	/** EM event/pkt output related functions and config */
+	struct {
+		/**
+		 * User provided function, called from within the EM-dispatch
+		 * loop, mainly for 'periodical' draining of buffered output to
+		 * make sure events/pkts are eventually sent out even if the
+		 * rate is low or stops for a while.
+		 * Set to 'NULL' if not needed (default).
+		 */
+		em_output_drain_func_t output_drain_fn;
+		/**
+		 * EM core mask to control which EM-cores (0 to 'core_count-1')
+		 * output_drain_fn() will be called on.
+		 * The provided mask has to be equal to or a subset of the
+		 * EM core mask with all 'core_count' bits set.
+		 * A zero mask means execution on _all_ EM cores (default).
+		 */
+		em_core_mask_t output_drain_mask;
+	} output;
+
+	/**
+	 * User provided API callback hooks.
+	 * Set only the needed hooks to avoid performance degradation.
+	 * Only used if EM_API_HOOKS_ENABLE != 0
+	 */
+	em_api_hooks_t api_hooks;
+
+	/**
+	 * User provided idle callback hooks.
+	 * Set only the needed hooks to avoid performance degradation.
+	 * Only used if EM_IDLE_HOOKS_ENABLE != 0
+	 */
+	em_idle_hooks_t idle_hooks;
+
+	/**
+	 * Internal check - don't touch!
+	 *
+	 * EM will verify that em_conf_init() has been called
+	 * before calling em_init().
+	 */
+	uint32_t __internal_check;
+} em_conf_t;
+
+/**
+ * Initialize configuration parameters for em_init()
+ *
+ * Initialize em_conf_t to default values for all fields.
+ * After initialization, the user further needs to set the mandatory fields of
+ * 'em_conf_t' before calling em_init().
+ * Always initialize 'conf' first with em_conf_init(&conf) to
+ * ensure backwards compatibility with potentially added new options.
+ *
+ * @param conf  Address of the em_conf_t to be initialized
+ *
+ * @see em_init()
+ */
+void em_conf_init(em_conf_t *conf);
+
+/**
+ * Initialize the Event Machine.
+ *
+ * Must be called once at startup.
Additionally each EM-core needs to call the + * em_init_core() function before using any further EM API functions/resources. + * + * @param conf EM runtime config options, + * HW/platform specific: see event_machine_hw_types.h + * + * @return EM_OK if successful. + * + * @see em_init_core() for EM-core specific init after em_init(). + */ +em_status_t em_init(const em_conf_t *conf); + +/** + * Initialize an EM-core. + * + * Must be called once by each EM-core (= process, thread or bare metal core). + * EM queues, EOs, queue groups etc. can be created after a successful return + * from this function. + * Note, the thread or process must be pinned to a unique physical core before + * running em_init_core(). + * + * @return EM_OK if successful. + * + * @see em_init() + */ +em_status_t em_init_core(void); + +/** + * Terminate the Event Machine. + * + * Called once at exit. Additionally, before the one call to em_term(), + * each EM-core needs to call the em_term_core() function to free up local + * resources. + * + * @param conf EM runtime config options + * + * @return EM_OK if successful. + * + * @see em_term_core() for EM-core specific termination before em_term(). + */ +em_status_t em_term(const em_conf_t *conf); + +/** + * Terminate an EM-core. + * + * Called by each EM-core (= process, thread or bare metal core) + * before one call to em_term(). + * + * @return EM_OK if successful. + * + * @see em_term() + */ +em_status_t em_term_core(void); + +/** + * Return the EM device-id for this instance. + * + * This is a convenience function that returns the EM device-id given by the + * user to em_init() via the em_conf_t::device_id field. + * + * The function should only be called after a successful EM initialization. + * + * @return the device-id of this EM instance. + */ +uint16_t em_device_id(void); + +/** + * @} + */ +#ifdef __cplusplus +} +#endif + +#pragma GCC visibility pop +#endif /* EVENT_MACHINE_INIT_H_ */ diff --git a/include/event_machine/platform/event_machine_odp_ext.h b/include/event_machine/platform/event_machine_odp_ext.h index a4fc8f1f..c483ce13 100644 --- a/include/event_machine/platform/event_machine_odp_ext.h +++ b/include/event_machine/platform/event_machine_odp_ext.h @@ -1,315 +1,315 @@ -/* - * Copyright (c) 2015-2021, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * @defgroup em_odp_ext Conversions & extensions - * Event Machine ODP API extensions and conversion functions between EM and ODP - * @{ - */ - -#ifndef EVENT_MACHINE_ODP_EXT_H -#define EVENT_MACHINE_ODP_EXT_H - -#pragma GCC visibility push(default) - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include - -/** - * Get the associated ODP queue. - * - * The given EM queue must have been created with em_queue_create...() APIs. - * - * @param queue EM queue - * - * @return odp queue if successful, ODP_QUEUE_INVALID on error - */ -odp_queue_t em_odp_queue_odp(em_queue_t queue); - -/** - * Get the associated EM queue. - * - * The associated EM queue must have been created with em_queue_create...() APIs - * - * @param queue ODP queue - * - * @return em queue if successful, EM_QUEUE_UNDEF on error - */ -em_queue_t em_odp_queue_em(odp_queue_t queue); - -/** - * @brief Map the given scheduled ODP pktin event queues to new EM queues. - * - * Creates new EM queues and maps them to use the given scheduled ODP pktin - * event queues. - * Enables direct scheduling of packets as EM events via EM queues. - * EM queues based on scheduled ODP pktin queues are a bit special in how they - * are created and how they are deleted: - * - creation is done via this function by providing the already set up - * scheduled ODP pktin event queues to use. - * - deletion of one of the returned EM queues will not delete the underlying - * ODP pktin event queue. The ODP queues in question are deleted when - * the ODP pktio is terminated. - * The scheduled ODP pktin event queues must have been set up with an - * ODP schedule group that belongs to an existing EM queue group. Also the used - * priority must mappable to an EM priority. - * - * Setup example: - * @code - * // Configure ODP pktin queues - * odp_pktin_queue_param_t pktin_queue_param; - * odp_pktin_queue_param_init(&pktin_queue_param); - * pktin_queue_param.num_queues = num; - * pktin_queue_param.queue_param.type = ODP_QUEUE_TYPE_SCHED; - * pktin_queue_param.queue_param.sched.prio = ODP prio mappable to EM prio - * pktin_queue_param.queue_param.sched.sync = PARALLEL | ATOMIC | ORDERED; - * pktin_queue_param.queue_param.sched.group = em_odp_qgrp2odp(EM qgroup); - * ... - * ret = odp_pktin_queue_config(pktio, &pktin_queue_param); - * if (ret < 0) - * error(...); - * - * // Obtain ODP pktin event queues used for scheduled packet input - * odp_queue_t pktin_sched_queues[num]; - * ret = odp_pktin_event_queue(pktio, pktin_sched_queues['out'], num); - * if (ret != num) - * error(...); - * - * // Create EM queues mapped to the scheduled ODP pktin event queues - * em_queue_t queues_em[num]; - * ret = em_odp_pktin_event_queues2em(pktin_sched_queues['in'], - * queues_em['out'], num); - * if (ret != num) - * error(...); - * - * // Add the EM queues to an EM EO and once the EO has been started it - * // will receive pktio events directly from the scheduler. 
- * for (int i = 0; i < num; i++) - * err = em_eo_add_queue_sync(eo, queues_em); - * @endcode - * - * @param[in] odp_pktin_evqueues Array of ODP pktin event queues to convert to - * EM-queues. The array must contain 'num' valid - * ODP-queue handles (as returned by the - * odp_pktin_event_queue() function). - * @param[out] queues Output array into which the corresponding - * EM-queue handles are written. - * Array must fit 'num' entries. - * @param num Number of entries in 'odp_pktin_evqueues[]' - * and 'queues[]'. - * @return int Number of EM queues created that correspond to the given - * ODP pktin event queues - * @retval <0 on failure - */ -int em_odp_pktin_event_queues2em(const odp_queue_t odp_pktin_evqueues[/*num*/], - em_queue_t queues[/*out:num*/], int num); - -/** - * Get the EM event header size. - * - * Needed e.g. when configuring a separate ODP packet pool and have pktio - * allocate events usable by EM from there: - * @code - * odp_pool_param_t::pkt.uarea_size = em_odp_event_hdr_size(); - * @endcode - * - * @return EM event header size. - */ -uint32_t em_odp_event_hdr_size(void); - -/** - * Convert EM event handle to ODP event handle. - * - * @param event EM-event handle - * - * @return ODP event handle. - */ -odp_event_t em_odp_event2odp(em_event_t event); - -/** - * Convert EM event handles to ODP event handles - * - * @param events Array of EM-events to convert to ODP-events. - * The 'events[]' array must contain 'num' valid - * event handles. - * @param[out] odp_events Output array into which the corresponding ODP-event - * handles are written. Array must fit 'num' entries. - * @param num Number of entries in 'events[]' and 'odp_events[]'. - */ -void em_odp_events2odp(const em_event_t events[/*num*/], - odp_event_t odp_events[/*out:num*/], int num); - -/** - * Convert ODP event handle to EM event handle. - * - * The event must have been allocated by EM originally. - * - * @param odp_event ODP-event handle - * - * @return EM event handle. - */ -em_event_t em_odp_event2em(odp_event_t odp_event); - -/** - * Convert EM event handles to ODP event handles - * - * @param odp_events Array of ODP-events to convert to EM-events. - * The 'odp_events[]' array must contain 'num' valid - * ODP-event handles. - * @param[out] events Output array into which the corresponding EM-event - * handles are written. Array must fit 'num' entries. - * @param num Number of entries in 'odp_events[]' and 'events[]'. - */ -void em_odp_events2em(const odp_event_t odp_events[/*num*/], - em_event_t events[/*out:num*/], int num); - -/** - * @brief Get the ODP pools used as subpools in a given EM event pool. - * - * An EM event pool consists of 1 to 'EM_MAX_SUBPOOLS' subpools. Each subpool - * is an ODP pool. This function outputs the ODP pool handles of these subpools - * into a user-provided array and returns the number of handles written. - * - * The obtained ODP pools must not be deleted or alterede outside of EM, - * e.g. these ODP pools must only be deleted as part of an EM event pool - * using em_pool_delete(). - * - * ODP pool handles obtained through this function can be used to - * - configure ODP pktio to use an ODP pool created via EM (allows for - * better ESV tracking) - * - print ODP-level pool statistics with ODP APIs etc. - * - * Note that direct allocations and free:s via ODP APIs will bypass - * EM checks (e.g. 
ESV) and might cause errors unless properely handled: - * - use em_odp_event2em() to initialize as an EM event - * - use em_event_mark_free() before ODP-free operations (SW- or HW-free) - * - * @param pool EM event pool handle. - * @param[out] odp_pools Output array to be filled with the ODP pools used as - * subpools in the given EM event pool. The array must - * fit 'num' entries. - * @param num Number of entries in the 'odp_pools[]' array. - * Using 'num=EM_MAX_SUBPOOLS' will always be large - * enough to fit all subpools in the EM event pool. - * - * @return The number of ODP pools filled into 'odp_pools[]' - */ -int em_odp_pool2odp(em_pool_t pool, odp_pool_t odp_pools[/*out*/], int num); - -/** - * @brief Get the EM event pool that the given ODP pool belongs to - * - * An EM event pool consists of 1 to 'EM_MAX_SUBPOOLS' subpools. Each subpool - * is an ODP pool. This function returns the EM event pool that contains the - * given ODP pool as a subpool. - * - * @param odp_pool ODP pool - * - * @return The EM event pool that contains the subpool 'odp_pool' or - * EM_POOL_UNDEF if 'odp_pool' is not part of any EM event pool. - */ -em_pool_t em_odp_pool2em(odp_pool_t odp_pool); - -/** - * @brief Get the ODP schedule group that corresponds to the given EM queue group - * - * @param queue_group - * - * @return ODP schedule group handle - * @retval ODP_SCHED_GROUP_INVALID on error - */ -odp_schedule_group_t em_odp_qgrp2odp(em_queue_group_t queue_group); - -/** - * Enqueue external packets into EM - * - * Enqueue packets from outside of EM into EM queues for processing. - * This function will initialize the odp packets properly as EM events before - * enqueueing them into EM. - * The odp packets might be polled from pktio or some other external source, - * e.g. the em_conf_t::input.input_poll_fn() function (see em_init()) can use - * this API to enqueue polled packets into EM queues. - * Inside EM, the application must use em_send...() instead to send/enqueue - * events into EM queues. - * - * @param pkt_tbl Array of external ODP-packets to enqueue into EM as events. - * The 'pkt_tbl[]' array must contain 'num' valid ODP packet - * handles. - * @param num The number of packets in the 'pkt_tbl[]' array, must be >0. - * @param queue EM queue into which to send/enqueue the packets as EM-events. - * - * @return The number of ODP packets successfully send/enqueued as EM-events - */ -int em_odp_pkt_enqueue(const odp_packet_t pkt_tbl[/*num*/], int num, - em_queue_t queue); - -/** - * @brief Get the odp timer_pool from EM timer handle - * - * Returns the corresponding odp timer_pool from a valid EM timer handle. - * This can be used for e.g. debugging. - * - * DO NOT use any odp apis directly to modify the odp timer_pool created by EM. - * - * @param tmr em timer handle - * - * @return odp timer_pool or ODP_TIMER_POOL_INVALID on failure - */ -odp_timer_pool_t em_odp_timer2odp(em_timer_t tmr); - -/** - * @brief Get the odp timer from EM timeout handle - * - * Returns the corresponding odp timer from a valid EM tmo handle. - * This can be used for e.g. debugging. - * - * DO NOT use any odp apis directly to modify the odp timer created by EM. - * - * @param tmo em timeout handle - * - * @return odp timer or ODP_TIMER_INVALID on failure - */ -odp_timer_t em_odp_tmo2odp(em_tmo_t tmo); - -/** - * @} - */ -#ifdef __cplusplus -} -#endif - -#pragma GCC visibility pop -#endif /* EVENT_MACHINE_ODP_EXT_H */ +/* + * Copyright (c) 2015-2021, Nokia Solutions and Networks + * All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of the copyright holder nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ * @defgroup em_odp_ext Conversions & extensions
+ * Event Machine ODP API extensions and conversion functions between EM and ODP
+ * @{
+ */
+
+#ifndef EVENT_MACHINE_ODP_EXT_H
+#define EVENT_MACHINE_ODP_EXT_H
+
+#pragma GCC visibility push(default)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_api.h>
+#include <event_machine/api/event_machine_types.h>
+#include <event_machine/platform/event_machine_hw_types.h>
+
+/**
+ * Get the associated ODP queue.
+ *
+ * The given EM queue must have been created with the em_queue_create...() APIs.
+ *
+ * @param queue  EM queue
+ *
+ * @return odp queue if successful, ODP_QUEUE_INVALID on error
+ */
+odp_queue_t em_odp_queue_odp(em_queue_t queue);
+
+/**
+ * Get the associated EM queue.
+ *
+ * The associated EM queue must have been created with the
+ * em_queue_create...() APIs.
+ *
+ * @param queue  ODP queue
+ *
+ * @return em queue if successful, EM_QUEUE_UNDEF on error
+ */
+em_queue_t em_odp_queue_em(odp_queue_t queue);
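+
+/*
+ * Usage sketch (illustration only, not part of the API): round-trip between
+ * an EM queue and its underlying ODP queue, e.g. for debug logging.
+ * Assumes 'em_q' is a valid EM queue created with an em_queue_create...() API.
+ *
+ * @code
+ *	odp_queue_t odp_q = em_odp_queue_odp(em_q);
+ *
+ *	if (odp_q == ODP_QUEUE_INVALID)
+ *		error(...);
+ *	// Print the underlying ODP queue handle
+ *	printf("ODP queue handle: %" PRIu64 "\n", odp_queue_to_u64(odp_q));
+ *	// Converting back gives the original EM queue
+ *	em_queue_t em_q_back = em_odp_queue_em(odp_q); // em_q_back == em_q
+ * @endcode
+ */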
+
+/**
+ * @brief Map the given scheduled ODP pktin event queues to new EM queues.
+ *
+ * Creates new EM queues and maps them to use the given scheduled ODP pktin
+ * event queues.
+ * Enables direct scheduling of packets as EM events via EM queues.
+ * EM queues based on scheduled ODP pktin queues are a bit special in how they
+ * are created and how they are deleted:
+ * - creation is done via this function by providing the already set up
+ *   scheduled ODP pktin event queues to use.
+ * - deletion of one of the returned EM queues will not delete the underlying
+ *   ODP pktin event queue. The ODP queues in question are deleted when
+ *   the ODP pktio is terminated.
+ * The scheduled ODP pktin event queues must have been set up with an
+ * ODP schedule group that belongs to an existing EM queue group. Also the used
+ * priority must be mappable to an EM priority.
+ *
+ * Setup example:
+ * @code
+ *	// Configure ODP pktin queues
+ *	odp_pktin_queue_param_t pktin_queue_param;
+ *	odp_pktin_queue_param_init(&pktin_queue_param);
+ *	pktin_queue_param.num_queues = num;
+ *	pktin_queue_param.queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ *	pktin_queue_param.queue_param.sched.prio = ODP prio mappable to EM prio
+ *	pktin_queue_param.queue_param.sched.sync = PARALLEL | ATOMIC | ORDERED;
+ *	pktin_queue_param.queue_param.sched.group = em_odp_qgrp2odp(EM qgroup);
+ *	...
+ *	ret = odp_pktin_queue_config(pktio, &pktin_queue_param);
+ *	if (ret < 0)
+ *		error(...);
+ *
+ *	// Obtain ODP pktin event queues used for scheduled packet input
+ *	odp_queue_t pktin_sched_queues[num];
+ *	ret = odp_pktin_event_queue(pktio, pktin_sched_queues['out'], num);
+ *	if (ret != num)
+ *		error(...);
+ *
+ *	// Create EM queues mapped to the scheduled ODP pktin event queues
+ *	em_queue_t queues_em[num];
+ *	ret = em_odp_pktin_event_queues2em(pktin_sched_queues['in'],
+ *					   queues_em['out'], num);
+ *	if (ret != num)
+ *		error(...);
+ *
+ *	// Add the EM queues to an EM EO and once the EO has been started it
+ *	// will receive pktio events directly from the scheduler.
+ *	for (int i = 0; i < num; i++)
+ *		err = em_eo_add_queue_sync(eo, queues_em[i]);
+ * @endcode
+ *
+ * @param[in]  odp_pktin_evqueues  Array of ODP pktin event queues to convert
+ *                                 to EM-queues. The array must contain 'num'
+ *                                 valid ODP-queue handles (as returned by the
+ *                                 odp_pktin_event_queue() function).
+ * @param[out] queues              Output array into which the corresponding
+ *                                 EM-queue handles are written.
+ *                                 Array must fit 'num' entries.
+ * @param      num                 Number of entries in 'odp_pktin_evqueues[]'
+ *                                 and 'queues[]'.
+ *
+ * @return int  Number of EM queues created that correspond to the given
+ *              ODP pktin event queues
+ * @retval <0 on failure
+ */
+int em_odp_pktin_event_queues2em(const odp_queue_t odp_pktin_evqueues[/*num*/],
+				 em_queue_t queues[/*out:num*/], int num);
+
+/**
+ * Get the EM event header size.
+ *
+ * Needed e.g. when configuring a separate ODP packet pool and having pktio
+ * allocate events usable by EM from there:
+ * @code
+ *	odp_pool_param_t::pkt.uarea_size = em_odp_event_hdr_size();
+ * @endcode
+ *
+ * @return EM event header size.
+ */
+uint32_t em_odp_event_hdr_size(void);
+
+/**
+ * Convert an EM event handle to an ODP event handle.
+ *
+ * @param event  EM-event handle
+ *
+ * @return ODP event handle.
+ */
+odp_event_t em_odp_event2odp(em_event_t event);
+
+/**
+ * Convert EM event handles to ODP event handles.
+ *
+ * @param      events      Array of EM-events to convert to ODP-events.
+ *                         The 'events[]' array must contain 'num' valid
+ *                         event handles.
+ * @param[out] odp_events  Output array into which the corresponding ODP-event
+ *                         handles are written. Array must fit 'num' entries.
+ * @param      num         Number of entries in 'events[]' and 'odp_events[]'.
+ */
+void em_odp_events2odp(const em_event_t events[/*num*/],
+		       odp_event_t odp_events[/*out:num*/], int num);
+
+/**
+ * Convert an ODP event handle to an EM event handle.
+ *
+ * The event must have been allocated by EM originally.
+ *
+ * @param odp_event  ODP-event handle
+ *
+ * @return EM event handle.
+ */
+em_event_t em_odp_event2em(odp_event_t odp_event);
+
+/**
+ * Convert ODP event handles to EM event handles.
+ *
+ * The events must have been allocated by EM originally.
+ *
+ * @param      odp_events  Array of ODP-events to convert to EM-events.
+ *                         The 'odp_events[]' array must contain 'num' valid
+ *                         ODP-event handles.
+ * @param[out] events      Output array into which the corresponding EM-event
+ *                         handles are written. Array must fit 'num' entries.
+ * @param      num         Number of entries in 'odp_events[]' and 'events[]'.
+ */
+void em_odp_events2em(const odp_event_t odp_events[/*num*/],
+		      em_event_t events[/*out:num*/], int num);
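+
+/*
+ * Usage sketch (illustration only, not part of the API): hand a batch of EM
+ * events over to ODP APIs and back. 'events[]' and 'num' are placeholders for
+ * events owned by the caller. No ODP-level alloc/free is done here, so EM's
+ * event state verification (ESV) stays consistent.
+ *
+ * @code
+ *	odp_event_t odp_events[num];
+ *
+ *	em_odp_events2odp(events, odp_events['out'], num);
+ *	... inspect the events via ODP APIs, no frees ...
+ *	em_odp_events2em(odp_events, events['out'], num);
+ * @endcode
+ */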
+
+/**
+ * @brief Get the ODP pools used as subpools in a given EM event pool.
+ *
+ * An EM event pool consists of 1 to 'EM_MAX_SUBPOOLS' subpools. Each subpool
+ * is an ODP pool. This function outputs the ODP pool handles of these subpools
+ * into a user-provided array and returns the number of handles written.
+ *
+ * The obtained ODP pools must not be deleted or altered outside of EM,
+ * i.e. these ODP pools must only be deleted as part of an EM event pool
+ * using em_pool_delete().
+ *
+ * ODP pool handles obtained through this function can be used to
+ * - configure ODP pktio to use an ODP pool created via EM (allows for
+ *   better ESV tracking)
+ * - print ODP-level pool statistics with ODP APIs etc.
+ *
+ * Note that direct allocations and frees via ODP APIs will bypass
+ * EM checks (e.g. ESV) and might cause errors unless properly handled:
+ * - use em_odp_event2em() to initialize as an EM event
+ * - use em_event_mark_free() before ODP-free operations (SW- or HW-free)
+ *
+ * @param      pool       EM event pool handle.
+ * @param[out] odp_pools  Output array to be filled with the ODP pools used as
+ *                        subpools in the given EM event pool. The array must
+ *                        fit 'num' entries.
+ * @param      num        Number of entries in the 'odp_pools[]' array.
+ *                        Using 'num=EM_MAX_SUBPOOLS' will always be large
+ *                        enough to fit all subpools in the EM event pool.
+ *
+ * @return The number of ODP pools filled into 'odp_pools[]'
+ */
+int em_odp_pool2odp(em_pool_t pool, odp_pool_t odp_pools[/*out*/], int num);
+
+/**
+ * @brief Get the EM event pool that the given ODP pool belongs to.
+ *
+ * An EM event pool consists of 1 to 'EM_MAX_SUBPOOLS' subpools. Each subpool
+ * is an ODP pool. This function returns the EM event pool that contains the
+ * given ODP pool as a subpool.
+ *
+ * @param odp_pool  ODP pool
+ *
+ * @return The EM event pool that contains the subpool 'odp_pool' or
+ *         EM_POOL_UNDEF if 'odp_pool' is not part of any EM event pool.
+ */
+em_pool_t em_odp_pool2em(odp_pool_t odp_pool);
+
+/**
+ * @brief Get the ODP schedule group that corresponds to the given EM queue
+ *        group.
+ *
+ * @param queue_group  EM queue group
+ *
+ * @return ODP schedule group handle
+ * @retval ODP_SCHED_GROUP_INVALID on error
+ */
+odp_schedule_group_t em_odp_qgrp2odp(em_queue_group_t queue_group);
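+
+/*
+ * Usage sketch (illustration only, not part of the API): use the first
+ * subpool of an EM event pool as the pktio packet pool so that received
+ * packets are EM events with ESV tracking (see em_odp_pool2odp() above).
+ * 'pktio_pool_em' is assumed to be an EM pool created with em_pool_create()
+ * and event type EM_EVENT_TYPE_PACKET; "eth0" is a placeholder device name.
+ *
+ * @code
+ *	odp_pool_t subpools[EM_MAX_SUBPOOLS];
+ *	int num = em_odp_pool2odp(pktio_pool_em, subpools['out'], EM_MAX_SUBPOOLS);
+ *
+ *	if (num < 1)
+ *		error(...);
+ *
+ *	odp_pktio_param_t pktio_param;
+ *
+ *	odp_pktio_param_init(&pktio_param);
+ *	odp_pktio_t pktio = odp_pktio_open("eth0", subpools[0], &pktio_param);
+ * @endcode
+ */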
+
+/**
+ * Enqueue external packets into EM
+ *
+ * Enqueue packets from outside of EM into EM queues for processing.
+ * This function will initialize the odp packets properly as EM events before
+ * enqueueing them into EM.
+ * The odp packets might be polled from pktio or some other external source,
+ * e.g. the em_conf_t::input.input_poll_fn() function (see em_init()) can use
+ * this API to enqueue polled packets into EM queues.
+ * Inside EM, the application must use em_send...() instead to send/enqueue
+ * events into EM queues.
+ *
+ * @param pkt_tbl  Array of external ODP-packets to enqueue into EM as events.
+ *                 The 'pkt_tbl[]' array must contain 'num' valid ODP packet
+ *                 handles.
+ * @param num      The number of packets in the 'pkt_tbl[]' array, must be >0.
+ * @param queue    EM queue into which to send/enqueue the packets as
+ *                 EM-events.
+ *
+ * @return The number of ODP packets successfully sent/enqueued as EM-events
+ */
+int em_odp_pkt_enqueue(const odp_packet_t pkt_tbl[/*num*/], int num,
+		       em_queue_t queue);
+
+/**
+ * @brief Get the odp timer_pool from an EM timer handle.
+ *
+ * Returns the corresponding odp timer_pool from a valid EM timer handle.
+ * This can be used e.g. for debugging.
+ *
+ * DO NOT use any ODP APIs directly to modify the odp timer_pool created by EM.
+ *
+ * @param tmr  EM timer handle
+ *
+ * @return odp timer_pool or ODP_TIMER_POOL_INVALID on failure
+ */
+odp_timer_pool_t em_odp_timer2odp(em_timer_t tmr);
+
+/**
+ * @brief Get the odp timer from an EM timeout handle.
+ *
+ * Returns the corresponding odp timer from a valid EM tmo handle.
+ * This can be used e.g. for debugging.
+ *
+ * DO NOT use any ODP APIs directly to modify the odp timer created by EM.
+ *
+ * @param tmo  EM timeout handle
+ *
+ * @return odp timer or ODP_TIMER_INVALID on failure
+ */
+odp_timer_t em_odp_tmo2odp(em_tmo_t tmo);
+
+/**
+ * @}
+ */
+#ifdef __cplusplus
+}
+#endif
+
+#pragma GCC visibility pop
+#endif /* EVENT_MACHINE_ODP_EXT_H */
diff --git a/programs/bench/bench_pool.c b/programs/bench/bench_pool.c
index c5ea54eb..5375f5a1 100644
--- a/programs/bench/bench_pool.c
+++ b/programs/bench/bench_pool.c
@@ -1,492 +1,492 @@
-/* Copyright (c) 2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "bench_common.h"
-
-#include
-#include
-#include
-
-/* User area size in bytes */
-#define UAREA_SIZE 8
-
-/* Default event size */
-#define EVENT_SIZE 1024
-
-/* Number of events in EM_POOL_DEFAULT */
-#define NUM_EVENTS 1024
-
-/* Maximum number of pool statistics to get */
-#define MAX_POOL_STATS 1024u
-
-/* Number of EM core count */
-#define CORE_COUNT 2
-
-typedef struct {
-	/* Command line options and benchmark info */
-	run_bench_arg_t run_bench_arg;
-
-	/* Test case input / output data */
-	int subpools[EM_MAX_SUBPOOLS];
-	em_pool_stats_opt_t stats_opt;
-	odp_pool_stats_opt_t stats_opt_odp;
-	em_pool_info_t pool_info[MAX_POOL_STATS];
-	em_pool_stats_t pool_stats[MAX_POOL_STATS];
-	em_pool_subpool_stats_t subpool_stats[MAX_POOL_STATS];
-	em_pool_stats_selected_t pool_stats_selected[MAX_POOL_STATS];
-	em_pool_subpool_stats_selected_t subpool_stats_selected[MAX_POOL_STATS];
-
-} gbl_args_t;
-
-static gbl_args_t *gbl_args;
-
-ODP_STATIC_ASSERT(REPEAT_COUNT <= MAX_POOL_STATS, "REPEAT_COUNT is bigger than MAX_POOL_STATS\n");
-
-/**
- * Test functions
- */
-static int pool_stats(void)
-{
-	int i;
-
-	for (i = 0; i < REPEAT_COUNT; i++)
-		em_pool_stats(EM_POOL_DEFAULT, &gbl_args->pool_stats[i]);
-
-	return i;
-}
-
-static void set_stats_opt(void)
-{
-	gbl_args->stats_opt.all = 0;
-	gbl_args->stats_opt.available = 1;
-	gbl_args->stats_opt.alloc_ops = 1;
-	gbl_args->stats_opt.alloc_fails = 1;
-	gbl_args->stats_opt.cache_alloc_ops = 1;
-	gbl_args->stats_opt.cache_free_ops = 1;
-	gbl_args->stats_opt.free_ops = 1;
-	gbl_args->stats_opt.total_ops = 1;
-	gbl_args->stats_opt.cache_available = 1;
-}
-
-/* Don't read statistics about cache_available */
-static void set_stats_opt_no_cache_avail(void)
-{
-	gbl_args->stats_opt.all = 0;
-	gbl_args->stats_opt.available = 1;
-	gbl_args->stats_opt.alloc_ops = 1;
-	gbl_args->stats_opt.alloc_fails = 1;
-	gbl_args->stats_opt.cache_alloc_ops = 1;
-	gbl_args->stats_opt.cache_free_ops = 1;
-	gbl_args->stats_opt.free_ops = 1;
-	gbl_args->stats_opt.total_ops = 1;
-}
-
-static int pool_stats_selected(void)
-{
-	int i;
-
-	for (i = 0; i < REPEAT_COUNT; i++)
-
em_pool_stats_selected(EM_POOL_DEFAULT, &gbl_args->pool_stats_selected[i], - &gbl_args->stats_opt); - - return i; -} - -static void set_subpools(void) -{ - gbl_args->subpools[0] = 0; -} - -static int subpool_stats(void) -{ - int i; - - for (i = 0; i < REPEAT_COUNT; i++) - em_pool_subpool_stats(EM_POOL_DEFAULT, gbl_args->subpools, 1, - &gbl_args->subpool_stats[i]); - - return i; -} - -static int subpool_stats_selected(void) -{ - int i; - - for (i = 0; i < REPEAT_COUNT; i++) - em_pool_subpool_stats_selected(EM_POOL_DEFAULT, gbl_args->subpools, 1, - &gbl_args->subpool_stats_selected[i], - &gbl_args->stats_opt); - - return i; -} - -static int pool_info(void) -{ - int i; - - for (i = 0; i < REPEAT_COUNT; i++) - em_pool_info(EM_POOL_DEFAULT, &gbl_args->pool_info[i]); - - return i; -} - -bench_info_t test_suite[] = { - BENCH_INFO(pool_info, NULL, NULL, 0, "em_pool_info"), - BENCH_INFO(pool_stats, NULL, NULL, 0, "em_pool_stats"), - BENCH_INFO(subpool_stats, set_subpools, NULL, 0, "em_pool_subpool_stats"), - BENCH_INFO(pool_stats_selected, set_stats_opt, NULL, 0, "em_pool_stats_selected"), - BENCH_INFO(pool_stats_selected, set_stats_opt_no_cache_avail, NULL, 0, - "em_pool_stats_selected(no cache_availeble)"), - BENCH_INFO(subpool_stats_selected, set_stats_opt, NULL, 0, - "em_pool_subpool_stats_selected"), - BENCH_INFO(subpool_stats_selected, set_stats_opt_no_cache_avail, NULL, 0, - "em_pool_subpool_stats_selected(no cache_available)") -}; - -/* Print usage information */ -static void usage(void) -{ - printf("\n" - "EM event API micro benchmarks\n" - "\n" - "Options:\n" - " -t, --time Time measurement.\n" - " 0: measure CPU cycles (default)\n" - " 1: measure time\n" - " -i, --index Benchmark index to run indefinitely.\n" - " -r, --rounds Run each test case 'num' times (default %u).\n" - " -w, --write-csv Write result to csv files(used in CI) or not.\n" - " default: not write\n" - " -h, --help Display help and exit.\n\n" - "\n", ROUNDS); -} - -/* Parse command line arguments */ -static int parse_args(int argc, char *argv[], int num_bench, cmd_opt_t *cmd_opt/*out*/) -{ - int opt; - int long_index; - static const struct option longopts[] = { - {"time", required_argument, NULL, 't'}, - {"index", required_argument, NULL, 'i'}, - {"rounds", required_argument, NULL, 'r'}, - {"write-csv", no_argument, NULL, 'w'}, - {"help", no_argument, NULL, 'h'}, - {NULL, 0, NULL, 0} - }; - - static const char *shortopts = "t:i:r:wh"; - - cmd_opt->time = 0; /* Measure CPU cycles */ - cmd_opt->bench_idx = 0; /* Run all benchmarks */ - cmd_opt->rounds = ROUNDS; - cmd_opt->write_csv = 0; /* Do not write result to csv files */ - - while (1) { - opt = getopt_long(argc, argv, shortopts, longopts, &long_index); - - if (opt == -1) - break; /* No more options */ - - switch (opt) { - case 't': - cmd_opt->time = atoi(optarg); - break; - case 'i': - cmd_opt->bench_idx = atoi(optarg); - break; - case 'r': - cmd_opt->rounds = atoi(optarg); - break; - case 'w': - cmd_opt->write_csv = 1; - break; - case 'h': - usage(); - return 1; - default: - ODPH_ERR("Bad option. 
Use -h for help.\n"); - return -1; - } - } - - if (cmd_opt->rounds < 1) { - ODPH_ERR("Invalid test cycle repeat count: %u\n", cmd_opt->rounds); - return -1; - } - - if (cmd_opt->bench_idx < 0 || cmd_opt->bench_idx > num_bench) { - ODPH_ERR("Bad bench index %i\n", cmd_opt->bench_idx); - return -1; - } - - optind = 1; /* Reset 'extern optind' from the getopt lib */ - - return 0; -} - -/* Print system and application info */ -static void print_info(const char *cpumask_str, const cmd_opt_t *com_opt) -{ - odp_sys_info_print(); - - printf("\n" - "bench_pool options\n" - "-------------------\n"); - - printf("Worker CPU mask: %s\n", cpumask_str); - printf("Measurement unit: %s\n", com_opt->time ? "nsec" : "CPU cycles"); - printf("Test rounds: %u\n", com_opt->rounds); - printf("\n"); -} - -static void init_default_pool_config(em_pool_cfg_t *pool_conf) -{ - em_pool_cfg_init(pool_conf); - - pool_conf->event_type = EM_EVENT_TYPE_SW; - pool_conf->user_area.in_use = true; - pool_conf->user_area.size = UAREA_SIZE; - pool_conf->num_subpools = 1; - pool_conf->subpool[0].size = EVENT_SIZE; - pool_conf->subpool[0].num = NUM_EVENTS; - pool_conf->subpool[0].cache_size = 0; -} - -/* Allocate and free events to create more realistic statistics than a band new pool */ -static void alloc_free_event(void) -{ - /* Alloc 10 extra events than the pool has to create some statistics about - * alloc_fails, so 10 EM ERROR prints about em_alloc() are expected. - */ - const int event_tbl_size = NUM_EVENTS + 10; - em_event_t event_tbl[event_tbl_size]; - int i; - - for (i = 0; i < event_tbl_size; i++) { - event_tbl[i] = EM_EVENT_UNDEF; - event_tbl[i] = em_alloc(EVENT_SIZE, EM_EVENT_TYPE_SW, EM_POOL_DEFAULT); - } - - /* Free all allocated events */ - for (i = 0; i < event_tbl_size; i++) { - if (event_tbl[i] != EM_EVENT_UNDEF) - em_free(event_tbl[i]); - } -} - -/* Write selected pool stats without cache_available counters to a different csv - * file than the csv file for pool stats with cache_available. since selected - * pool stats with and without cache_available are in different scale. Different - * file means they will be ploted in different charts in our benchmark website. 
- */ -static void write_result_to_csv(void) -{ - FILE *file; - char time_str[72] = {0}; - char bench4_desc[60] = {0}; - char bench5_desc[60] = {0}; - char bench6_desc[60] = {0}; - double *result = gbl_args->run_bench_arg.result; - bench_info_t *bench = gbl_args->run_bench_arg.bench; - - fill_time_str(time_str); - - file = fopen("em_pool.csv", "w"); - if (file == NULL) { - perror("Failed to open file em_pool.csv"); - return; - } - - /* Remove substring from long desc so it can be fit in the website chart */ - strncpy(bench4_desc, bench[4].desc, 22); /*em_pool_stats_selected(no cache_availeble)*/ - strncpy(bench5_desc, bench[5].desc + 8, 22);/*em_pool_subpool_stats_selected*/ - /* em_pool_subpool_stats_selected(no cache_available) */ - strncpy(bench6_desc, bench[6].desc + 8, 22); - - fprintf(file, "Date,%s,%s,%s,%s,%s\n" - "%s,%.2f,%.2f,%.2f,%.2f,%.2f\n", - bench[0].desc, bench[1].desc, bench[2].desc, bench[3].desc, bench5_desc, - time_str, result[0], result[1], result[2], result[3], result[5]); - - fclose(file); - - file = fopen("em_pool_no_cache_available.csv", "w"); - if (file == NULL) { - perror("Failed to open file em_pool_no_cache_available.csv"); - return; - } - - fprintf(file, "Date,%s,%s\n%s,%.2f,%.2f\n", bench4_desc, bench6_desc, - time_str, result[4], result[6]); - fclose(file); -} - -int main(int argc, char *argv[]) -{ - em_conf_t conf; - cmd_opt_t cmd_opt; - em_pool_cfg_t pool_conf; - em_core_mask_t core_mask; - odph_helper_options_t helper_options; - odph_thread_t worker_thread; - odph_thread_common_param_t thr_common; - odph_thread_param_t thr_param; - odp_shm_t shm; - odp_cpumask_t cpumask, worker_mask; - odp_instance_t instance; - odp_init_t init_param; - int worker_cpu; - char cpumask_str[ODP_CPUMASK_STR_SIZE]; - int ret = 0; - int num_bench = ARRAY_SIZE(test_suite); - double result[ARRAY_SIZE(test_suite)] = {0}; - - /* Let helper collect its own arguments (e.g. 
--odph_proc) */ - argc = odph_parse_options(argc, argv); - if (odph_options(&helper_options)) { - ODPH_ERR("Reading ODP helper options failed\n"); - exit(EXIT_FAILURE); - } - - /* Parse and store the application arguments */ - ret = parse_args(argc, argv, num_bench, &cmd_opt); - if (ret) - exit(EXIT_FAILURE); - - odp_init_param_init(&init_param); - init_param.mem_model = helper_options.mem_model; - - /* Init ODP before calling anything else */ - if (odp_init_global(&instance, &init_param, NULL)) { - ODPH_ERR("Global init failed\n"); - exit(EXIT_FAILURE); - } - - /* Init this thread */ - if (odp_init_local(instance, ODP_THREAD_CONTROL)) { - ODPH_ERR("Local init failed\n"); - exit(EXIT_FAILURE); - } - - odp_schedule_config(NULL); - - /* Get worker CPU */ - if (odp_cpumask_default_worker(&worker_mask, 1) != 1) { - ODPH_ERR("Unable to allocate worker thread\n"); - goto odp_term; - } - worker_cpu = odp_cpumask_first(&worker_mask); - (void)odp_cpumask_to_str(&worker_mask, cpumask_str, ODP_CPUMASK_STR_SIZE); - - print_info(cpumask_str, &cmd_opt); - - /* Init EM */ - em_core_mask_zero(&core_mask); - em_core_mask_set(odp_cpu_id(), &core_mask); - em_core_mask_set(worker_cpu, &core_mask); - if (odp_cpumask_count(&core_mask.odp_cpumask) != CORE_COUNT) - goto odp_term; - - init_default_pool_config(&pool_conf); - - em_conf_init(&conf); - if (helper_options.mem_model == ODP_MEM_MODEL_PROCESS) - conf.process_per_core = 1; - else - conf.thread_per_core = 1; - conf.default_pool_cfg = pool_conf; - conf.core_count = CORE_COUNT; - conf.phys_mask = core_mask; - - if (em_init(&conf) != EM_OK) { - ODPH_ERR("EM init failed\n"); - exit(EXIT_FAILURE); - } - - if (em_init_core() != EM_OK) { - ODPH_ERR("EM core init failed\n"); - exit(EXIT_FAILURE); - } - - if (setup_sig_handler()) { - ODPH_ERR("Signal handler setup failed\n"); - exit(EXIT_FAILURE); - } - - /* Reserve memory for args from shared mem */ - shm = odp_shm_reserve("shm_args", sizeof(gbl_args_t), ODP_CACHE_LINE_SIZE, 0); - if (shm == ODP_SHM_INVALID) { - ODPH_ERR("Shared mem reserve failed\n"); - exit(EXIT_FAILURE); - } - - gbl_args = odp_shm_addr(shm); - if (gbl_args == NULL) { - ODPH_ERR("Shared mem alloc failed\n"); - exit(EXIT_FAILURE); - } - - odp_atomic_init_u32(&exit_thread, 0); - - memset(gbl_args, 0, sizeof(gbl_args_t)); - gbl_args->run_bench_arg.bench = test_suite; - gbl_args->run_bench_arg.num_bench = num_bench; - gbl_args->run_bench_arg.opt = cmd_opt; - gbl_args->run_bench_arg.result = result; - - alloc_free_event(); - - memset(&worker_thread, 0, sizeof(odph_thread_t)); - odp_cpumask_zero(&cpumask); - odp_cpumask_set(&cpumask, worker_cpu); - - odph_thread_common_param_init(&thr_common); - thr_common.instance = instance; - thr_common.cpumask = &cpumask; - thr_common.share_param = 1; - - odph_thread_param_init(&thr_param); - thr_param.start = run_benchmarks; - thr_param.arg = &gbl_args->run_bench_arg; - thr_param.thr_type = ODP_THREAD_WORKER; - - odph_thread_create(&worker_thread, &thr_common, &thr_param, 1); - - odph_thread_join(&worker_thread, 1); - - ret = gbl_args->run_bench_arg.bench_failed; - - if (cmd_opt.write_csv) - write_result_to_csv(); - - if (em_term_core() != EM_OK) - ODPH_ERR("EM core terminate failed\n"); - - if (em_term(&conf) != EM_OK) - ODPH_ERR("EM terminate failed\n"); - - if (odp_shm_free(shm)) { - ODPH_ERR("Shared mem free failed\n"); - exit(EXIT_FAILURE); - } - -odp_term: - if (odp_term_local()) { - ODPH_ERR("Local term failed\n"); - exit(EXIT_FAILURE); - } - - if (odp_term_global(instance)) { - ODPH_ERR("Global term 
failed\n"); - exit(EXIT_FAILURE); - } - - if (ret < 0) - return EXIT_FAILURE; - - return EXIT_SUCCESS; -} +/* Copyright (c) 2023, Nokia + * All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include "bench_common.h" + +#include +#include +#include + +/* User area size in bytes */ +#define UAREA_SIZE 8 + +/* Default event size */ +#define EVENT_SIZE 1024 + +/* Number of events in EM_POOL_DEFAULT */ +#define NUM_EVENTS 1024 + +/* Maximum number of pool statistics to get */ +#define MAX_POOL_STATS 1024u + +/* Number of EM core count */ +#define CORE_COUNT 2 + +typedef struct { + /* Command line options and benchmark info */ + run_bench_arg_t run_bench_arg; + + /* Test case input / output data */ + int subpools[EM_MAX_SUBPOOLS]; + em_pool_stats_opt_t stats_opt; + odp_pool_stats_opt_t stats_opt_odp; + em_pool_info_t pool_info[MAX_POOL_STATS]; + em_pool_stats_t pool_stats[MAX_POOL_STATS]; + em_pool_subpool_stats_t subpool_stats[MAX_POOL_STATS]; + em_pool_stats_selected_t pool_stats_selected[MAX_POOL_STATS]; + em_pool_subpool_stats_selected_t subpool_stats_selected[MAX_POOL_STATS]; + +} gbl_args_t; + +static gbl_args_t *gbl_args; + +ODP_STATIC_ASSERT(REPEAT_COUNT <= MAX_POOL_STATS, "REPEAT_COUNT is bigger than MAX_POOL_STATS\n"); + +/** + * Test functions + */ +static int pool_stats(void) +{ + int i; + + for (i = 0; i < REPEAT_COUNT; i++) + em_pool_stats(EM_POOL_DEFAULT, &gbl_args->pool_stats[i]); + + return i; +} + +static void set_stats_opt(void) +{ + gbl_args->stats_opt.all = 0; + gbl_args->stats_opt.available = 1; + gbl_args->stats_opt.alloc_ops = 1; + gbl_args->stats_opt.alloc_fails = 1; + gbl_args->stats_opt.cache_alloc_ops = 1; + gbl_args->stats_opt.cache_free_ops = 1; + gbl_args->stats_opt.free_ops = 1; + gbl_args->stats_opt.total_ops = 1; + gbl_args->stats_opt.cache_available = 1; +} + +/* Don't read statistics about cache_available */ +static void set_stats_opt_no_cache_avail(void) +{ + gbl_args->stats_opt.all = 0; + gbl_args->stats_opt.available = 1; + gbl_args->stats_opt.alloc_ops = 1; + gbl_args->stats_opt.alloc_fails = 1; + gbl_args->stats_opt.cache_alloc_ops = 1; + gbl_args->stats_opt.cache_free_ops = 1; + gbl_args->stats_opt.free_ops = 1; + gbl_args->stats_opt.total_ops = 1; +} + +static int pool_stats_selected(void) +{ + int i; + + for (i = 0; i < REPEAT_COUNT; i++) + em_pool_stats_selected(EM_POOL_DEFAULT, &gbl_args->pool_stats_selected[i], + &gbl_args->stats_opt); + + return i; +} + +static void set_subpools(void) +{ + gbl_args->subpools[0] = 0; +} + +static int subpool_stats(void) +{ + int i; + + for (i = 0; i < REPEAT_COUNT; i++) + em_pool_subpool_stats(EM_POOL_DEFAULT, gbl_args->subpools, 1, + &gbl_args->subpool_stats[i]); + + return i; +} + +static int subpool_stats_selected(void) +{ + int i; + + for (i = 0; i < REPEAT_COUNT; i++) + em_pool_subpool_stats_selected(EM_POOL_DEFAULT, gbl_args->subpools, 1, + &gbl_args->subpool_stats_selected[i], + &gbl_args->stats_opt); + + return i; +} + +static int pool_info(void) +{ + int i; + + for (i = 0; i < REPEAT_COUNT; i++) + em_pool_info(EM_POOL_DEFAULT, &gbl_args->pool_info[i]); + + return i; +} + +bench_info_t test_suite[] = { + BENCH_INFO(pool_info, NULL, NULL, 0, "em_pool_info"), + BENCH_INFO(pool_stats, NULL, NULL, 0, "em_pool_stats"), + BENCH_INFO(subpool_stats, set_subpools, NULL, 0, "em_pool_subpool_stats"), + BENCH_INFO(pool_stats_selected, set_stats_opt, NULL, 0, "em_pool_stats_selected"), + BENCH_INFO(pool_stats_selected, set_stats_opt_no_cache_avail, NULL, 0, + "em_pool_stats_selected(no 
cache_available)"),
+	BENCH_INFO(subpool_stats_selected, set_stats_opt, NULL, 0,
+		   "em_pool_subpool_stats_selected"),
+	BENCH_INFO(subpool_stats_selected, set_stats_opt_no_cache_avail, NULL, 0,
+		   "em_pool_subpool_stats_selected(no cache_available)")
+};
+
+/* Print usage information */
+static void usage(void)
+{
+	printf("\n"
+	       "EM pool API micro benchmarks\n"
+	       "\n"
+	       "Options:\n"
+	       "  -t, --time       Time measurement.\n"
+	       "                   0: measure CPU cycles (default)\n"
+	       "                   1: measure time\n"
+	       "  -i, --index      Benchmark index to run indefinitely.\n"
+	       "  -r, --rounds     Run each test case 'num' times (default %u).\n"
+	       "  -w, --write-csv  Write results to csv files (used in CI).\n"
+	       "                   default: do not write\n"
+	       "  -h, --help       Display help and exit.\n\n"
+	       "\n", ROUNDS);
+}
+
+/* Parse command line arguments */
+static int parse_args(int argc, char *argv[], int num_bench, cmd_opt_t *cmd_opt/*out*/)
+{
+	int opt;
+	int long_index;
+	static const struct option longopts[] = {
+		{"time", required_argument, NULL, 't'},
+		{"index", required_argument, NULL, 'i'},
+		{"rounds", required_argument, NULL, 'r'},
+		{"write-csv", no_argument, NULL, 'w'},
+		{"help", no_argument, NULL, 'h'},
+		{NULL, 0, NULL, 0}
+	};
+
+	static const char *shortopts = "t:i:r:wh";
+
+	cmd_opt->time = 0; /* Measure CPU cycles */
+	cmd_opt->bench_idx = 0; /* Run all benchmarks */
+	cmd_opt->rounds = ROUNDS;
+	cmd_opt->write_csv = 0; /* Do not write results to csv files */
+
+	while (1) {
+		opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+		if (opt == -1)
+			break; /* No more options */
+
+		switch (opt) {
+		case 't':
+			cmd_opt->time = atoi(optarg);
+			break;
+		case 'i':
+			cmd_opt->bench_idx = atoi(optarg);
+			break;
+		case 'r':
+			cmd_opt->rounds = atoi(optarg);
+			break;
+		case 'w':
+			cmd_opt->write_csv = 1;
+			break;
+		case 'h':
+			usage();
+			return 1;
+		default:
+			ODPH_ERR("Bad option. Use -h for help.\n");
+			return -1;
+		}
+	}
+
+	if (cmd_opt->rounds < 1) {
+		ODPH_ERR("Invalid test cycle repeat count: %u\n", cmd_opt->rounds);
+		return -1;
+	}
+
+	if (cmd_opt->bench_idx < 0 || cmd_opt->bench_idx > num_bench) {
+		ODPH_ERR("Bad bench index %i\n", cmd_opt->bench_idx);
+		return -1;
+	}
+
+	optind = 1; /* Reset 'extern optind' from the getopt lib */
+
+	return 0;
+}
+
+/* Print system and application info */
+static void print_info(const char *cpumask_str, const cmd_opt_t *com_opt)
+{
+	odp_sys_info_print();
+
+	printf("\n"
+	       "bench_pool options\n"
+	       "-------------------\n");
+
+	printf("Worker CPU mask:  %s\n", cpumask_str);
+	printf("Measurement unit: %s\n", com_opt->time ? "nsec" : "CPU cycles");
+	printf("Test rounds:      %u\n", com_opt->rounds);
+	printf("\n");
+}
+
+static void init_default_pool_config(em_pool_cfg_t *pool_conf)
+{
+	em_pool_cfg_init(pool_conf);
+
+	pool_conf->event_type = EM_EVENT_TYPE_SW;
+	pool_conf->user_area.in_use = true;
+	pool_conf->user_area.size = UAREA_SIZE;
+	pool_conf->num_subpools = 1;
+	pool_conf->subpool[0].size = EVENT_SIZE;
+	pool_conf->subpool[0].num = NUM_EVENTS;
+	pool_conf->subpool[0].cache_size = 0;
+}
+
+/* Allocate and free events to create more realistic statistics than a brand new pool */
+static void alloc_free_event(void)
+{
+	/* Allocate 10 events more than the pool holds to generate some
+	 * alloc_fails statistics, so 10 EM ERROR prints from em_alloc()
+	 * are expected.
+	 */
+	const int event_tbl_size = NUM_EVENTS + 10;
+	em_event_t event_tbl[event_tbl_size];
+	int i;
+
+	for (i = 0; i < event_tbl_size; i++) {
+		event_tbl[i] = EM_EVENT_UNDEF;
+		event_tbl[i] = em_alloc(EVENT_SIZE, EM_EVENT_TYPE_SW, EM_POOL_DEFAULT);
+	}
+
+	/* Free all allocated events */
+	for (i = 0; i < event_tbl_size; i++) {
+		if (event_tbl[i] != EM_EVENT_UNDEF)
+			em_free(event_tbl[i]);
+	}
+}
+
+/* Write the selected pool stats without cache_available counters to a
+ * different csv file than the one used for pool stats with cache_available,
+ * since selected pool stats with and without cache_available are on different
+ * scales. Separate files mean they will be plotted in different charts on our
+ * benchmark website.
+ */
+static void write_result_to_csv(void)
+{
+	FILE *file;
+	char time_str[72] = {0};
+	char bench4_desc[60] = {0};
+	char bench5_desc[60] = {0};
+	char bench6_desc[60] = {0};
+	double *result = gbl_args->run_bench_arg.result;
+	bench_info_t *bench = gbl_args->run_bench_arg.bench;
+
+	fill_time_str(time_str);
+
+	file = fopen("em_pool.csv", "w");
+	if (file == NULL) {
+		perror("Failed to open file em_pool.csv");
+		return;
+	}
+
+	/* Remove substring from long desc so it can fit in the website chart */
+	strncpy(bench4_desc, bench[4].desc, 22); /*em_pool_stats_selected(no cache_available)*/
+	strncpy(bench5_desc, bench[5].desc + 8, 22);/*em_pool_subpool_stats_selected*/
+	/* em_pool_subpool_stats_selected(no cache_available) */
+	strncpy(bench6_desc, bench[6].desc + 8, 22);
+
+	fprintf(file, "Date,%s,%s,%s,%s,%s\n"
+		"%s,%.2f,%.2f,%.2f,%.2f,%.2f\n",
+		bench[0].desc, bench[1].desc, bench[2].desc, bench[3].desc, bench5_desc,
+		time_str, result[0], result[1], result[2], result[3], result[5]);
+
+	fclose(file);
+
+	file = fopen("em_pool_no_cache_available.csv", "w");
+	if (file == NULL) {
+		perror("Failed to open file em_pool_no_cache_available.csv");
+		return;
+	}
+
+	fprintf(file, "Date,%s,%s\n%s,%.2f,%.2f\n", bench4_desc, bench6_desc,
+		time_str, result[4], result[6]);
+	fclose(file);
+}
+
+int main(int argc, char *argv[])
+{
+	em_conf_t conf;
+	cmd_opt_t cmd_opt;
+	em_pool_cfg_t pool_conf;
+	em_core_mask_t core_mask;
+	odph_helper_options_t helper_options;
+	odph_thread_t worker_thread;
+	odph_thread_common_param_t thr_common;
+	odph_thread_param_t thr_param;
+	odp_shm_t shm;
+	odp_cpumask_t cpumask, worker_mask;
+	odp_instance_t instance;
+	odp_init_t init_param;
+	int worker_cpu;
+	char cpumask_str[ODP_CPUMASK_STR_SIZE];
+	int ret = 0;
+	int num_bench = ARRAY_SIZE(test_suite);
+	double result[ARRAY_SIZE(test_suite)] = {0};
+
+	/* Let helper collect its own arguments (e.g.
--odph_proc) */ + argc = odph_parse_options(argc, argv); + if (odph_options(&helper_options)) { + ODPH_ERR("Reading ODP helper options failed\n"); + exit(EXIT_FAILURE); + } + + /* Parse and store the application arguments */ + ret = parse_args(argc, argv, num_bench, &cmd_opt); + if (ret) + exit(EXIT_FAILURE); + + odp_init_param_init(&init_param); + init_param.mem_model = helper_options.mem_model; + + /* Init ODP before calling anything else */ + if (odp_init_global(&instance, &init_param, NULL)) { + ODPH_ERR("Global init failed\n"); + exit(EXIT_FAILURE); + } + + /* Init this thread */ + if (odp_init_local(instance, ODP_THREAD_CONTROL)) { + ODPH_ERR("Local init failed\n"); + exit(EXIT_FAILURE); + } + + odp_schedule_config(NULL); + + /* Get worker CPU */ + if (odp_cpumask_default_worker(&worker_mask, 1) != 1) { + ODPH_ERR("Unable to allocate worker thread\n"); + goto odp_term; + } + worker_cpu = odp_cpumask_first(&worker_mask); + (void)odp_cpumask_to_str(&worker_mask, cpumask_str, ODP_CPUMASK_STR_SIZE); + + print_info(cpumask_str, &cmd_opt); + + /* Init EM */ + em_core_mask_zero(&core_mask); + em_core_mask_set(odp_cpu_id(), &core_mask); + em_core_mask_set(worker_cpu, &core_mask); + if (odp_cpumask_count(&core_mask.odp_cpumask) != CORE_COUNT) + goto odp_term; + + init_default_pool_config(&pool_conf); + + em_conf_init(&conf); + if (helper_options.mem_model == ODP_MEM_MODEL_PROCESS) + conf.process_per_core = 1; + else + conf.thread_per_core = 1; + conf.default_pool_cfg = pool_conf; + conf.core_count = CORE_COUNT; + conf.phys_mask = core_mask; + + if (em_init(&conf) != EM_OK) { + ODPH_ERR("EM init failed\n"); + exit(EXIT_FAILURE); + } + + if (em_init_core() != EM_OK) { + ODPH_ERR("EM core init failed\n"); + exit(EXIT_FAILURE); + } + + if (setup_sig_handler()) { + ODPH_ERR("Signal handler setup failed\n"); + exit(EXIT_FAILURE); + } + + /* Reserve memory for args from shared mem */ + shm = odp_shm_reserve("shm_args", sizeof(gbl_args_t), ODP_CACHE_LINE_SIZE, 0); + if (shm == ODP_SHM_INVALID) { + ODPH_ERR("Shared mem reserve failed\n"); + exit(EXIT_FAILURE); + } + + gbl_args = odp_shm_addr(shm); + if (gbl_args == NULL) { + ODPH_ERR("Shared mem alloc failed\n"); + exit(EXIT_FAILURE); + } + + odp_atomic_init_u32(&exit_thread, 0); + + memset(gbl_args, 0, sizeof(gbl_args_t)); + gbl_args->run_bench_arg.bench = test_suite; + gbl_args->run_bench_arg.num_bench = num_bench; + gbl_args->run_bench_arg.opt = cmd_opt; + gbl_args->run_bench_arg.result = result; + + alloc_free_event(); + + memset(&worker_thread, 0, sizeof(odph_thread_t)); + odp_cpumask_zero(&cpumask); + odp_cpumask_set(&cpumask, worker_cpu); + + odph_thread_common_param_init(&thr_common); + thr_common.instance = instance; + thr_common.cpumask = &cpumask; + thr_common.share_param = 1; + + odph_thread_param_init(&thr_param); + thr_param.start = run_benchmarks; + thr_param.arg = &gbl_args->run_bench_arg; + thr_param.thr_type = ODP_THREAD_WORKER; + + odph_thread_create(&worker_thread, &thr_common, &thr_param, 1); + + odph_thread_join(&worker_thread, 1); + + ret = gbl_args->run_bench_arg.bench_failed; + + if (cmd_opt.write_csv) + write_result_to_csv(); + + if (em_term_core() != EM_OK) + ODPH_ERR("EM core terminate failed\n"); + + if (em_term(&conf) != EM_OK) + ODPH_ERR("EM terminate failed\n"); + + if (odp_shm_free(shm)) { + ODPH_ERR("Shared mem free failed\n"); + exit(EXIT_FAILURE); + } + +odp_term: + if (odp_term_local()) { + ODPH_ERR("Local term failed\n"); + exit(EXIT_FAILURE); + } + + if (odp_term_global(instance)) { + ODPH_ERR("Global term 
failed\n"); + exit(EXIT_FAILURE); + } + + if (ret < 0) + return EXIT_FAILURE; + + return EXIT_SUCCESS; +} diff --git a/programs/common/cm_pktio.c b/programs/common/cm_pktio.c index a7211f77..daab91eb 100644 --- a/programs/common/cm_pktio.c +++ b/programs/common/cm_pktio.c @@ -1,1414 +1,1414 @@ -/* - * Copyright (c) 2015-2022, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - - /** - * @file - * - * EM-ODP packet I/O setup - */ -#include -#include -#include -#include -#include - -#include "cm_setup.h" -#include "cm_pktio.h" - -#define PKTIO_PKT_POOL_NUM_BUFS (32 * 1024) -#define PKTIO_PKT_POOL_BUF_SIZE 1536 -#define PKTIO_VEC_POOL_VEC_SIZE 32 -#define PKTIO_VEC_SIZE PKTIO_VEC_POOL_VEC_SIZE -#define PKTIO_VEC_TMO ODP_TIME_MSEC_IN_NS - -static pktio_shm_t *pktio_shm; -static __thread pktio_locm_t pktio_locm ODP_ALIGNED_CACHE; - -static inline tx_burst_t *tx_drain_burst_acquire(void); -static inline int pktin_queue_acquire(odp_pktin_queue_t **pktin_queue_ptr /*out*/); -static inline odp_queue_t plain_queue_acquire(void); - -const char *pktin_mode_str(pktin_mode_t in_mode) -{ - const char *str; - - switch (in_mode) { - case DIRECT_RECV: - str = "DIRECT_RECV"; - break; - case PLAIN_QUEUE: - str = "PLAIN_QUEUE"; - break; - case SCHED_PARALLEL: - str = "SCHED_PARALLEL"; - break; - case SCHED_ATOMIC: - str = "SCHED_ATOMIC"; - break; - case SCHED_ORDERED: - str = "SCHED_ORDERED"; - break; - default: - str = "UNKNOWN"; - break; - } - - return str; -} - -bool pktin_polled_mode(pktin_mode_t in_mode) -{ - return in_mode == DIRECT_RECV || - in_mode == PLAIN_QUEUE; -} - -bool pktin_sched_mode(pktin_mode_t in_mode) -{ - return in_mode == SCHED_PARALLEL || - in_mode == SCHED_ATOMIC || - in_mode == SCHED_ORDERED; -} - -void pktio_mem_reserve(void) -{ - odp_shm_t shm; - uint32_t flags = 0; - - /* Sanity check: em_shm should not be set yet */ - if (unlikely(pktio_shm != NULL)) - APPL_EXIT_FAILURE("pktio shared memory ptr set - already initialized?"); - - odp_shm_capability_t shm_capa; - int ret = odp_shm_capability(&shm_capa); - - if (unlikely(ret)) - APPL_EXIT_FAILURE("shm capability error:%d", ret); - - if (shm_capa.flags & ODP_SHM_SINGLE_VA) - flags |= ODP_SHM_SINGLE_VA; - - /* Reserve packet I/O shared memory */ - shm = odp_shm_reserve("pktio_shm", sizeof(pktio_shm_t), - ODP_CACHE_LINE_SIZE, flags); - - if (unlikely(shm == ODP_SHM_INVALID)) - APPL_EXIT_FAILURE("pktio shared mem reserve failed."); - - pktio_shm = odp_shm_addr(shm); - if (unlikely(pktio_shm == NULL)) - APPL_EXIT_FAILURE("obtaining pktio shared mem addr failed."); - - memset(pktio_shm, 0, sizeof(pktio_shm_t)); -} - -void pktio_mem_lookup(bool is_thread_per_core) -{ - odp_shm_t shm; - pktio_shm_t *shm_addr; - - shm = odp_shm_lookup("pktio_shm"); - - shm_addr = odp_shm_addr(shm); - if (unlikely(shm_addr == NULL)) - APPL_EXIT_FAILURE("pktio shared mem addr lookup failed."); - - /* - * Set pktio_shm in process-per-core mode, each process has own pointer. - */ - if (!is_thread_per_core && pktio_shm != shm_addr) - pktio_shm = shm_addr; -} - -void pktio_mem_free(void) -{ - odp_shm_t shm; - - shm = odp_shm_lookup("pktio_shm"); - if (unlikely(shm == ODP_SHM_INVALID)) - APPL_EXIT_FAILURE("pktio shared mem lookup for free failed."); - - if (odp_shm_free(shm) != 0) - APPL_EXIT_FAILURE("pktio shared mem free failed."); - pktio_shm = NULL; -} - -/** - * Helper to pktio_pool_create(): create the pktio pool as an EM event-pool - */ -static void pktio_pool_create_em(int if_count, const odp_pool_capability_t *pool_capa) -{ - /* - * Create the pktio pkt pool used for actual input pkts. - * Create the pool as an EM-pool (and convert into an ODP-pool where - * needed) to be able to utilize EM's Event State Verification (ESV) - * in the 'esv.prealloc_pools = true' mode (see config/em-odp.conf). 
- */ - em_pool_cfg_t pool_cfg; - em_pool_t pool; - - em_pool_cfg_init(&pool_cfg); - pool_cfg.event_type = EM_EVENT_TYPE_PACKET; - pool_cfg.num_subpools = 1; - pool_cfg.subpool[0].size = PKTIO_PKT_POOL_BUF_SIZE; - pool_cfg.subpool[0].num = if_count * PKTIO_PKT_POOL_NUM_BUFS; - /* Use max thread-local pkt-cache size to speed up pktio allocs */ - pool_cfg.subpool[0].cache_size = pool_capa->pkt.max_cache_size; - pool = em_pool_create("pktio-pool-em", EM_POOL_UNDEF, &pool_cfg); - if (pool == EM_POOL_UNDEF) - APPL_EXIT_FAILURE("pktio pool creation failed"); - - /* Convert: EM-pool to ODP-pool */ - odp_pool_t odp_pool = ODP_POOL_INVALID; - int ret = em_odp_pool2odp(pool, &odp_pool, 1); - - if (unlikely(ret != 1)) - APPL_EXIT_FAILURE("EM pktio pool creation failed:%d", ret); - - /* Store the EM pktio pool and the corresponding ODP subpool */ - pktio_shm->pools.pktpool_em = pool; - pktio_shm->pools.pktpool_odp = odp_pool; - - odp_pool_print(pktio_shm->pools.pktpool_odp); -} - -/** - * Helper to pktio_pool_create(): create the pktio pool as an ODP pkt-pool - */ -static void pktio_pool_create_odp(int if_count, const odp_pool_capability_t *pool_capa) -{ - odp_pool_param_t pool_params; - - (void)pool_capa; - - odp_pool_param_init(&pool_params); - pool_params.pkt.num = if_count * PKTIO_PKT_POOL_NUM_BUFS; - /* pool_params.pkt.max_num = default */ - pool_params.pkt.len = PKTIO_PKT_POOL_BUF_SIZE; - pool_params.pkt.max_len = PKTIO_PKT_POOL_BUF_SIZE; - pool_params.pkt.seg_len = PKTIO_PKT_POOL_BUF_SIZE; - - pool_params.type = ODP_POOL_PACKET; - pool_params.pkt.uarea_size = em_odp_event_hdr_size(); - - odp_pool_t odp_pool = odp_pool_create("pktio-pool-odp", &pool_params); - - if (odp_pool == ODP_POOL_INVALID) - APPL_EXIT_FAILURE("pktio pool creation failed"); - - /* Store the ODP pktio pool */ - pktio_shm->pools.pktpool_odp = odp_pool; - pktio_shm->pools.pktpool_em = EM_POOL_UNDEF; - - odp_pool_print(pktio_shm->pools.pktpool_odp); -} - -static void pktio_vectorpool_create_em(int if_count, const odp_pool_capability_t *pool_capa) -{ - if (unlikely(pool_capa->vector.max_pools == 0 || - pool_capa->vector.max_size == 0)) - APPL_EXIT_FAILURE("ODP pktin vectors not supported!"); - - uint32_t vec_size = PKTIO_VEC_POOL_VEC_SIZE; - uint32_t num_pkt = PKTIO_PKT_POOL_NUM_BUFS * if_count; - uint32_t num_vec = num_pkt; /* worst case: 1 pkt per vector */ - - if (vec_size > pool_capa->vector.max_size) { - vec_size = pool_capa->vector.max_size; - APPL_PRINT("\nWarning: pktin vector size reduced to %u\n\n", - vec_size); - } - - if (pool_capa->vector.max_num /* 0=limited only by pool memsize */ && - num_vec > pool_capa->vector.max_num) { - num_vec = pool_capa->vector.max_num; - APPL_PRINT("\nWarning: pktin number of vectors reduced to %u\n\n", - num_vec); - } - - em_pool_cfg_t pool_cfg; - - em_pool_cfg_init(&pool_cfg); - pool_cfg.event_type = EM_EVENT_TYPE_VECTOR; - pool_cfg.num_subpools = 1; - - pool_cfg.subpool[0].size = vec_size; /* nbr of events in vector */ - pool_cfg.subpool[0].num = num_vec; - /* Use max thread-local pkt-cache size to speed up pktio allocs */ - pool_cfg.subpool[0].cache_size = pool_capa->pkt.max_cache_size; - - em_pool_t vector_pool = em_pool_create("vector-pool-em", EM_POOL_UNDEF, &pool_cfg); - - if (vector_pool == EM_POOL_UNDEF) - APPL_EXIT_FAILURE("EM vector pool create failed"); - - /* Convert: EM-pool to ODP-pool */ - odp_pool_t odp_vecpool = ODP_POOL_INVALID; - int ret = em_odp_pool2odp(vector_pool, &odp_vecpool, 1); - - if (unlikely(ret != 1)) - APPL_EXIT_FAILURE("EM pktio pool creation failed:%d", 
ret); - - /* Store the EM pktio pool and the corresponding ODP subpool */ - pktio_shm->pools.vecpool_em = vector_pool; - pktio_shm->pools.vecpool_odp = odp_vecpool; - - odp_pool_print(odp_vecpool); -} - -static void pktio_vectorpool_create_odp(int if_count, const odp_pool_capability_t *pool_capa) -{ - odp_pool_param_t pool_params; - - odp_pool_param_init(&pool_params); - - pool_params.type = ODP_POOL_VECTOR; - - if (unlikely(pool_capa->vector.max_pools == 0 || - pool_capa->vector.max_size == 0)) - APPL_EXIT_FAILURE("ODP pktin vectors not supported!"); - - uint32_t vec_size = PKTIO_VEC_POOL_VEC_SIZE; - uint32_t num_pkt = PKTIO_PKT_POOL_NUM_BUFS * if_count; - uint32_t num_vec = num_pkt; /* worst case: 1 pkt per vector */ - - if (vec_size > pool_capa->vector.max_size) { - vec_size = pool_capa->vector.max_size; - APPL_PRINT("\nWarning: pktin vector size reduced to %u\n\n", - vec_size); - } - - if (pool_capa->vector.max_num /* 0=limited only by pool memsize */ && - num_vec > pool_capa->vector.max_num) { - num_vec = pool_capa->vector.max_num; - APPL_PRINT("\nWarning: pktin number of vectors reduced to %u\n\n", - num_vec); - } - - pool_params.vector.num = num_vec; - pool_params.vector.max_size = vec_size; - pool_params.vector.uarea_size = em_odp_event_hdr_size(); - - odp_pool_t vector_pool = odp_pool_create("vector-pool-odp", &pool_params); - - if (vector_pool == ODP_POOL_INVALID) - APPL_EXIT_FAILURE("ODP vector pool create failed"); - - pktio_shm->pools.vecpool_odp = vector_pool; - - odp_pool_print(vector_pool); -} - -/** - * Create the memory pool used by pkt-io - */ -void pktio_pool_create(int if_count, bool pktpool_em, - bool pktin_vector, bool vecpool_em) -{ - odp_pool_capability_t pool_capa; - - if (odp_pool_capability(&pool_capa) != 0) - APPL_EXIT_FAILURE("Can't get odp-pool capability"); - /* - * Create the pktio pkt pool used for actual input pkts. - * Create the pool either as an EM- or ODP-pool. 
- */ - if (pktpool_em) - pktio_pool_create_em(if_count, &pool_capa); - else - pktio_pool_create_odp(if_count, &pool_capa); - - if (pktin_vector) { - if (vecpool_em) - pktio_vectorpool_create_em(if_count, &pool_capa); - else - pktio_vectorpool_create_odp(if_count, &pool_capa); - } -} - -/** - * Helper to pktio_pool_destroy(): destroy the EM event-pool used for pktio - */ -static void pktio_pool_destroy_em(void) -{ - APPL_PRINT("\n%s(): deleting the EM pktio-pool:\n", __func__); - em_pool_info_print(pktio_shm->pools.pktpool_em); - - if (em_pool_delete(pktio_shm->pools.pktpool_em) != EM_OK) - APPL_EXIT_FAILURE("EM pktio-pool delete failed."); - - pktio_shm->pools.pktpool_em = EM_POOL_UNDEF; - pktio_shm->pools.pktpool_odp = ODP_POOL_INVALID; -} - -/** - * Helper to pktio_pool_destroy(): destroy the ODP pkt-pool used for pktio - */ -static void pktio_pool_destroy_odp(void) -{ - APPL_PRINT("\n%s(): destroying the ODP pktio-pool\n", __func__); - if (odp_pool_destroy(pktio_shm->pools.pktpool_odp) != 0) - APPL_EXIT_FAILURE("ODP pktio-pool destroy failed."); - - pktio_shm->pools.pktpool_odp = ODP_POOL_INVALID; -} - -/** - * Helper to pktio_pool_destroy(): destroy the pktin EM vector pool - */ -static void pktio_vectorpool_destroy_em(void) -{ - APPL_PRINT("\n%s(): deleting the EM vector-pool:\n", __func__); - em_pool_info_print(pktio_shm->pools.vecpool_em); - - if (em_pool_delete(pktio_shm->pools.vecpool_em) != EM_OK) - APPL_EXIT_FAILURE("EM pktio-pool delete failed."); - - pktio_shm->pools.vecpool_em = EM_POOL_UNDEF; - pktio_shm->pools.vecpool_odp = ODP_POOL_INVALID; -} - -/** - * Helper to pktio_pool_destroy(): destroy the ODP pktin vector pool - */ -static void pktio_vectorpool_destroy_odp(void) -{ - APPL_PRINT("\n%s(): destroying the ODP pktin vector-pool\n", __func__); - if (odp_pool_destroy(pktio_shm->pools.vecpool_odp) != 0) - APPL_EXIT_FAILURE("ODP pktin vector-pool destroy failed."); - - pktio_shm->pools.vecpool_odp = ODP_POOL_INVALID; -} - -/** - * Destroy the memory pool used by pkt-io - */ -void pktio_pool_destroy(bool pktpool_em, bool pktin_vector, bool vecpool_em) -{ - if (pktpool_em) - pktio_pool_destroy_em(); - else - pktio_pool_destroy_odp(); - - if (pktin_vector) { - if (vecpool_em) - pktio_vectorpool_destroy_em(); - else - pktio_vectorpool_destroy_odp(); - } -} - -void pktio_init(const appl_conf_t *appl_conf) -{ - pktin_mode_t in_mode = appl_conf->pktio.in_mode; - odp_stash_capability_t stash_capa; - odp_stash_param_t stash_param; - odp_stash_t stash; - int ret; - - pktio_shm->ifs.count = appl_conf->pktio.if_count; - pktio_shm->ifs.num_created = 0; - pktio_shm->default_queue = EM_QUEUE_UNDEF; - - pktio_shm->pktin.in_mode = in_mode; - pktio_shm->pktin.pktin_queue_stash = ODP_STASH_INVALID; - - ret = odp_stash_capability(&stash_capa, ODP_STASH_TYPE_FIFO); - if (ret != 0) - APPL_EXIT_FAILURE("odp_stash_capability() fails:%d", ret); - - if (pktin_polled_mode(in_mode)) { - /* - * Create a stash to hold the shared queues used in pkt input. Each core - * needs to get one queue to be able to use it to receive packets. 
- * DIRECT_RECV-mode: the stash contains pointers to odp_pktin_queue_t:s - * PLAIN_QUEUE-mode: the stash contains odp_queue_t:s - */ - odp_stash_param_init(&stash_param); - stash_param.type = ODP_STASH_TYPE_FIFO; - stash_param.put_mode = ODP_STASH_OP_MT; - stash_param.get_mode = ODP_STASH_OP_MT; - stash_param.num_obj = PKTIO_MAX_IN_QUEUES * IF_MAX_NUM; - if (stash_param.num_obj > stash_capa.max_num_obj) - APPL_EXIT_FAILURE("Unsupported odp-stash number of objects:%" PRIu64 "", - stash_param.num_obj); - stash_param.obj_size = MAX(sizeof(odp_queue_t), sizeof(odp_pktin_queue_t *)); - if (!POWEROF2(stash_param.obj_size) || - stash_param.obj_size != sizeof(uintptr_t) || - stash_param.obj_size > stash_capa.max_obj_size) { - APPL_EXIT_FAILURE("Unsupported odp-stash object handle size:%u, max:%u", - stash_param.obj_size, stash_capa.max_obj_size); - } - stash_param.cache_size = 0; /* No core local caching */ - - stash = odp_stash_create("pktin.pktin_queue_stash", &stash_param); - if (stash == ODP_STASH_INVALID) - APPL_EXIT_FAILURE("odp_stash_create() fails"); - - pktio_shm->pktin.pktin_queue_stash = stash; - } - - /* - * Create a stash to hold the shared tx-burst buffers, - * used when draining the available tx-burst buffers - */ - odp_stash_param_init(&stash_param); - stash_param.type = ODP_STASH_TYPE_FIFO; - stash_param.put_mode = ODP_STASH_OP_MT; - stash_param.get_mode = ODP_STASH_OP_MT; - stash_param.num_obj = MAX_TX_BURST_BUFS * IF_MAX_NUM; - if (stash_param.num_obj > stash_capa.max_num_obj) - APPL_EXIT_FAILURE("Unsupported odp-stash number of objects:%" PRIu64 "", - stash_param.num_obj); - stash_param.obj_size = sizeof(tx_burst_t *); /* stash pointers */ - if (!POWEROF2(stash_param.obj_size) || - stash_param.obj_size != sizeof(uintptr_t) || - stash_param.obj_size > stash_capa.max_obj_size) { - APPL_EXIT_FAILURE("Unsupported odp-stash object handle size:%u", - stash_param.obj_size); - } - stash_param.cache_size = 0; /* No core local caching */ - - stash = odp_stash_create("pktout.tx-burst-stash", &stash_param); - if (stash == ODP_STASH_INVALID) - APPL_EXIT_FAILURE("odp_stash_create() fails"); - pktio_shm->pktout.tx_burst_stash = stash; - - /* Misc inits: */ - for (int i = 0; i < MAX_RX_PKT_QUEUES; i++) { - pktio_shm->rx_pkt_queues[i].pos = i; - pktio_shm->rx_pkt_queues[i].queue = EM_QUEUE_UNDEF; - } - - odp_ticketlock_init(&pktio_shm->tbl_lookup.lock); - pktio_shm->tbl_lookup.tbl_idx = 0; - pktio_shm->tbl_lookup.ops = cuckoo_table_ops; - odp_ticketlock_lock(&pktio_shm->tbl_lookup.lock); - pktio_shm->tbl_lookup.tbl = - pktio_shm->tbl_lookup.ops.f_create("RX-lookup-tbl", MAX_RX_PKT_QUEUES, - sizeof(pkt_q_hash_key_t), - sizeof(rx_pkt_queue_t)); - odp_ticketlock_unlock(&pktio_shm->tbl_lookup.lock); - if (unlikely(pktio_shm->tbl_lookup.tbl == NULL)) - APPL_EXIT_FAILURE("rx pkt lookup table creation fails"); -} - -void pktio_deinit(const appl_conf_t *appl_conf) -{ - (void)appl_conf; - - if (pktin_polled_mode(appl_conf->pktio.in_mode)) - odp_stash_destroy(pktio_shm->pktin.pktin_queue_stash); - odp_stash_destroy(pktio_shm->pktout.tx_burst_stash); - - pktio_shm->tbl_lookup.ops.f_des(pktio_shm->tbl_lookup.tbl); -} - -static void pktio_tx_buffering_create(int if_num) -{ - tx_burst_t *tx_burst; - odp_queue_param_t queue_param; - odp_queue_t odp_queue; - int pktout_idx; - odp_queue_t pktout_queue; - char name[ODP_QUEUE_NAME_LEN]; - - const int pktout_num_queues = pktio_shm->pktout.num_queues[if_num]; - - for (int i = 0; i < MAX_TX_BURST_BUFS; i++) { - tx_burst = &pktio_shm->tx_burst[if_num][i]; - - 
odp_atomic_init_u64(&tx_burst->cnt, 0); - odp_spinlock_init(&tx_burst->lock); - - odp_queue_param_init(&queue_param); - queue_param.type = ODP_QUEUE_TYPE_PLAIN; - queue_param.enq_mode = ODP_QUEUE_OP_MT; - queue_param.deq_mode = ODP_QUEUE_OP_MT_UNSAFE; - /* ignore odp ordering, EM handles output order, just buffer */ - queue_param.order = ODP_QUEUE_ORDER_IGNORE; - - snprintf(name, ODP_QUEUE_NAME_LEN, "tx-burst-if%d-%03d", - if_num, i); - name[ODP_QUEUE_NAME_LEN - 1] = '\0'; - - odp_queue = odp_queue_create(name, &queue_param); - if (unlikely(odp_queue == ODP_QUEUE_INVALID)) - APPL_EXIT_FAILURE("odp_queue_create() fails:if=%d(%d)", - if_num, i); - tx_burst->queue = odp_queue; - tx_burst->if_port = if_num; - - pktout_idx = i % pktout_num_queues; - pktout_queue = pktio_shm->pktout.queues[if_num][pktout_idx]; - tx_burst->pktout_queue = pktout_queue; - - /* - * Store each tx burst into the tx_burst_stash, stash used when - * draining the available tx-burst buffers. - */ - uintptr_t tx_burst_uintptr = (uintptr_t)tx_burst; - int ret = odp_stash_put_ptr(pktio_shm->pktout.tx_burst_stash, - &tx_burst_uintptr, 1); - if (unlikely(ret != 1)) - APPL_EXIT_FAILURE("enqueue fails"); - } -} - -static void pktio_tx_buffering_destroy(void) -{ - tx_burst_t *tx_burst; - int num; - - while ((tx_burst = tx_drain_burst_acquire()) != NULL) { - do { - num = odp_queue_deq_multi(tx_burst->queue, - pktio_locm.ev_burst, - MAX_PKT_BURST_TX); - if (unlikely(num <= 0)) - break; - - odp_atomic_sub_u64(&tx_burst->cnt, (uint64_t)num); - odp_event_free_multi(pktio_locm.ev_burst, num); - } while (num > 0); - - odp_queue_destroy(tx_burst->queue); - } -} - -static inline void -pktin_queue_stashing_create(int if_num, pktin_mode_t in_mode) -{ - int num_rx = pktio_shm->pktin.num_queues[if_num]; - uintptr_t uintptr; - int ret; - - for (int i = 0; i < num_rx; i++) { - if (in_mode == PLAIN_QUEUE) { - odp_queue_t queue; - - queue = pktio_shm->pktin.plain_queues[if_num][i]; - uintptr = (uintptr_t)queue; - } else /* DIRECT_RECV*/ { - odp_pktin_queue_t *pktin_qptr; - - pktin_qptr = &pktio_shm->pktin.pktin_queues[if_num][i]; - uintptr = (uintptr_t)pktin_qptr; - } - - /* - * Store the queue or the pktin_queue-ptr as an 'uintptr_t' - * in the stash. - */ - ret = odp_stash_put_ptr(pktio_shm->pktin.pktin_queue_stash, - &uintptr, 1); - if (unlikely(ret != 1)) - APPL_EXIT_FAILURE("stash-put fails:%d", ret); - } -} - -static inline void -pktin_queue_queueing_destroy(void) -{ - pktin_mode_t in_mode = pktio_shm->pktin.in_mode; - - if (in_mode == PLAIN_QUEUE) { - while (plain_queue_acquire() != ODP_QUEUE_INVALID) - ; /* empty stash */ - } else if (in_mode == DIRECT_RECV) { - odp_pktin_queue_t *pktin_queue_ptr; - - while (pktin_queue_acquire(&pktin_queue_ptr) == 0) - ; /* empty stash */ - } -} - -static void -set_pktin_vector_params(odp_pktin_queue_param_t *pktin_queue_param, - odp_pool_t vec_pool, - const odp_pktio_capability_t *pktio_capa) -{ - uint32_t vec_size = PKTIO_VEC_SIZE; - uint64_t vec_tmo_ns = PKTIO_VEC_TMO; - - pktin_queue_param->vector.enable = true; - pktin_queue_param->vector.pool = vec_pool; - - if (vec_size > pktio_capa->vector.max_size || - vec_size < pktio_capa->vector.min_size) { - vec_size = (vec_size > pktio_capa->vector.max_size) ? 
- pktio_capa->vector.max_size : pktio_capa->vector.min_size; - APPL_PRINT("\nWarning: Modified vector size to %u\n\n", vec_size); - } - pktin_queue_param->vector.max_size = vec_size; - - if (vec_tmo_ns > pktio_capa->vector.max_tmo_ns || - vec_tmo_ns < pktio_capa->vector.min_tmo_ns) { - vec_tmo_ns = (vec_tmo_ns > pktio_capa->vector.max_tmo_ns) ? - pktio_capa->vector.max_tmo_ns : pktio_capa->vector.min_tmo_ns; - APPL_PRINT("\nWarning: Modified vector timeout to %" PRIu64 "\n\n", vec_tmo_ns); - } - pktin_queue_param->vector.max_tmo_ns = vec_tmo_ns; -} - -/** Helper to pktio_create() for packet input configuration */ -static void pktin_config(const char *dev, int if_idx, odp_pktio_t pktio, - const odp_pktio_capability_t *pktio_capa, - int if_count, int num_workers, pktin_mode_t in_mode, - bool pktin_vector) -{ - odp_pktin_queue_param_t pktin_queue_param; - int num_rx, max; - int ret; - - odp_pktin_queue_param_init(&pktin_queue_param); - - max = MIN((int)pktio_capa->max_input_queues, PKTIO_MAX_IN_QUEUES); - num_rx = 2 * (ROUND_UP(num_workers, if_count) / if_count); - num_rx = MIN(max, num_rx); - - APPL_PRINT("\tmax number of pktio dev:'%s' input queues:%d, using:%d\n", - dev, pktio_capa->max_input_queues, num_rx); - - pktin_queue_param.hash_enable = 1; - pktin_queue_param.classifier_enable = 0; - pktin_queue_param.hash_proto.proto.ipv4_udp = 1; - pktin_queue_param.num_queues = num_rx; - - if (pktin_polled_mode(in_mode)) { - pktin_queue_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE; - } else if (pktin_sched_mode(in_mode)) { - pktin_queue_param.queue_param.type = ODP_QUEUE_TYPE_SCHED; - pktin_queue_param.queue_param.sched.prio = odp_schedule_default_prio(); - if (in_mode == SCHED_PARALLEL) - pktin_queue_param.queue_param.sched.sync = ODP_SCHED_SYNC_PARALLEL; - else if (in_mode == SCHED_ATOMIC) - pktin_queue_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC; - else /* in_mode == SCHED_ORDERED */ - pktin_queue_param.queue_param.sched.sync = ODP_SCHED_SYNC_ORDERED; - - pktin_queue_param.queue_param.sched.group = em_odp_qgrp2odp(EM_QUEUE_GROUP_DEFAULT); - - if (pktin_vector) { - if (!pktio_capa->vector.supported) - APPL_EXIT_FAILURE("pktin, dev:'%s': input vectors not supported", - dev); - set_pktin_vector_params(&pktin_queue_param, - pktio_shm->pools.vecpool_odp, - pktio_capa); - } - } - - ret = odp_pktin_queue_config(pktio, &pktin_queue_param); - if (ret < 0) - APPL_EXIT_FAILURE("pktin, dev:'%s': input queue config failed: %d", - dev, ret); - - if (in_mode == PLAIN_QUEUE) { - ret = odp_pktin_event_queue(pktio, pktio_shm->pktin.plain_queues[if_idx]/*out*/, - num_rx); - if (ret != num_rx) - APPL_EXIT_FAILURE("pktin, dev:'%s': plain event queue query failed: %d", - dev, ret); - } else if (pktin_sched_mode(in_mode)) { - odp_queue_t *pktin_sched_queues = &pktio_shm->pktin.sched_queues[if_idx][0]; - em_queue_t *pktin_sched_em_queues = &pktio_shm->pktin.sched_em_queues[if_idx][0]; - - ret = odp_pktin_event_queue(pktio, pktin_sched_queues/*[out]*/, num_rx); - if (ret != num_rx) - APPL_EXIT_FAILURE("pktin, dev:'%s': odp_pktin_event_queue():%d", - dev, ret); - /* - * Create EM queues mapped to the ODP scheduled pktin event queues - */ - ret = em_odp_pktin_event_queues2em(pktin_sched_queues/*[in]*/, - pktin_sched_em_queues/*[out]*/, - num_rx); - if (ret != num_rx) - APPL_EXIT_FAILURE("pktin, dev:'%s': em_odp_pktin_queues2em():%d", - dev, ret); - } else /* DIRECT_RECV */ { - ret = odp_pktin_queue(pktio, pktio_shm->pktin.pktin_queues[if_idx]/*[out]*/, - num_rx); - if (ret != num_rx) - APPL_EXIT_FAILURE("pktin, 
dev:'%s': direct queue query failed: %d", - dev, ret); - } - - pktio_shm->pktin.num_queues[if_idx] = num_rx; - - if (pktin_polled_mode(in_mode)) { - /* - * Store all pktin queues in a stash - each core 'gets' acquires - * a pktin queue to use from this stash. - */ - pktin_queue_stashing_create(if_idx, in_mode); - } -} - -/** Helper to pktio_create() for packet output configuration */ -static void pktout_config(const char *dev, int if_idx, odp_pktio_t pktio, - const odp_pktio_capability_t *pktio_capa, - int num_workers) -{ - odp_pktout_queue_param_t pktout_queue_param; - odp_pktio_op_mode_t mode_tx; - int num_tx, max; - int ret; - - odp_pktout_queue_param_init(&pktout_queue_param); - mode_tx = ODP_PKTIO_OP_MT; - max = MIN((int)pktio_capa->max_output_queues, PKTIO_MAX_OUT_QUEUES); - num_tx = MIN(2 * num_workers, max); - APPL_PRINT("\tmax number of pktio dev:'%s' output queues:%d, using:%d\n", - dev, pktio_capa->max_output_queues, num_tx); - - pktout_queue_param.num_queues = num_tx; - pktout_queue_param.op_mode = mode_tx; - - ret = odp_pktout_queue_config(pktio, &pktout_queue_param); - if (ret < 0) - APPL_EXIT_FAILURE("pktio output queue config failed dev:'%s' (%d)", - dev, ret); - - ret = odp_pktout_event_queue(pktio, pktio_shm->pktout.queues[if_idx], - num_tx); - if (ret != num_tx || ret > PKTIO_MAX_OUT_QUEUES) - APPL_EXIT_FAILURE("pktio pktout queue query failed dev:'%s' (%d)", - dev, ret); - pktio_shm->pktout.num_queues[if_idx] = num_tx; - - /* Create Tx buffers */ - pktio_tx_buffering_create(if_idx); -} - -int /* if_id */ -pktio_create(const char *dev, pktin_mode_t in_mode, bool pktin_vector, - int if_count, int num_workers) -{ - int if_idx = -1; /* return value */ - odp_pktio_param_t pktio_param; - odp_pktio_t pktio; - odp_pktio_capability_t pktio_capa; - odp_pktio_config_t pktio_config; - odp_pktio_info_t info; - int ret; - - odp_pktio_param_init(&pktio_param); - - /* Packet input mode */ - if (in_mode == DIRECT_RECV) - pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT; - else if (in_mode == PLAIN_QUEUE) - pktio_param.in_mode = ODP_PKTIN_MODE_QUEUE; - else if (pktin_sched_mode(in_mode)) - pktio_param.in_mode = ODP_PKTIN_MODE_SCHED; - else - APPL_EXIT_FAILURE("dev:'%s': unsupported pktin-mode:%d\n", - dev, in_mode); - - /* Packet output mode: QUEUE mode to preserve packet order if needed */ - pktio_param.out_mode = ODP_PKTOUT_MODE_QUEUE; - - pktio = odp_pktio_open(dev, pktio_shm->pools.pktpool_odp, &pktio_param); - if (pktio == ODP_PKTIO_INVALID) - APPL_EXIT_FAILURE("pktio create failed for dev:'%s'\n", dev); - - if (odp_pktio_info(pktio, &info)) - APPL_EXIT_FAILURE("pktio info failed dev:'%s'", dev); - - if_idx = odp_pktio_index(pktio); - if (if_idx < 0 || if_idx >= IF_MAX_NUM) - APPL_EXIT_FAILURE("pktio index:%d too large, dev:'%s'", - if_idx, dev); - - APPL_PRINT("\n%s(dev=%s):\n", __func__, dev); - APPL_PRINT("\tcreated pktio:%" PRIu64 " idx:%d, dev:'%s', drv:%s\n", - odp_pktio_to_u64(pktio), if_idx, dev, info.drv_name); - - ret = odp_pktio_capability(pktio, &pktio_capa); - if (ret != 0) - APPL_EXIT_FAILURE("pktio capability query failed: dev:'%s' (%d)", - dev, ret); - - odp_pktio_config_init(&pktio_config); - pktio_config.parser.layer = ODP_PROTO_LAYER_NONE; - /* Provide hint to pktio that packet references are not used */ - pktio_config.pktout.bit.no_packet_refs = 1; - - ret = odp_pktio_config(pktio, &pktio_config); - if (ret != 0) - APPL_EXIT_FAILURE("pktio config failed: dev:'%s' (%d)", - dev, ret); - - /* Pktin (Rx) config */ - pktin_config(dev, if_idx, pktio, &pktio_capa, - 
if_count, num_workers, in_mode, pktin_vector); - - /* Pktout (Tx) config */ - pktout_config(dev, if_idx, pktio, &pktio_capa, num_workers); - - APPL_PRINT("\tcreated pktio dev:'%s' - input mode:%s, output mode:QUEUE", - dev, pktin_mode_str(in_mode)); - - pktio_shm->ifs.idx[pktio_shm->ifs.num_created] = if_idx; - pktio_shm->ifs.pktio_hdl[if_idx] = pktio; - pktio_shm->ifs.num_created++; - - return if_idx; -} - -void -pktio_start(void) -{ - if (pktio_shm->ifs.num_created != pktio_shm->ifs.count) - APPL_EXIT_FAILURE("Pktio IFs created:%d != IF count:%d", - pktio_shm->ifs.num_created, - pktio_shm->ifs.count); - - for (int i = 0; i < pktio_shm->ifs.count; i++) { - int if_idx = pktio_shm->ifs.idx[i]; - odp_pktio_t pktio = pktio_shm->ifs.pktio_hdl[if_idx]; - int ret = odp_pktio_start(pktio); - - if (unlikely(ret != 0)) - APPL_EXIT_FAILURE("Unable to start if:%d", if_idx); - APPL_PRINT("%s(): if:%d\n", __func__, if_idx); - } - - odp_mb_full(); - pktio_shm->pktio_started = 1; -} - -void pktio_halt(void) -{ - pktio_shm->pktio_started = 0; - odp_mb_full(); - APPL_PRINT("\n%s() on EM-core %d\n", __func__, em_core_id()); -} - -void pktio_stop(void) -{ - for (int i = 0; i < pktio_shm->ifs.count; i++) { - int if_idx = pktio_shm->ifs.idx[i]; - odp_pktio_t pktio = pktio_shm->ifs.pktio_hdl[if_idx]; - int ret = odp_pktio_stop(pktio); - - if (unlikely(ret != 0)) - APPL_EXIT_FAILURE("Unable to stop if:%d", if_idx); - APPL_PRINT("%s(): if:%d\n", __func__, if_idx); - } -} - -void pktio_close(void) -{ - for (int i = 0; i < pktio_shm->ifs.count; i++) { - int if_idx = pktio_shm->ifs.idx[i]; - odp_pktio_t pktio = pktio_shm->ifs.pktio_hdl[if_idx]; - int ret = odp_pktio_close(pktio); - - if (unlikely(ret != 0)) - APPL_EXIT_FAILURE("pktio close failed for if:%d", if_idx); - - pktio_shm->ifs.pktio_hdl[if_idx] = ODP_PKTIO_INVALID; - } - - if (pktin_polled_mode(pktio_shm->pktin.in_mode)) - pktin_queue_queueing_destroy(); - pktio_tx_buffering_destroy(); -} - -static inline int -pktin_queue_acquire(odp_pktin_queue_t **pktin_queue_ptr /*out*/) -{ - odp_pktin_queue_t *pktin_qptr; - uintptr_t pktin_qptr_uintptr; - - int ret = odp_stash_get_ptr(pktio_shm->pktin.pktin_queue_stash, - &pktin_qptr_uintptr, 1); - - if (unlikely(ret != 1)) - return -1; - - pktin_qptr = (odp_pktin_queue_t *)pktin_qptr_uintptr; - - *pktin_queue_ptr = pktin_qptr; - return 0; -} - -static inline void -pktin_queue_release(odp_pktin_queue_t *pktin_queue_ptr) -{ - uintptr_t pktin_qptr_uintptr; - - /* store the pointer as an 'uintptr_t' in the stash */ - pktin_qptr_uintptr = (uintptr_t)pktin_queue_ptr; - - int ret = odp_stash_put_ptr(pktio_shm->pktin.pktin_queue_stash, - &pktin_qptr_uintptr, 1); - if (unlikely(ret != 1)) - APPL_EXIT_FAILURE("stash-put fails:%d", ret); -} - -static inline odp_queue_t -plain_queue_acquire(void) -{ - odp_queue_t queue; - uintptr_t queue_uintptr; - - int ret = odp_stash_get_ptr(pktio_shm->pktin.pktin_queue_stash, - &queue_uintptr, 1); - if (unlikely(ret != 1)) - return ODP_QUEUE_INVALID; - - queue = (odp_queue_t)queue_uintptr; - - return queue; -} - -static inline void -plain_queue_release(odp_queue_t queue) -{ - uintptr_t queue_uintptr; - - /* store the queue as an 'uintptr_t' in the stash */ - queue_uintptr = (uintptr_t)queue; - - int ret = odp_stash_put_ptr(pktio_shm->pktin.pktin_queue_stash, - &queue_uintptr, 1); - if (unlikely(ret != 1)) - APPL_EXIT_FAILURE("stash-put fails:%d", ret); -} - -/* - * Helper to the pktin_pollfn_...() functions. 
- */ -static inline int /* nbr of pkts enqueued */ -pktin_lookup_enqueue(odp_packet_t pkt_tbl[], int pkts) -{ - const table_get_value f_get = pktio_shm->tbl_lookup.ops.f_get; - rx_queue_burst_t *const rx_qbursts = pktio_locm.rx_qbursts; - int pkts_enqueued = 0; /* return value */ - int valid_pkts = 0; - - for (int i = 0; i < pkts; i++) { - const odp_packet_t pkt = pkt_tbl[i]; - void *const pkt_data = odp_packet_data(pkt); - - /* - * If 'pktio_config.parser.layer = - * ODP_PKTIO_PARSER_LAYER_L4;' then the following - * better checks can be used (is slower though). - * if (unlikely(!odp_packet_has_udp(pkt))) { - * odp_packet_free(pkt); - * continue; - * } - * - * pkt_data = odp_packet_data(pkt); - * ip = (odph_ipv4hdr_t *)((uintptr_t)pkt_data + - * odp_packet_l3_offset(pkt)); - * udp = (odph_udphdr_t *)((uintptr_t)pkt_data + - * odp_packet_l4_offset(pkt)); - */ - - /* Note: no actual checks if the headers are present */ - odph_ipv4hdr_t *const ip = (odph_ipv4hdr_t *) - ((uintptr_t)pkt_data + sizeof(odph_ethhdr_t)); - odph_udphdr_t *const udp = (odph_udphdr_t *) - ((uintptr_t)ip + sizeof(odph_ipv4hdr_t)); - /* - * NOTE! network-to-CPU conversion not needed here. - * Setup stores network-order in hash to avoid - * conversion for every packet. - */ - pktio_locm.keys[i].ip_dst = ip->dst_addr; - pktio_locm.keys[i].proto = ip->proto; - pktio_locm.keys[i].port_dst = - likely(ip->proto == ODPH_IPPROTO_UDP || - ip->proto == ODPH_IPPROTO_TCP) ? - udp->dst_port : 0; - } - - for (int i = 0; i < pkts; i++) { - const odp_packet_t pkt = pkt_tbl[i]; - rx_pkt_queue_t rx_pkt_queue; - em_queue_t queue; - int pos; - - /* table(hash) lookup to find queue */ - int ret = f_get(pktio_shm->tbl_lookup.tbl, - &pktio_locm.keys[i], - &rx_pkt_queue, sizeof(rx_pkt_queue_t)); - if (likely(ret == 0)) { - /* found */ - pos = rx_pkt_queue.pos; - queue = rx_pkt_queue.queue; - } else { - /* not found, use default queue if set */ - pos = MAX_RX_PKT_QUEUES; /* reserved space +1*/ - queue = pktio_shm->default_queue; - if (unlikely(queue == EM_QUEUE_UNDEF)) { - odp_packet_free(pkt); - continue; - } - } - - pktio_locm.positions[valid_pkts++] = pos; - rx_qbursts[pos].sent = 0; - rx_qbursts[pos].queue = queue; - rx_qbursts[pos].pkt_tbl[rx_qbursts[pos].pkt_cnt++] = pkt; - } - - for (int i = 0; i < valid_pkts; i++) { - const int pos = pktio_locm.positions[i]; - - if (rx_qbursts[pos].sent) - continue; - - const int num = rx_qbursts[pos].pkt_cnt; - const em_queue_t queue = rx_qbursts[pos].queue; - - /* Enqueue pkts into em-odp */ - pkts_enqueued += em_odp_pkt_enqueue(rx_qbursts[pos].pkt_tbl, - num, queue); - rx_qbursts[pos].sent = 1; - rx_qbursts[pos].pkt_cnt = 0; - } - - return pkts_enqueued; -} - -/* - * User provided function to poll for packet input in DIRECT_RECV-mode, - * given to EM via 'em_conf.input.input_poll_fn = pktin_pollfn_direct;' - * The function is of type 'em_input_poll_func_t'. See .h file. 
- */ -int pktin_pollfn_direct(void) -{ - odp_pktin_queue_t *pktin_queue_ptr; - odp_packet_t pkt_tbl[MAX_PKT_BURST_RX]; - int ret, pkts; - int poll_rounds = 0; - int pkts_enqueued = 0; /* return value */ - - if (unlikely(!pktio_shm->pktio_started)) - return 0; - - ret = pktin_queue_acquire(&pktin_queue_ptr /*out*/); - if (unlikely(ret != 0)) - return 0; - - do { - pkts = odp_pktin_recv(*pktin_queue_ptr, pkt_tbl, MAX_PKT_BURST_RX); - if (unlikely(pkts <= 0)) - goto pktin_poll_end; - - pkts_enqueued += pktin_lookup_enqueue(pkt_tbl, pkts); - - } while (pkts == MAX_PKT_BURST_RX && - ++poll_rounds < MAX_RX_POLL_ROUNDS); - -pktin_poll_end: - pktin_queue_release(pktin_queue_ptr); - - return pkts_enqueued; -} - -/* - * User provided function to poll for packet input in PLAIN_QUEUE-mode, - * given to EM via 'em_conf.input.input_poll_fn = pktin_pollfn_plainqueue;' - * The function is of type 'em_input_poll_func_t'. See .h file. - */ -int pktin_pollfn_plainqueue(void) -{ - odp_queue_t plain_queue; - odp_event_t ev_tbl[MAX_PKT_BURST_RX]; - odp_packet_t pkt_tbl[MAX_PKT_BURST_RX]; - int pkts; - int poll_rounds = 0; - int pkts_enqueued = 0; /* return value */ - - if (unlikely(!pktio_shm->pktio_started)) - return 0; - - plain_queue = plain_queue_acquire(); - if (unlikely(plain_queue == ODP_QUEUE_INVALID)) - return 0; - - do { - pkts = odp_queue_deq_multi(plain_queue, ev_tbl, MAX_PKT_BURST_RX); - if (unlikely(pkts <= 0)) - goto pktin_poll_end; - - odp_packet_from_event_multi(pkt_tbl, ev_tbl, pkts); - - pkts_enqueued += pktin_lookup_enqueue(pkt_tbl, pkts); - - } while (pkts == MAX_PKT_BURST_RX && - ++poll_rounds < MAX_RX_POLL_ROUNDS); - -pktin_poll_end: - plain_queue_release(plain_queue); - - return pkts_enqueued; -} - -static inline int -pktio_tx_burst(tx_burst_t *const tx_burst) -{ - if (odp_spinlock_is_locked(&tx_burst->lock) || - odp_spinlock_trylock(&tx_burst->lock) == 0) - return 0; - - const int num = odp_queue_deq_multi(tx_burst->queue, - pktio_locm.ev_burst, - MAX_PKT_BURST_TX); - if (unlikely(num <= 0)) { - odp_spinlock_unlock(&tx_burst->lock); - return 0; - } - - odp_atomic_sub_u64(&tx_burst->cnt, (uint64_t)num); - - const odp_queue_t pktout_queue = tx_burst->pktout_queue; - /* Enqueue a tx burst onto the pktio queue for transmission */ - int ret = odp_queue_enq_multi(pktout_queue, pktio_locm.ev_burst, num); - - odp_spinlock_unlock(&tx_burst->lock); - - if (unlikely(ret != num)) { - if (ret < 0) - ret = 0; - odp_event_free_multi(&pktio_locm.ev_burst[ret], num - ret); - } - - return ret; -} - -/** - * @brief User provided output-queue callback function (em_output_func_t). - * - * Transmit events(pkts) via Eth Tx queues. 
- * - * @return The number of events actually transmitted (<= num) - */ -int pktio_tx(const em_event_t events[], const unsigned int num, - const em_queue_t output_queue, void *output_fn_args) -{ - /* Create idx to select tx-burst, always same idx for same em queue */ - const int burst_idx = (int)((uintptr_t)output_queue % - MAX_TX_BURST_BUFS); - pktio_tx_fn_args_t *const args = output_fn_args; - const int if_port = (int)(args->if_id % IF_MAX_NUM); - /* Select tx-burst onto which to temporaily store pkt/event until tx */ - tx_burst_t *const tx_burst = &pktio_shm->tx_burst[if_port][burst_idx]; - uint64_t prev_cnt; - int ret; - - if (unlikely(num == 0 || !pktio_shm->pktio_started)) - return 0; - - /* Convert into ODP-events */ - odp_event_t odp_events[num]; - - em_odp_events2odp(events, odp_events, num); - - /* - * Mark all events as "free" from EM point of view - ODP will transmit - * and free the events (=odp-pkts). - */ - em_event_mark_free_multi(events, num); - - /* - * 'sched_ctx_type = em_sched_context_type_current(&src_sched_queue)' - * could be used to determine the need for maintaining event order for - * output. Also em_queue_get_type(src_sched_queue) could further be used - * if not caring about a potentially ended sched-context caused by an - * earlier call to em_atomic/ordered_processing_end(). - * Here, none of this is done, since every event will be buffered and - * sent out in order regardless of sched context type or queue type. - */ - - ret = odp_queue_enq_multi(tx_burst->queue, odp_events, num); - if (unlikely(ret < 0)) { - /* failure: don't return, see if a burst can be Tx anyway */ - ret = 0; - } - - prev_cnt = odp_atomic_fetch_add_u64(&tx_burst->cnt, ret); - if (prev_cnt >= MAX_PKT_BURST_TX - 1) - (void)pktio_tx_burst(tx_burst); - - if (unlikely(ret < (int)num)) - em_event_unmark_free_multi(&events[ret], num - ret); - - return ret; -} - -static inline tx_burst_t * -tx_drain_burst_acquire(void) -{ - tx_burst_t *tx_burst; - uintptr_t tx_burst_uintptr; - - int ret = odp_stash_get_ptr(pktio_shm->pktout.tx_burst_stash, - &tx_burst_uintptr, 1); - if (unlikely(ret != 1)) - return NULL; - - tx_burst = (tx_burst_t *)tx_burst_uintptr; - return tx_burst; -} - -static inline void -tx_drain_burst_release(tx_burst_t *tx_burst) { - uintptr_t tx_burst_uintptr = (uintptr_t)tx_burst; - - int ret = odp_stash_put_ptr(pktio_shm->pktout.tx_burst_stash, - &tx_burst_uintptr, 1); - if (unlikely(ret != 1)) - APPL_EXIT_FAILURE("stash-put fails:%d", ret); -} - -/* - * User provided function to drain buffered output, - * given to EM via 'em_conf.output.output_drain_fn = pktout_drainfn;' - * The function is of type 'em_output_drain_func_t' - */ -int pktout_drainfn(void) -{ - const uint64_t curr = odp_cpu_cycles(); /* core-local timestamp */ - const uint64_t prev = pktio_locm.tx_prev_cycles; - const uint64_t diff = likely(curr >= prev) ? 
- curr - prev : UINT64_MAX - prev + curr + 1; - int ret = 0; - - /* TX burst queue drain */ - if (unlikely(diff > BURST_TX_DRAIN)) { - tx_burst_t *tx_drain_burst = tx_drain_burst_acquire(); - - if (tx_drain_burst) { - ret = pktio_tx_burst(tx_drain_burst); - /* Update timestamp for next round */ - pktio_locm.tx_prev_cycles = curr; - tx_drain_burst_release(tx_drain_burst); - } - } - - return ret; -} - -void pktio_add_queue(uint8_t proto, uint32_t ipv4_dst, uint16_t port_dst, - em_queue_t queue) -{ - pkt_q_hash_key_t key; - int ret, idx; - - /* Store in network format to avoid conversion during Rx lookup */ - key.ip_dst = htonl(ipv4_dst); - key.port_dst = htons(port_dst); - key.proto = proto; - - odp_ticketlock_lock(&pktio_shm->tbl_lookup.lock); - - idx = pktio_shm->tbl_lookup.tbl_idx; - if (unlikely(idx != pktio_shm->rx_pkt_queues[idx].pos)) { - odp_ticketlock_unlock(&pktio_shm->tbl_lookup.lock); - APPL_EXIT_FAILURE("tbl insertion failed, idx(%d) != pos(%d)", - idx, pktio_shm->rx_pkt_queues[idx].pos); - return; - } - - if (unlikely(em_queue_get_type(queue) == EM_QUEUE_TYPE_UNDEF)) { - odp_ticketlock_unlock(&pktio_shm->tbl_lookup.lock); - APPL_EXIT_FAILURE("Invalid queue:%" PRI_QUEUE "", queue); - return; - } - - pktio_shm->rx_pkt_queues[idx].queue = queue; - - ret = pktio_shm->tbl_lookup.ops.f_put(pktio_shm->tbl_lookup.tbl, &key, - &pktio_shm->rx_pkt_queues[idx]); - if (likely(ret == 0)) - pktio_shm->tbl_lookup.tbl_idx++; - - odp_ticketlock_unlock(&pktio_shm->tbl_lookup.lock); - - if (unlikely(ret != 0)) - APPL_EXIT_FAILURE("tbl insertion failed"); -} - -int pktio_default_queue(em_queue_t queue) -{ - if (unlikely(em_queue_get_type(queue) == EM_QUEUE_TYPE_UNDEF)) { - APPL_EXIT_FAILURE("Invalid queue:%" PRI_QUEUE "", queue); - return -1; - } - - pktio_shm->default_queue = queue; - - return 0; -} - -em_queue_t pktio_lookup_sw(uint8_t proto, uint32_t ipv4_dst, uint16_t port_dst) -{ - em_queue_t queue; - rx_pkt_queue_t rx_pkt_queue; - int ret, pos; - /* Store in network format to avoid conversion during Rx lookup */ - pkt_q_hash_key_t key = {.ip_dst = htonl(ipv4_dst), - .port_dst = htons(port_dst), - .proto = proto}; - - /* table(hash) lookup to find queue */ - ret = pktio_shm->tbl_lookup.ops.f_get(pktio_shm->tbl_lookup.tbl, - &key, &rx_pkt_queue, - sizeof(rx_pkt_queue_t)); - - if (likely(ret == 0)) { - /* found */ - pos = rx_pkt_queue.pos; - queue = rx_pkt_queue.queue; - if (unlikely(queue != pktio_shm->rx_pkt_queues[pos].queue)) { - APPL_EXIT_FAILURE("%" PRI_QUEUE "!= %" PRI_QUEUE "", - queue, - pktio_shm->rx_pkt_queues[pos].queue); - return EM_QUEUE_UNDEF; - } - } else { - queue = EM_QUEUE_UNDEF; - } - - return queue; -} - -odp_pool_t pktio_pool_get(void) -{ - return pktio_shm->pools.pktpool_odp; -} +/* + * Copyright (c) 2015-2022, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * EM-ODP packet I/O setup
+ */
+#include <string.h>
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#include "cm_setup.h"
+#include "cm_pktio.h"
+
+#define PKTIO_PKT_POOL_NUM_BUFS (32 * 1024)
+#define PKTIO_PKT_POOL_BUF_SIZE 1536
+#define PKTIO_VEC_POOL_VEC_SIZE 32
+#define PKTIO_VEC_SIZE PKTIO_VEC_POOL_VEC_SIZE
+#define PKTIO_VEC_TMO ODP_TIME_MSEC_IN_NS
+
+static pktio_shm_t *pktio_shm;
+static __thread pktio_locm_t pktio_locm ODP_ALIGNED_CACHE;
+
+static inline tx_burst_t *tx_drain_burst_acquire(void);
+static inline int pktin_queue_acquire(odp_pktin_queue_t **pktin_queue_ptr /*out*/);
+static inline odp_queue_t plain_queue_acquire(void);
+
+const char *pktin_mode_str(pktin_mode_t in_mode)
+{
+	const char *str;
+
+	switch (in_mode) {
+	case DIRECT_RECV:
+		str = "DIRECT_RECV";
+		break;
+	case PLAIN_QUEUE:
+		str = "PLAIN_QUEUE";
+		break;
+	case SCHED_PARALLEL:
+		str = "SCHED_PARALLEL";
+		break;
+	case SCHED_ATOMIC:
+		str = "SCHED_ATOMIC";
+		break;
+	case SCHED_ORDERED:
+		str = "SCHED_ORDERED";
+		break;
+	default:
+		str = "UNKNOWN";
+		break;
+	}
+
+	return str;
+}
+
+bool pktin_polled_mode(pktin_mode_t in_mode)
+{
+	return in_mode == DIRECT_RECV ||
+	       in_mode == PLAIN_QUEUE;
+}
+
+bool pktin_sched_mode(pktin_mode_t in_mode)
+{
+	return in_mode == SCHED_PARALLEL ||
+	       in_mode == SCHED_ATOMIC ||
+	       in_mode == SCHED_ORDERED;
+}
+
+void pktio_mem_reserve(void)
+{
+	odp_shm_t shm;
+	uint32_t flags = 0;
+
+	/* Sanity check: pktio_shm must not be set yet */
+	if (unlikely(pktio_shm != NULL))
+		APPL_EXIT_FAILURE("pktio shared memory ptr set - already initialized?");
+
+	odp_shm_capability_t shm_capa;
+	int ret = odp_shm_capability(&shm_capa);
+
+	if (unlikely(ret))
+		APPL_EXIT_FAILURE("shm capability error:%d", ret);
+
+	if (shm_capa.flags & ODP_SHM_SINGLE_VA)
+		flags |= ODP_SHM_SINGLE_VA;
+
+	/* Reserve packet I/O shared memory */
+	shm = odp_shm_reserve("pktio_shm", sizeof(pktio_shm_t),
+			      ODP_CACHE_LINE_SIZE, flags);
+
+	if (unlikely(shm == ODP_SHM_INVALID))
+		APPL_EXIT_FAILURE("pktio shared mem reserve failed.");
+
+	pktio_shm = odp_shm_addr(shm);
+	if (unlikely(pktio_shm == NULL))
+		APPL_EXIT_FAILURE("obtaining pktio shared mem addr failed.");
+
+	memset(pktio_shm, 0, sizeof(pktio_shm_t));
+}
+
+void pktio_mem_lookup(bool is_thread_per_core)
+{
+	odp_shm_t shm;
+	pktio_shm_t *shm_addr;
+
+	shm = odp_shm_lookup("pktio_shm");
+
+	shm_addr = odp_shm_addr(shm);
+	if (unlikely(shm_addr == NULL))
+		APPL_EXIT_FAILURE("pktio shared mem addr lookup failed.");
+
+	/*
+	 * Set pktio_shm in process-per-core mode: each process has its own
+	 * pointer.
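+	 *
+	 * Illustrative call order (a sketch - the actual startup sequence
+	 * lives in the common setup code):
+	 *
+	 *   pktio_mem_reserve();      - once, on the initial core
+	 *   ...fork/launch workers...
+	 *   pktio_mem_lookup(false);  - in each process-per-core worker
+	 *
+	 * In thread-per-core mode all workers share the address space, so
+	 * the pointer set by pktio_mem_reserve() is already valid.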
+ */ + if (!is_thread_per_core && pktio_shm != shm_addr) + pktio_shm = shm_addr; +} + +void pktio_mem_free(void) +{ + odp_shm_t shm; + + shm = odp_shm_lookup("pktio_shm"); + if (unlikely(shm == ODP_SHM_INVALID)) + APPL_EXIT_FAILURE("pktio shared mem lookup for free failed."); + + if (odp_shm_free(shm) != 0) + APPL_EXIT_FAILURE("pktio shared mem free failed."); + pktio_shm = NULL; +} + +/** + * Helper to pktio_pool_create(): create the pktio pool as an EM event-pool + */ +static void pktio_pool_create_em(int if_count, const odp_pool_capability_t *pool_capa) +{ + /* + * Create the pktio pkt pool used for actual input pkts. + * Create the pool as an EM-pool (and convert into an ODP-pool where + * needed) to be able to utilize EM's Event State Verification (ESV) + * in the 'esv.prealloc_pools = true' mode (see config/em-odp.conf). + */ + em_pool_cfg_t pool_cfg; + em_pool_t pool; + + em_pool_cfg_init(&pool_cfg); + pool_cfg.event_type = EM_EVENT_TYPE_PACKET; + pool_cfg.num_subpools = 1; + pool_cfg.subpool[0].size = PKTIO_PKT_POOL_BUF_SIZE; + pool_cfg.subpool[0].num = if_count * PKTIO_PKT_POOL_NUM_BUFS; + /* Use max thread-local pkt-cache size to speed up pktio allocs */ + pool_cfg.subpool[0].cache_size = pool_capa->pkt.max_cache_size; + pool = em_pool_create("pktio-pool-em", EM_POOL_UNDEF, &pool_cfg); + if (pool == EM_POOL_UNDEF) + APPL_EXIT_FAILURE("pktio pool creation failed"); + + /* Convert: EM-pool to ODP-pool */ + odp_pool_t odp_pool = ODP_POOL_INVALID; + int ret = em_odp_pool2odp(pool, &odp_pool, 1); + + if (unlikely(ret != 1)) + APPL_EXIT_FAILURE("EM pktio pool creation failed:%d", ret); + + /* Store the EM pktio pool and the corresponding ODP subpool */ + pktio_shm->pools.pktpool_em = pool; + pktio_shm->pools.pktpool_odp = odp_pool; + + odp_pool_print(pktio_shm->pools.pktpool_odp); +} + +/** + * Helper to pktio_pool_create(): create the pktio pool as an ODP pkt-pool + */ +static void pktio_pool_create_odp(int if_count, const odp_pool_capability_t *pool_capa) +{ + odp_pool_param_t pool_params; + + (void)pool_capa; + + odp_pool_param_init(&pool_params); + pool_params.pkt.num = if_count * PKTIO_PKT_POOL_NUM_BUFS; + /* pool_params.pkt.max_num = default */ + pool_params.pkt.len = PKTIO_PKT_POOL_BUF_SIZE; + pool_params.pkt.max_len = PKTIO_PKT_POOL_BUF_SIZE; + pool_params.pkt.seg_len = PKTIO_PKT_POOL_BUF_SIZE; + + pool_params.type = ODP_POOL_PACKET; + pool_params.pkt.uarea_size = em_odp_event_hdr_size(); + + odp_pool_t odp_pool = odp_pool_create("pktio-pool-odp", &pool_params); + + if (odp_pool == ODP_POOL_INVALID) + APPL_EXIT_FAILURE("pktio pool creation failed"); + + /* Store the ODP pktio pool */ + pktio_shm->pools.pktpool_odp = odp_pool; + pktio_shm->pools.pktpool_em = EM_POOL_UNDEF; + + odp_pool_print(pktio_shm->pools.pktpool_odp); +} + +static void pktio_vectorpool_create_em(int if_count, const odp_pool_capability_t *pool_capa) +{ + if (unlikely(pool_capa->vector.max_pools == 0 || + pool_capa->vector.max_size == 0)) + APPL_EXIT_FAILURE("ODP pktin vectors not supported!"); + + uint32_t vec_size = PKTIO_VEC_POOL_VEC_SIZE; + uint32_t num_pkt = PKTIO_PKT_POOL_NUM_BUFS * if_count; + uint32_t num_vec = num_pkt; /* worst case: 1 pkt per vector */ + + if (vec_size > pool_capa->vector.max_size) { + vec_size = pool_capa->vector.max_size; + APPL_PRINT("\nWarning: pktin vector size reduced to %u\n\n", + vec_size); + } + + if (pool_capa->vector.max_num /* 0=limited only by pool memsize */ && + num_vec > pool_capa->vector.max_num) { + num_vec = pool_capa->vector.max_num; + APPL_PRINT("\nWarning: pktin 
number of vectors reduced to %u\n\n", + num_vec); + } + + em_pool_cfg_t pool_cfg; + + em_pool_cfg_init(&pool_cfg); + pool_cfg.event_type = EM_EVENT_TYPE_VECTOR; + pool_cfg.num_subpools = 1; + + pool_cfg.subpool[0].size = vec_size; /* nbr of events in vector */ + pool_cfg.subpool[0].num = num_vec; + /* Use max thread-local pkt-cache size to speed up pktio allocs */ + pool_cfg.subpool[0].cache_size = pool_capa->pkt.max_cache_size; + + em_pool_t vector_pool = em_pool_create("vector-pool-em", EM_POOL_UNDEF, &pool_cfg); + + if (vector_pool == EM_POOL_UNDEF) + APPL_EXIT_FAILURE("EM vector pool create failed"); + + /* Convert: EM-pool to ODP-pool */ + odp_pool_t odp_vecpool = ODP_POOL_INVALID; + int ret = em_odp_pool2odp(vector_pool, &odp_vecpool, 1); + + if (unlikely(ret != 1)) + APPL_EXIT_FAILURE("EM pktio pool creation failed:%d", ret); + + /* Store the EM pktio pool and the corresponding ODP subpool */ + pktio_shm->pools.vecpool_em = vector_pool; + pktio_shm->pools.vecpool_odp = odp_vecpool; + + odp_pool_print(odp_vecpool); +} + +static void pktio_vectorpool_create_odp(int if_count, const odp_pool_capability_t *pool_capa) +{ + odp_pool_param_t pool_params; + + odp_pool_param_init(&pool_params); + + pool_params.type = ODP_POOL_VECTOR; + + if (unlikely(pool_capa->vector.max_pools == 0 || + pool_capa->vector.max_size == 0)) + APPL_EXIT_FAILURE("ODP pktin vectors not supported!"); + + uint32_t vec_size = PKTIO_VEC_POOL_VEC_SIZE; + uint32_t num_pkt = PKTIO_PKT_POOL_NUM_BUFS * if_count; + uint32_t num_vec = num_pkt; /* worst case: 1 pkt per vector */ + + if (vec_size > pool_capa->vector.max_size) { + vec_size = pool_capa->vector.max_size; + APPL_PRINT("\nWarning: pktin vector size reduced to %u\n\n", + vec_size); + } + + if (pool_capa->vector.max_num /* 0=limited only by pool memsize */ && + num_vec > pool_capa->vector.max_num) { + num_vec = pool_capa->vector.max_num; + APPL_PRINT("\nWarning: pktin number of vectors reduced to %u\n\n", + num_vec); + } + + pool_params.vector.num = num_vec; + pool_params.vector.max_size = vec_size; + pool_params.vector.uarea_size = em_odp_event_hdr_size(); + + odp_pool_t vector_pool = odp_pool_create("vector-pool-odp", &pool_params); + + if (vector_pool == ODP_POOL_INVALID) + APPL_EXIT_FAILURE("ODP vector pool create failed"); + + pktio_shm->pools.vecpool_odp = vector_pool; + + odp_pool_print(vector_pool); +} + +/** + * Create the memory pool used by pkt-io + */ +void pktio_pool_create(int if_count, bool pktpool_em, + bool pktin_vector, bool vecpool_em) +{ + odp_pool_capability_t pool_capa; + + if (odp_pool_capability(&pool_capa) != 0) + APPL_EXIT_FAILURE("Can't get odp-pool capability"); + /* + * Create the pktio pkt pool used for actual input pkts. + * Create the pool either as an EM- or ODP-pool. 
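+	 *
+	 * Caller-side example (a sketch - the real arguments come from the
+	 * parsed command line): EM-managed pkt pool and EM-managed pktin
+	 * vector pool:
+	 *
+	 *   pktio_pool_create(if_count, true, true, true);
+	 *
+	 * With 'pktpool_em = true' the pool is created via em_pool_create(),
+	 * so EM's Event State Verification (ESV) can track also the packets
+	 * arriving from the NIC.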
+ */ + if (pktpool_em) + pktio_pool_create_em(if_count, &pool_capa); + else + pktio_pool_create_odp(if_count, &pool_capa); + + if (pktin_vector) { + if (vecpool_em) + pktio_vectorpool_create_em(if_count, &pool_capa); + else + pktio_vectorpool_create_odp(if_count, &pool_capa); + } +} + +/** + * Helper to pktio_pool_destroy(): destroy the EM event-pool used for pktio + */ +static void pktio_pool_destroy_em(void) +{ + APPL_PRINT("\n%s(): deleting the EM pktio-pool:\n", __func__); + em_pool_info_print(pktio_shm->pools.pktpool_em); + + if (em_pool_delete(pktio_shm->pools.pktpool_em) != EM_OK) + APPL_EXIT_FAILURE("EM pktio-pool delete failed."); + + pktio_shm->pools.pktpool_em = EM_POOL_UNDEF; + pktio_shm->pools.pktpool_odp = ODP_POOL_INVALID; +} + +/** + * Helper to pktio_pool_destroy(): destroy the ODP pkt-pool used for pktio + */ +static void pktio_pool_destroy_odp(void) +{ + APPL_PRINT("\n%s(): destroying the ODP pktio-pool\n", __func__); + if (odp_pool_destroy(pktio_shm->pools.pktpool_odp) != 0) + APPL_EXIT_FAILURE("ODP pktio-pool destroy failed."); + + pktio_shm->pools.pktpool_odp = ODP_POOL_INVALID; +} + +/** + * Helper to pktio_pool_destroy(): destroy the pktin EM vector pool + */ +static void pktio_vectorpool_destroy_em(void) +{ + APPL_PRINT("\n%s(): deleting the EM vector-pool:\n", __func__); + em_pool_info_print(pktio_shm->pools.vecpool_em); + + if (em_pool_delete(pktio_shm->pools.vecpool_em) != EM_OK) + APPL_EXIT_FAILURE("EM pktio-pool delete failed."); + + pktio_shm->pools.vecpool_em = EM_POOL_UNDEF; + pktio_shm->pools.vecpool_odp = ODP_POOL_INVALID; +} + +/** + * Helper to pktio_pool_destroy(): destroy the ODP pktin vector pool + */ +static void pktio_vectorpool_destroy_odp(void) +{ + APPL_PRINT("\n%s(): destroying the ODP pktin vector-pool\n", __func__); + if (odp_pool_destroy(pktio_shm->pools.vecpool_odp) != 0) + APPL_EXIT_FAILURE("ODP pktin vector-pool destroy failed."); + + pktio_shm->pools.vecpool_odp = ODP_POOL_INVALID; +} + +/** + * Destroy the memory pool used by pkt-io + */ +void pktio_pool_destroy(bool pktpool_em, bool pktin_vector, bool vecpool_em) +{ + if (pktpool_em) + pktio_pool_destroy_em(); + else + pktio_pool_destroy_odp(); + + if (pktin_vector) { + if (vecpool_em) + pktio_vectorpool_destroy_em(); + else + pktio_vectorpool_destroy_odp(); + } +} + +void pktio_init(const appl_conf_t *appl_conf) +{ + pktin_mode_t in_mode = appl_conf->pktio.in_mode; + odp_stash_capability_t stash_capa; + odp_stash_param_t stash_param; + odp_stash_t stash; + int ret; + + pktio_shm->ifs.count = appl_conf->pktio.if_count; + pktio_shm->ifs.num_created = 0; + pktio_shm->default_queue = EM_QUEUE_UNDEF; + + pktio_shm->pktin.in_mode = in_mode; + pktio_shm->pktin.pktin_queue_stash = ODP_STASH_INVALID; + + ret = odp_stash_capability(&stash_capa, ODP_STASH_TYPE_FIFO); + if (ret != 0) + APPL_EXIT_FAILURE("odp_stash_capability() fails:%d", ret); + + if (pktin_polled_mode(in_mode)) { + /* + * Create a stash to hold the shared queues used in pkt input. Each core + * needs to get one queue to be able to use it to receive packets. 
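+		 *
+		 * Per poll round, each core in effect does (sketch of the
+		 * DIRECT_RECV case, see pktin_pollfn_direct() below):
+		 *
+		 *   odp_pktin_queue_t *pktin_q;
+		 *
+		 *   if (pktin_queue_acquire(&pktin_q) == 0) {
+		 *           pkts = odp_pktin_recv(*pktin_q, pkt_tbl,
+		 *                                 MAX_PKT_BURST_RX);
+		 *           ...
+		 *           pktin_queue_release(pktin_q);
+		 *   }
+		 *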
+ * DIRECT_RECV-mode: the stash contains pointers to odp_pktin_queue_t:s + * PLAIN_QUEUE-mode: the stash contains odp_queue_t:s + */ + odp_stash_param_init(&stash_param); + stash_param.type = ODP_STASH_TYPE_FIFO; + stash_param.put_mode = ODP_STASH_OP_MT; + stash_param.get_mode = ODP_STASH_OP_MT; + stash_param.num_obj = PKTIO_MAX_IN_QUEUES * IF_MAX_NUM; + if (stash_param.num_obj > stash_capa.max_num_obj) + APPL_EXIT_FAILURE("Unsupported odp-stash number of objects:%" PRIu64 "", + stash_param.num_obj); + stash_param.obj_size = MAX(sizeof(odp_queue_t), sizeof(odp_pktin_queue_t *)); + if (!POWEROF2(stash_param.obj_size) || + stash_param.obj_size != sizeof(uintptr_t) || + stash_param.obj_size > stash_capa.max_obj_size) { + APPL_EXIT_FAILURE("Unsupported odp-stash object handle size:%u, max:%u", + stash_param.obj_size, stash_capa.max_obj_size); + } + stash_param.cache_size = 0; /* No core local caching */ + + stash = odp_stash_create("pktin.pktin_queue_stash", &stash_param); + if (stash == ODP_STASH_INVALID) + APPL_EXIT_FAILURE("odp_stash_create() fails"); + + pktio_shm->pktin.pktin_queue_stash = stash; + } + + /* + * Create a stash to hold the shared tx-burst buffers, + * used when draining the available tx-burst buffers + */ + odp_stash_param_init(&stash_param); + stash_param.type = ODP_STASH_TYPE_FIFO; + stash_param.put_mode = ODP_STASH_OP_MT; + stash_param.get_mode = ODP_STASH_OP_MT; + stash_param.num_obj = MAX_TX_BURST_BUFS * IF_MAX_NUM; + if (stash_param.num_obj > stash_capa.max_num_obj) + APPL_EXIT_FAILURE("Unsupported odp-stash number of objects:%" PRIu64 "", + stash_param.num_obj); + stash_param.obj_size = sizeof(tx_burst_t *); /* stash pointers */ + if (!POWEROF2(stash_param.obj_size) || + stash_param.obj_size != sizeof(uintptr_t) || + stash_param.obj_size > stash_capa.max_obj_size) { + APPL_EXIT_FAILURE("Unsupported odp-stash object handle size:%u", + stash_param.obj_size); + } + stash_param.cache_size = 0; /* No core local caching */ + + stash = odp_stash_create("pktout.tx-burst-stash", &stash_param); + if (stash == ODP_STASH_INVALID) + APPL_EXIT_FAILURE("odp_stash_create() fails"); + pktio_shm->pktout.tx_burst_stash = stash; + + /* Misc inits: */ + for (int i = 0; i < MAX_RX_PKT_QUEUES; i++) { + pktio_shm->rx_pkt_queues[i].pos = i; + pktio_shm->rx_pkt_queues[i].queue = EM_QUEUE_UNDEF; + } + + odp_ticketlock_init(&pktio_shm->tbl_lookup.lock); + pktio_shm->tbl_lookup.tbl_idx = 0; + pktio_shm->tbl_lookup.ops = cuckoo_table_ops; + odp_ticketlock_lock(&pktio_shm->tbl_lookup.lock); + pktio_shm->tbl_lookup.tbl = + pktio_shm->tbl_lookup.ops.f_create("RX-lookup-tbl", MAX_RX_PKT_QUEUES, + sizeof(pkt_q_hash_key_t), + sizeof(rx_pkt_queue_t)); + odp_ticketlock_unlock(&pktio_shm->tbl_lookup.lock); + if (unlikely(pktio_shm->tbl_lookup.tbl == NULL)) + APPL_EXIT_FAILURE("rx pkt lookup table creation fails"); +} + +void pktio_deinit(const appl_conf_t *appl_conf) +{ + (void)appl_conf; + + if (pktin_polled_mode(appl_conf->pktio.in_mode)) + odp_stash_destroy(pktio_shm->pktin.pktin_queue_stash); + odp_stash_destroy(pktio_shm->pktout.tx_burst_stash); + + pktio_shm->tbl_lookup.ops.f_des(pktio_shm->tbl_lookup.tbl); +} + +static void pktio_tx_buffering_create(int if_num) +{ + tx_burst_t *tx_burst; + odp_queue_param_t queue_param; + odp_queue_t odp_queue; + int pktout_idx; + odp_queue_t pktout_queue; + char name[ODP_QUEUE_NAME_LEN]; + + const int pktout_num_queues = pktio_shm->pktout.num_queues[if_num]; + + for (int i = 0; i < MAX_TX_BURST_BUFS; i++) { + tx_burst = &pktio_shm->tx_burst[if_num][i]; + + 
odp_atomic_init_u64(&tx_burst->cnt, 0); + odp_spinlock_init(&tx_burst->lock); + + odp_queue_param_init(&queue_param); + queue_param.type = ODP_QUEUE_TYPE_PLAIN; + queue_param.enq_mode = ODP_QUEUE_OP_MT; + queue_param.deq_mode = ODP_QUEUE_OP_MT_UNSAFE; + /* ignore odp ordering, EM handles output order, just buffer */ + queue_param.order = ODP_QUEUE_ORDER_IGNORE; + + snprintf(name, ODP_QUEUE_NAME_LEN, "tx-burst-if%d-%03d", + if_num, i); + name[ODP_QUEUE_NAME_LEN - 1] = '\0'; + + odp_queue = odp_queue_create(name, &queue_param); + if (unlikely(odp_queue == ODP_QUEUE_INVALID)) + APPL_EXIT_FAILURE("odp_queue_create() fails:if=%d(%d)", + if_num, i); + tx_burst->queue = odp_queue; + tx_burst->if_port = if_num; + + pktout_idx = i % pktout_num_queues; + pktout_queue = pktio_shm->pktout.queues[if_num][pktout_idx]; + tx_burst->pktout_queue = pktout_queue; + + /* + * Store each tx burst into the tx_burst_stash, stash used when + * draining the available tx-burst buffers. + */ + uintptr_t tx_burst_uintptr = (uintptr_t)tx_burst; + int ret = odp_stash_put_ptr(pktio_shm->pktout.tx_burst_stash, + &tx_burst_uintptr, 1); + if (unlikely(ret != 1)) + APPL_EXIT_FAILURE("enqueue fails"); + } +} + +static void pktio_tx_buffering_destroy(void) +{ + tx_burst_t *tx_burst; + int num; + + while ((tx_burst = tx_drain_burst_acquire()) != NULL) { + do { + num = odp_queue_deq_multi(tx_burst->queue, + pktio_locm.ev_burst, + MAX_PKT_BURST_TX); + if (unlikely(num <= 0)) + break; + + odp_atomic_sub_u64(&tx_burst->cnt, (uint64_t)num); + odp_event_free_multi(pktio_locm.ev_burst, num); + } while (num > 0); + + odp_queue_destroy(tx_burst->queue); + } +} + +static inline void +pktin_queue_stashing_create(int if_num, pktin_mode_t in_mode) +{ + int num_rx = pktio_shm->pktin.num_queues[if_num]; + uintptr_t uintptr; + int ret; + + for (int i = 0; i < num_rx; i++) { + if (in_mode == PLAIN_QUEUE) { + odp_queue_t queue; + + queue = pktio_shm->pktin.plain_queues[if_num][i]; + uintptr = (uintptr_t)queue; + } else /* DIRECT_RECV*/ { + odp_pktin_queue_t *pktin_qptr; + + pktin_qptr = &pktio_shm->pktin.pktin_queues[if_num][i]; + uintptr = (uintptr_t)pktin_qptr; + } + + /* + * Store the queue or the pktin_queue-ptr as an 'uintptr_t' + * in the stash. + */ + ret = odp_stash_put_ptr(pktio_shm->pktin.pktin_queue_stash, + &uintptr, 1); + if (unlikely(ret != 1)) + APPL_EXIT_FAILURE("stash-put fails:%d", ret); + } +} + +static inline void +pktin_queue_queueing_destroy(void) +{ + pktin_mode_t in_mode = pktio_shm->pktin.in_mode; + + if (in_mode == PLAIN_QUEUE) { + while (plain_queue_acquire() != ODP_QUEUE_INVALID) + ; /* empty stash */ + } else if (in_mode == DIRECT_RECV) { + odp_pktin_queue_t *pktin_queue_ptr; + + while (pktin_queue_acquire(&pktin_queue_ptr) == 0) + ; /* empty stash */ + } +} + +static void +set_pktin_vector_params(odp_pktin_queue_param_t *pktin_queue_param, + odp_pool_t vec_pool, + const odp_pktio_capability_t *pktio_capa) +{ + uint32_t vec_size = PKTIO_VEC_SIZE; + uint64_t vec_tmo_ns = PKTIO_VEC_TMO; + + pktin_queue_param->vector.enable = true; + pktin_queue_param->vector.pool = vec_pool; + + if (vec_size > pktio_capa->vector.max_size || + vec_size < pktio_capa->vector.min_size) { + vec_size = (vec_size > pktio_capa->vector.max_size) ? 
+ pktio_capa->vector.max_size : pktio_capa->vector.min_size; + APPL_PRINT("\nWarning: Modified vector size to %u\n\n", vec_size); + } + pktin_queue_param->vector.max_size = vec_size; + + if (vec_tmo_ns > pktio_capa->vector.max_tmo_ns || + vec_tmo_ns < pktio_capa->vector.min_tmo_ns) { + vec_tmo_ns = (vec_tmo_ns > pktio_capa->vector.max_tmo_ns) ? + pktio_capa->vector.max_tmo_ns : pktio_capa->vector.min_tmo_ns; + APPL_PRINT("\nWarning: Modified vector timeout to %" PRIu64 "\n\n", vec_tmo_ns); + } + pktin_queue_param->vector.max_tmo_ns = vec_tmo_ns; +} + +/** Helper to pktio_create() for packet input configuration */ +static void pktin_config(const char *dev, int if_idx, odp_pktio_t pktio, + const odp_pktio_capability_t *pktio_capa, + int if_count, int num_workers, pktin_mode_t in_mode, + bool pktin_vector) +{ + odp_pktin_queue_param_t pktin_queue_param; + int num_rx, max; + int ret; + + odp_pktin_queue_param_init(&pktin_queue_param); + + max = MIN((int)pktio_capa->max_input_queues, PKTIO_MAX_IN_QUEUES); + num_rx = 2 * (ROUND_UP(num_workers, if_count) / if_count); + num_rx = MIN(max, num_rx); + + APPL_PRINT("\tmax number of pktio dev:'%s' input queues:%d, using:%d\n", + dev, pktio_capa->max_input_queues, num_rx); + + pktin_queue_param.hash_enable = 1; + pktin_queue_param.classifier_enable = 0; + pktin_queue_param.hash_proto.proto.ipv4_udp = 1; + pktin_queue_param.num_queues = num_rx; + + if (pktin_polled_mode(in_mode)) { + pktin_queue_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE; + } else if (pktin_sched_mode(in_mode)) { + pktin_queue_param.queue_param.type = ODP_QUEUE_TYPE_SCHED; + pktin_queue_param.queue_param.sched.prio = odp_schedule_default_prio(); + if (in_mode == SCHED_PARALLEL) + pktin_queue_param.queue_param.sched.sync = ODP_SCHED_SYNC_PARALLEL; + else if (in_mode == SCHED_ATOMIC) + pktin_queue_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC; + else /* in_mode == SCHED_ORDERED */ + pktin_queue_param.queue_param.sched.sync = ODP_SCHED_SYNC_ORDERED; + + pktin_queue_param.queue_param.sched.group = em_odp_qgrp2odp(EM_QUEUE_GROUP_DEFAULT); + + if (pktin_vector) { + if (!pktio_capa->vector.supported) + APPL_EXIT_FAILURE("pktin, dev:'%s': input vectors not supported", + dev); + set_pktin_vector_params(&pktin_queue_param, + pktio_shm->pools.vecpool_odp, + pktio_capa); + } + } + + ret = odp_pktin_queue_config(pktio, &pktin_queue_param); + if (ret < 0) + APPL_EXIT_FAILURE("pktin, dev:'%s': input queue config failed: %d", + dev, ret); + + if (in_mode == PLAIN_QUEUE) { + ret = odp_pktin_event_queue(pktio, pktio_shm->pktin.plain_queues[if_idx]/*out*/, + num_rx); + if (ret != num_rx) + APPL_EXIT_FAILURE("pktin, dev:'%s': plain event queue query failed: %d", + dev, ret); + } else if (pktin_sched_mode(in_mode)) { + odp_queue_t *pktin_sched_queues = &pktio_shm->pktin.sched_queues[if_idx][0]; + em_queue_t *pktin_sched_em_queues = &pktio_shm->pktin.sched_em_queues[if_idx][0]; + + ret = odp_pktin_event_queue(pktio, pktin_sched_queues/*[out]*/, num_rx); + if (ret != num_rx) + APPL_EXIT_FAILURE("pktin, dev:'%s': odp_pktin_event_queue():%d", + dev, ret); + /* + * Create EM queues mapped to the ODP scheduled pktin event queues + */ + ret = em_odp_pktin_event_queues2em(pktin_sched_queues/*[in]*/, + pktin_sched_em_queues/*[out]*/, + num_rx); + if (ret != num_rx) + APPL_EXIT_FAILURE("pktin, dev:'%s': em_odp_pktin_queues2em():%d", + dev, ret); + } else /* DIRECT_RECV */ { + ret = odp_pktin_queue(pktio, pktio_shm->pktin.pktin_queues[if_idx]/*[out]*/, + num_rx); + if (ret != num_rx) + APPL_EXIT_FAILURE("pktin, 
dev:'%s': direct queue query failed: %d", + dev, ret); + } + + pktio_shm->pktin.num_queues[if_idx] = num_rx; + + if (pktin_polled_mode(in_mode)) { + /* + * Store all pktin queues in a stash - each core 'gets' acquires + * a pktin queue to use from this stash. + */ + pktin_queue_stashing_create(if_idx, in_mode); + } +} + +/** Helper to pktio_create() for packet output configuration */ +static void pktout_config(const char *dev, int if_idx, odp_pktio_t pktio, + const odp_pktio_capability_t *pktio_capa, + int num_workers) +{ + odp_pktout_queue_param_t pktout_queue_param; + odp_pktio_op_mode_t mode_tx; + int num_tx, max; + int ret; + + odp_pktout_queue_param_init(&pktout_queue_param); + mode_tx = ODP_PKTIO_OP_MT; + max = MIN((int)pktio_capa->max_output_queues, PKTIO_MAX_OUT_QUEUES); + num_tx = MIN(2 * num_workers, max); + APPL_PRINT("\tmax number of pktio dev:'%s' output queues:%d, using:%d\n", + dev, pktio_capa->max_output_queues, num_tx); + + pktout_queue_param.num_queues = num_tx; + pktout_queue_param.op_mode = mode_tx; + + ret = odp_pktout_queue_config(pktio, &pktout_queue_param); + if (ret < 0) + APPL_EXIT_FAILURE("pktio output queue config failed dev:'%s' (%d)", + dev, ret); + + ret = odp_pktout_event_queue(pktio, pktio_shm->pktout.queues[if_idx], + num_tx); + if (ret != num_tx || ret > PKTIO_MAX_OUT_QUEUES) + APPL_EXIT_FAILURE("pktio pktout queue query failed dev:'%s' (%d)", + dev, ret); + pktio_shm->pktout.num_queues[if_idx] = num_tx; + + /* Create Tx buffers */ + pktio_tx_buffering_create(if_idx); +} + +int /* if_id */ +pktio_create(const char *dev, pktin_mode_t in_mode, bool pktin_vector, + int if_count, int num_workers) +{ + int if_idx = -1; /* return value */ + odp_pktio_param_t pktio_param; + odp_pktio_t pktio; + odp_pktio_capability_t pktio_capa; + odp_pktio_config_t pktio_config; + odp_pktio_info_t info; + int ret; + + odp_pktio_param_init(&pktio_param); + + /* Packet input mode */ + if (in_mode == DIRECT_RECV) + pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT; + else if (in_mode == PLAIN_QUEUE) + pktio_param.in_mode = ODP_PKTIN_MODE_QUEUE; + else if (pktin_sched_mode(in_mode)) + pktio_param.in_mode = ODP_PKTIN_MODE_SCHED; + else + APPL_EXIT_FAILURE("dev:'%s': unsupported pktin-mode:%d\n", + dev, in_mode); + + /* Packet output mode: QUEUE mode to preserve packet order if needed */ + pktio_param.out_mode = ODP_PKTOUT_MODE_QUEUE; + + pktio = odp_pktio_open(dev, pktio_shm->pools.pktpool_odp, &pktio_param); + if (pktio == ODP_PKTIO_INVALID) + APPL_EXIT_FAILURE("pktio create failed for dev:'%s'\n", dev); + + if (odp_pktio_info(pktio, &info)) + APPL_EXIT_FAILURE("pktio info failed dev:'%s'", dev); + + if_idx = odp_pktio_index(pktio); + if (if_idx < 0 || if_idx >= IF_MAX_NUM) + APPL_EXIT_FAILURE("pktio index:%d too large, dev:'%s'", + if_idx, dev); + + APPL_PRINT("\n%s(dev=%s):\n", __func__, dev); + APPL_PRINT("\tcreated pktio:%" PRIu64 " idx:%d, dev:'%s', drv:%s\n", + odp_pktio_to_u64(pktio), if_idx, dev, info.drv_name); + + ret = odp_pktio_capability(pktio, &pktio_capa); + if (ret != 0) + APPL_EXIT_FAILURE("pktio capability query failed: dev:'%s' (%d)", + dev, ret); + + odp_pktio_config_init(&pktio_config); + pktio_config.parser.layer = ODP_PROTO_LAYER_NONE; + /* Provide hint to pktio that packet references are not used */ + pktio_config.pktout.bit.no_packet_refs = 1; + + ret = odp_pktio_config(pktio, &pktio_config); + if (ret != 0) + APPL_EXIT_FAILURE("pktio config failed: dev:'%s' (%d)", + dev, ret); + + /* Pktin (Rx) config */ + pktin_config(dev, if_idx, pktio, &pktio_capa, + 
if_count, num_workers, in_mode, pktin_vector); + + /* Pktout (Tx) config */ + pktout_config(dev, if_idx, pktio, &pktio_capa, num_workers); + + APPL_PRINT("\tcreated pktio dev:'%s' - input mode:%s, output mode:QUEUE", + dev, pktin_mode_str(in_mode)); + + pktio_shm->ifs.idx[pktio_shm->ifs.num_created] = if_idx; + pktio_shm->ifs.pktio_hdl[if_idx] = pktio; + pktio_shm->ifs.num_created++; + + return if_idx; +} + +void +pktio_start(void) +{ + if (pktio_shm->ifs.num_created != pktio_shm->ifs.count) + APPL_EXIT_FAILURE("Pktio IFs created:%d != IF count:%d", + pktio_shm->ifs.num_created, + pktio_shm->ifs.count); + + for (int i = 0; i < pktio_shm->ifs.count; i++) { + int if_idx = pktio_shm->ifs.idx[i]; + odp_pktio_t pktio = pktio_shm->ifs.pktio_hdl[if_idx]; + int ret = odp_pktio_start(pktio); + + if (unlikely(ret != 0)) + APPL_EXIT_FAILURE("Unable to start if:%d", if_idx); + APPL_PRINT("%s(): if:%d\n", __func__, if_idx); + } + + odp_mb_full(); + pktio_shm->pktio_started = 1; +} + +void pktio_halt(void) +{ + pktio_shm->pktio_started = 0; + odp_mb_full(); + APPL_PRINT("\n%s() on EM-core %d\n", __func__, em_core_id()); +} + +void pktio_stop(void) +{ + for (int i = 0; i < pktio_shm->ifs.count; i++) { + int if_idx = pktio_shm->ifs.idx[i]; + odp_pktio_t pktio = pktio_shm->ifs.pktio_hdl[if_idx]; + int ret = odp_pktio_stop(pktio); + + if (unlikely(ret != 0)) + APPL_EXIT_FAILURE("Unable to stop if:%d", if_idx); + APPL_PRINT("%s(): if:%d\n", __func__, if_idx); + } +} + +void pktio_close(void) +{ + for (int i = 0; i < pktio_shm->ifs.count; i++) { + int if_idx = pktio_shm->ifs.idx[i]; + odp_pktio_t pktio = pktio_shm->ifs.pktio_hdl[if_idx]; + int ret = odp_pktio_close(pktio); + + if (unlikely(ret != 0)) + APPL_EXIT_FAILURE("pktio close failed for if:%d", if_idx); + + pktio_shm->ifs.pktio_hdl[if_idx] = ODP_PKTIO_INVALID; + } + + if (pktin_polled_mode(pktio_shm->pktin.in_mode)) + pktin_queue_queueing_destroy(); + pktio_tx_buffering_destroy(); +} + +static inline int +pktin_queue_acquire(odp_pktin_queue_t **pktin_queue_ptr /*out*/) +{ + odp_pktin_queue_t *pktin_qptr; + uintptr_t pktin_qptr_uintptr; + + int ret = odp_stash_get_ptr(pktio_shm->pktin.pktin_queue_stash, + &pktin_qptr_uintptr, 1); + + if (unlikely(ret != 1)) + return -1; + + pktin_qptr = (odp_pktin_queue_t *)pktin_qptr_uintptr; + + *pktin_queue_ptr = pktin_qptr; + return 0; +} + +static inline void +pktin_queue_release(odp_pktin_queue_t *pktin_queue_ptr) +{ + uintptr_t pktin_qptr_uintptr; + + /* store the pointer as an 'uintptr_t' in the stash */ + pktin_qptr_uintptr = (uintptr_t)pktin_queue_ptr; + + int ret = odp_stash_put_ptr(pktio_shm->pktin.pktin_queue_stash, + &pktin_qptr_uintptr, 1); + if (unlikely(ret != 1)) + APPL_EXIT_FAILURE("stash-put fails:%d", ret); +} + +static inline odp_queue_t +plain_queue_acquire(void) +{ + odp_queue_t queue; + uintptr_t queue_uintptr; + + int ret = odp_stash_get_ptr(pktio_shm->pktin.pktin_queue_stash, + &queue_uintptr, 1); + if (unlikely(ret != 1)) + return ODP_QUEUE_INVALID; + + queue = (odp_queue_t)queue_uintptr; + + return queue; +} + +static inline void +plain_queue_release(odp_queue_t queue) +{ + uintptr_t queue_uintptr; + + /* store the queue as an 'uintptr_t' in the stash */ + queue_uintptr = (uintptr_t)queue; + + int ret = odp_stash_put_ptr(pktio_shm->pktin.pktin_queue_stash, + &queue_uintptr, 1); + if (unlikely(ret != 1)) + APPL_EXIT_FAILURE("stash-put fails:%d", ret); +} + +/* + * Helper to the pktin_pollfn_...() functions. 
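+ *
+ * Packets are dispatched on a (proto, IPv4-dst, port-dst) key that the
+ * application has registered in advance, e.g. (sketch):
+ *
+ *   pktio_add_queue(ODPH_IPPROTO_UDP, ipv4_dst, port_dst, em_queue);
+ *
+ * Packets without a matching key fall back to the default queue set via
+ * pktio_default_queue(), or are freed if no default queue is set.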
+ */ +static inline int /* nbr of pkts enqueued */ +pktin_lookup_enqueue(odp_packet_t pkt_tbl[], int pkts) +{ + const table_get_value f_get = pktio_shm->tbl_lookup.ops.f_get; + rx_queue_burst_t *const rx_qbursts = pktio_locm.rx_qbursts; + int pkts_enqueued = 0; /* return value */ + int valid_pkts = 0; + + for (int i = 0; i < pkts; i++) { + const odp_packet_t pkt = pkt_tbl[i]; + void *const pkt_data = odp_packet_data(pkt); + + /* + * If 'pktio_config.parser.layer = + * ODP_PKTIO_PARSER_LAYER_L4;' then the following + * better checks can be used (is slower though). + * if (unlikely(!odp_packet_has_udp(pkt))) { + * odp_packet_free(pkt); + * continue; + * } + * + * pkt_data = odp_packet_data(pkt); + * ip = (odph_ipv4hdr_t *)((uintptr_t)pkt_data + + * odp_packet_l3_offset(pkt)); + * udp = (odph_udphdr_t *)((uintptr_t)pkt_data + + * odp_packet_l4_offset(pkt)); + */ + + /* Note: no actual checks if the headers are present */ + odph_ipv4hdr_t *const ip = (odph_ipv4hdr_t *) + ((uintptr_t)pkt_data + sizeof(odph_ethhdr_t)); + odph_udphdr_t *const udp = (odph_udphdr_t *) + ((uintptr_t)ip + sizeof(odph_ipv4hdr_t)); + /* + * NOTE! network-to-CPU conversion not needed here. + * Setup stores network-order in hash to avoid + * conversion for every packet. + */ + pktio_locm.keys[i].ip_dst = ip->dst_addr; + pktio_locm.keys[i].proto = ip->proto; + pktio_locm.keys[i].port_dst = + likely(ip->proto == ODPH_IPPROTO_UDP || + ip->proto == ODPH_IPPROTO_TCP) ? + udp->dst_port : 0; + } + + for (int i = 0; i < pkts; i++) { + const odp_packet_t pkt = pkt_tbl[i]; + rx_pkt_queue_t rx_pkt_queue; + em_queue_t queue; + int pos; + + /* table(hash) lookup to find queue */ + int ret = f_get(pktio_shm->tbl_lookup.tbl, + &pktio_locm.keys[i], + &rx_pkt_queue, sizeof(rx_pkt_queue_t)); + if (likely(ret == 0)) { + /* found */ + pos = rx_pkt_queue.pos; + queue = rx_pkt_queue.queue; + } else { + /* not found, use default queue if set */ + pos = MAX_RX_PKT_QUEUES; /* reserved space +1*/ + queue = pktio_shm->default_queue; + if (unlikely(queue == EM_QUEUE_UNDEF)) { + odp_packet_free(pkt); + continue; + } + } + + pktio_locm.positions[valid_pkts++] = pos; + rx_qbursts[pos].sent = 0; + rx_qbursts[pos].queue = queue; + rx_qbursts[pos].pkt_tbl[rx_qbursts[pos].pkt_cnt++] = pkt; + } + + for (int i = 0; i < valid_pkts; i++) { + const int pos = pktio_locm.positions[i]; + + if (rx_qbursts[pos].sent) + continue; + + const int num = rx_qbursts[pos].pkt_cnt; + const em_queue_t queue = rx_qbursts[pos].queue; + + /* Enqueue pkts into em-odp */ + pkts_enqueued += em_odp_pkt_enqueue(rx_qbursts[pos].pkt_tbl, + num, queue); + rx_qbursts[pos].sent = 1; + rx_qbursts[pos].pkt_cnt = 0; + } + + return pkts_enqueued; +} + +/* + * User provided function to poll for packet input in DIRECT_RECV-mode, + * given to EM via 'em_conf.input.input_poll_fn = pktin_pollfn_direct;' + * The function is of type 'em_input_poll_func_t'. See .h file. 
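+ *
+ * Hook-up sketch, assumed to happen in the application setup code before
+ * EM is initialized (names as quoted above and in the EM API):
+ *
+ *   em_conf_t em_conf;
+ *
+ *   em_conf_init(&em_conf);
+ *   em_conf.input.input_poll_fn = pktin_pollfn_direct;
+ *   em_conf.output.output_drain_fn = pktout_drainfn;
+ *   em_init(&em_conf);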
+ */ +int pktin_pollfn_direct(void) +{ + odp_pktin_queue_t *pktin_queue_ptr; + odp_packet_t pkt_tbl[MAX_PKT_BURST_RX]; + int ret, pkts; + int poll_rounds = 0; + int pkts_enqueued = 0; /* return value */ + + if (unlikely(!pktio_shm->pktio_started)) + return 0; + + ret = pktin_queue_acquire(&pktin_queue_ptr /*out*/); + if (unlikely(ret != 0)) + return 0; + + do { + pkts = odp_pktin_recv(*pktin_queue_ptr, pkt_tbl, MAX_PKT_BURST_RX); + if (unlikely(pkts <= 0)) + goto pktin_poll_end; + + pkts_enqueued += pktin_lookup_enqueue(pkt_tbl, pkts); + + } while (pkts == MAX_PKT_BURST_RX && + ++poll_rounds < MAX_RX_POLL_ROUNDS); + +pktin_poll_end: + pktin_queue_release(pktin_queue_ptr); + + return pkts_enqueued; +} + +/* + * User provided function to poll for packet input in PLAIN_QUEUE-mode, + * given to EM via 'em_conf.input.input_poll_fn = pktin_pollfn_plainqueue;' + * The function is of type 'em_input_poll_func_t'. See .h file. + */ +int pktin_pollfn_plainqueue(void) +{ + odp_queue_t plain_queue; + odp_event_t ev_tbl[MAX_PKT_BURST_RX]; + odp_packet_t pkt_tbl[MAX_PKT_BURST_RX]; + int pkts; + int poll_rounds = 0; + int pkts_enqueued = 0; /* return value */ + + if (unlikely(!pktio_shm->pktio_started)) + return 0; + + plain_queue = plain_queue_acquire(); + if (unlikely(plain_queue == ODP_QUEUE_INVALID)) + return 0; + + do { + pkts = odp_queue_deq_multi(plain_queue, ev_tbl, MAX_PKT_BURST_RX); + if (unlikely(pkts <= 0)) + goto pktin_poll_end; + + odp_packet_from_event_multi(pkt_tbl, ev_tbl, pkts); + + pkts_enqueued += pktin_lookup_enqueue(pkt_tbl, pkts); + + } while (pkts == MAX_PKT_BURST_RX && + ++poll_rounds < MAX_RX_POLL_ROUNDS); + +pktin_poll_end: + plain_queue_release(plain_queue); + + return pkts_enqueued; +} + +static inline int +pktio_tx_burst(tx_burst_t *const tx_burst) +{ + if (odp_spinlock_is_locked(&tx_burst->lock) || + odp_spinlock_trylock(&tx_burst->lock) == 0) + return 0; + + const int num = odp_queue_deq_multi(tx_burst->queue, + pktio_locm.ev_burst, + MAX_PKT_BURST_TX); + if (unlikely(num <= 0)) { + odp_spinlock_unlock(&tx_burst->lock); + return 0; + } + + odp_atomic_sub_u64(&tx_burst->cnt, (uint64_t)num); + + const odp_queue_t pktout_queue = tx_burst->pktout_queue; + /* Enqueue a tx burst onto the pktio queue for transmission */ + int ret = odp_queue_enq_multi(pktout_queue, pktio_locm.ev_burst, num); + + odp_spinlock_unlock(&tx_burst->lock); + + if (unlikely(ret != num)) { + if (ret < 0) + ret = 0; + odp_event_free_multi(&pktio_locm.ev_burst[ret], num - ret); + } + + return ret; +} + +/** + * @brief User provided output-queue callback function (em_output_func_t). + * + * Transmit events(pkts) via Eth Tx queues. 
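+ *
+ * One way this callback might be hooked up (a sketch, assuming EM's
+ * output-queue setup via 'em_output_queue_conf_t' and em_queue_create();
+ * the lifetime/ownership of 'args' is omitted for brevity):
+ *
+ *	pktio_tx_fn_args_t args = { .if_id = if_id };
+ *	em_output_queue_conf_t output_conf = {
+ *		.output_fn = pktio_tx,
+ *		.output_fn_args = &args,
+ *		.args_len = sizeof(args)
+ *	};
+ *	em_queue_conf_t conf = {
+ *		.flags = EM_QUEUE_FLAG_DEFAULT,
+ *		.min_events = 0,
+ *		.conf_len = sizeof(output_conf),
+ *		.conf = &output_conf
+ *	};
+ *	em_queue_t pktout_queue =
+ *		em_queue_create("pktout", EM_QUEUE_TYPE_OUTPUT,
+ *				EM_QUEUE_PRIO_UNDEF, EM_QUEUE_GROUP_UNDEF,
+ *				&conf);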
+ *
+ * @return The number of events actually transmitted (<= num)
+ */
+int pktio_tx(const em_event_t events[], const unsigned int num,
+	     const em_queue_t output_queue, void *output_fn_args)
+{
+	/* Create idx to select tx-burst: always the same idx for the same EM queue */
+	const int burst_idx = (int)((uintptr_t)output_queue %
+				    MAX_TX_BURST_BUFS);
+	pktio_tx_fn_args_t *const args = output_fn_args;
+	const int if_port = (int)(args->if_id % IF_MAX_NUM);
+	/* Select the tx-burst in which to temporarily store the pkts/events until tx */
+	tx_burst_t *const tx_burst = &pktio_shm->tx_burst[if_port][burst_idx];
+	uint64_t prev_cnt;
+	int ret;
+
+	if (unlikely(num == 0 || !pktio_shm->pktio_started))
+		return 0;
+
+	/* Convert into ODP-events */
+	odp_event_t odp_events[num];
+
+	em_odp_events2odp(events, odp_events, num);
+
+	/*
+	 * Mark all events as "free" from the EM point of view - ODP will
+	 * transmit and free the events (=odp-pkts).
+	 */
+	em_event_mark_free_multi(events, num);
+
+	/*
+	 * 'sched_ctx_type = em_sched_context_type_current(&src_sched_queue)'
+	 * could be used to determine the need for maintaining event order for
+	 * output. Additionally, em_queue_get_type(src_sched_queue) could be
+	 * used if not caring about a potentially ended sched-context caused
+	 * by an earlier call to em_atomic/ordered_processing_end().
+	 * Here, none of this is done, since every event will be buffered and
+	 * sent out in order regardless of sched context type or queue type.
+	 */
+
+	ret = odp_queue_enq_multi(tx_burst->queue, odp_events, num);
+	if (unlikely(ret < 0)) {
+		/* failure: don't return yet, a burst might still be transmitted below */
+		ret = 0;
+	}
+
+	prev_cnt = odp_atomic_fetch_add_u64(&tx_burst->cnt, ret);
+	if (prev_cnt >= MAX_PKT_BURST_TX - 1)
+		(void)pktio_tx_burst(tx_burst);
+
+	if (unlikely(ret < (int)num))
+		em_event_unmark_free_multi(&events[ret], num - ret);
+
+	return ret;
+}
+
+static inline tx_burst_t *
+tx_drain_burst_acquire(void)
+{
+	tx_burst_t *tx_burst;
+	uintptr_t tx_burst_uintptr;
+
+	int ret = odp_stash_get_ptr(pktio_shm->pktout.tx_burst_stash,
+				    &tx_burst_uintptr, 1);
+	if (unlikely(ret != 1))
+		return NULL;
+
+	tx_burst = (tx_burst_t *)tx_burst_uintptr;
+	return tx_burst;
+}
+
+static inline void
+tx_drain_burst_release(tx_burst_t *tx_burst)
+{
+	uintptr_t tx_burst_uintptr = (uintptr_t)tx_burst;
+
+	int ret = odp_stash_put_ptr(pktio_shm->pktout.tx_burst_stash,
+				    &tx_burst_uintptr, 1);
+	if (unlikely(ret != 1))
+		APPL_EXIT_FAILURE("stash-put fails:%d", ret);
+}
+
+/*
+ * User provided function to drain buffered output,
+ * given to EM via 'em_conf.output.output_drain_fn = pktout_drainfn;'
+ * The function is of type 'em_output_drain_func_t'
+ */
+int pktout_drainfn(void)
+{
+	const uint64_t curr = odp_cpu_cycles(); /* core-local timestamp */
+	const uint64_t prev = pktio_locm.tx_prev_cycles;
+	const uint64_t diff = likely(curr >= prev) ?
+ curr - prev : UINT64_MAX - prev + curr + 1; + int ret = 0; + + /* TX burst queue drain */ + if (unlikely(diff > BURST_TX_DRAIN)) { + tx_burst_t *tx_drain_burst = tx_drain_burst_acquire(); + + if (tx_drain_burst) { + ret = pktio_tx_burst(tx_drain_burst); + /* Update timestamp for next round */ + pktio_locm.tx_prev_cycles = curr; + tx_drain_burst_release(tx_drain_burst); + } + } + + return ret; +} + +void pktio_add_queue(uint8_t proto, uint32_t ipv4_dst, uint16_t port_dst, + em_queue_t queue) +{ + pkt_q_hash_key_t key; + int ret, idx; + + /* Store in network format to avoid conversion during Rx lookup */ + key.ip_dst = htonl(ipv4_dst); + key.port_dst = htons(port_dst); + key.proto = proto; + + odp_ticketlock_lock(&pktio_shm->tbl_lookup.lock); + + idx = pktio_shm->tbl_lookup.tbl_idx; + if (unlikely(idx != pktio_shm->rx_pkt_queues[idx].pos)) { + odp_ticketlock_unlock(&pktio_shm->tbl_lookup.lock); + APPL_EXIT_FAILURE("tbl insertion failed, idx(%d) != pos(%d)", + idx, pktio_shm->rx_pkt_queues[idx].pos); + return; + } + + if (unlikely(em_queue_get_type(queue) == EM_QUEUE_TYPE_UNDEF)) { + odp_ticketlock_unlock(&pktio_shm->tbl_lookup.lock); + APPL_EXIT_FAILURE("Invalid queue:%" PRI_QUEUE "", queue); + return; + } + + pktio_shm->rx_pkt_queues[idx].queue = queue; + + ret = pktio_shm->tbl_lookup.ops.f_put(pktio_shm->tbl_lookup.tbl, &key, + &pktio_shm->rx_pkt_queues[idx]); + if (likely(ret == 0)) + pktio_shm->tbl_lookup.tbl_idx++; + + odp_ticketlock_unlock(&pktio_shm->tbl_lookup.lock); + + if (unlikely(ret != 0)) + APPL_EXIT_FAILURE("tbl insertion failed"); +} + +int pktio_default_queue(em_queue_t queue) +{ + if (unlikely(em_queue_get_type(queue) == EM_QUEUE_TYPE_UNDEF)) { + APPL_EXIT_FAILURE("Invalid queue:%" PRI_QUEUE "", queue); + return -1; + } + + pktio_shm->default_queue = queue; + + return 0; +} + +em_queue_t pktio_lookup_sw(uint8_t proto, uint32_t ipv4_dst, uint16_t port_dst) +{ + em_queue_t queue; + rx_pkt_queue_t rx_pkt_queue; + int ret, pos; + /* Store in network format to avoid conversion during Rx lookup */ + pkt_q_hash_key_t key = {.ip_dst = htonl(ipv4_dst), + .port_dst = htons(port_dst), + .proto = proto}; + + /* table(hash) lookup to find queue */ + ret = pktio_shm->tbl_lookup.ops.f_get(pktio_shm->tbl_lookup.tbl, + &key, &rx_pkt_queue, + sizeof(rx_pkt_queue_t)); + + if (likely(ret == 0)) { + /* found */ + pos = rx_pkt_queue.pos; + queue = rx_pkt_queue.queue; + if (unlikely(queue != pktio_shm->rx_pkt_queues[pos].queue)) { + APPL_EXIT_FAILURE("%" PRI_QUEUE "!= %" PRI_QUEUE "", + queue, + pktio_shm->rx_pkt_queues[pos].queue); + return EM_QUEUE_UNDEF; + } + } else { + queue = EM_QUEUE_UNDEF; + } + + return queue; +} + +odp_pool_t pktio_pool_get(void) +{ + return pktio_shm->pools.pktpool_odp; +} diff --git a/programs/common/cm_pktio.h b/programs/common/cm_pktio.h index bb8966d9..287252a0 100644 --- a/programs/common/cm_pktio.h +++ b/programs/common/cm_pktio.h @@ -1,565 +1,565 @@ -/* - * Copyright (c) 2012, Nokia Siemens Networks - * Copyright (c) 2015-2024, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef CM_PKTIO_H -#define CM_PKTIO_H - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include -#include - -#include -#include -#include - -#include "table.h" -#include "cuckootable.h" - -#define IPV4_PROTO_UDP ODPH_IPPROTO_UDP - -/** - * @def PKTIO_MAX_IN_QUEUES - * @brief Maximum number of odp pktio input queues per interface - */ -#define PKTIO_MAX_IN_QUEUES 32 - -/** - * @def PKTIO_MAX_OUT_QUEUES - * @brief Maximum number of odp pktio output queues per interface - */ -#define PKTIO_MAX_OUT_QUEUES 16 - -/** - * @def MAX_PKT_BURST_RX - * @brief Maximum number of packets received from a pktio input queue - * in one burst in polled pktin-mode (DIRECT_RECV, PLAIN_QUEUE) - */ -#define MAX_PKT_BURST_RX 32 - -/** - * @def MAX_PKT_BURST_TX - * @brief Maximum number of packets bursted onto a pktout queue - */ -#define MAX_PKT_BURST_TX 32 - -/** - * @def MAX_TX_BURST_BUFS - * @brief Maximum number of tx burst buffers per interface - * - * Store Tx pkts in output buffers until a buffer has 'MAX_PKT_BURST_TX' pkts, - * then transmit the whole burst of pkts instead of one by one. - */ -#define MAX_TX_BURST_BUFS EM_MAX_CORES - -/** - * @def MAX_RX_PKT_QUEUES - * @brief - */ -#define MAX_RX_PKT_QUEUES (4 * 64) - -/** - * @def MAX_RX_POLL_ROUNDS - * @brief - */ -#define MAX_RX_POLL_ROUNDS 4 - -/** - * @def BURST_TX_DRAIN - * @brief The number of core cycles between timed TX buf drain operations - */ -#define BURST_TX_DRAIN (400000ULL) /* around 200us at 2 Ghz */ - -/** - * @brief pkt header fields to use as hash key - * - * Fields from incoming packets used for destination em-odp queue lookup. 
- */ -struct pkt_dst_tuple { - /* uint32_t ip_src;*/ - uint32_t ip_dst; - /* uint16_t port_src;*/ - uint16_t port_dst; - uint16_t proto; -} __attribute__((__packed__)); - -/** Use the struct pkt_dst_tuple as hash key for em-odp queue lookups */ -typedef struct pkt_dst_tuple pkt_q_hash_key_t; - -/* Keep size multiple of 32-bits for faster hash-crc32 calculation*/ -ODP_STATIC_ASSERT(sizeof(pkt_q_hash_key_t) % sizeof(uint32_t) == 0, - "HASH_KEY_NOT_MULTIP_OF_32__ERROR"); - -/** - * @brief Info about em-odp queue to use, returned by hash lookup - * - * Information about an em-odp queue used for pktio, stored in a hash table and - * used when doing a tbl lookup to determine the destination em-odp queue - * for a received packet. - */ -typedef struct { - int pos; - em_queue_t queue; -} rx_pkt_queue_t; - -/** - * @brief Tx pkt burst buffer - * - * Buffer up to 'MAX_PKT_BURST_TX' pkts before bursting them all onto - * the associated 'pktout_queue' at once. - */ -typedef struct tx_burst { - /** store tx pkts temporaily in 'queue' before bursting onto tx */ - odp_queue_t queue ODP_ALIGNED_CACHE; - /** count the number of events in 'queue', updated atomically */ - odp_atomic_u64_t cnt; - /** lock needed when dequeueing from 'queue' */ - odp_spinlock_t lock; - /** store the output interface port also here for easy access */ - int if_port; - /** Transmit burst using this pktout_queue */ - odp_queue_t pktout_queue; -} tx_burst_t; - -/** - * @brief Rx pkt storage for pkts destined to the same em-odp queue - * - * Temporary storage for events to be enqueued onto the _same_ queue - * after receiving a packet burst on Rx - */ -typedef struct { - int sent; - int pkt_cnt; - em_queue_t queue; - odp_packet_t pkt_tbl[MAX_PKT_BURST_RX]; -} rx_queue_burst_t; - -/** - * @brief Pktio shared memory - * - * Collection of shared data used by pktio Rx&Tx - */ -typedef struct { - /** flag set after pktio_start() - prevent pkio rx&tx before started */ - int pktio_started; - - /** Default queue to use for incoming pkts without a dedicated queue */ - em_queue_t default_queue; - - struct { - /** EM pool for pktio, only used with '--pktpool-em' option */ - em_pool_t pktpool_em; - - /** ODP pool for pktio: - * 1. Subpool of 'pktpool_em' when using '--pktpool-em' option - * or - * 2. Direct ODP pkt pool when using '--pktpool-odp' option - */ - odp_pool_t pktpool_odp; - - /** EM vector pool for pktio, only used with '--pktin-vector' option */ - em_pool_t vecpool_em; - /** ODP vector pool for pktio: - * 1. Subpool of 'vecpool_em' when using '--vecpool-em' option - * or - * 2. 
Direct ODP vector pool when using '--vecpool-odp' option - */ - odp_pool_t vecpool_odp; - } pools; - - /** Packet I/O Interfaces */ - struct { - /** The number of pktio interfaces used */ - int count; - /** Interfaces created so far (up to '.count'), startup only */ - int num_created; - /** Interface indexes used */ - int idx[IF_MAX_NUM]; - /** ODP pktio handles, .pktio_hdl[idx] corresponds to idx=.idx[i] */ - odp_pktio_t pktio_hdl[IF_MAX_NUM]; - } ifs; - - /** Packet input and related resources */ - struct { - /* Packet input mode */ - pktin_mode_t in_mode; - - /** Number of input queues per interface */ - int num_queues[IF_MAX_NUM]; - - /** pktin queues used in DIRECT_RECV-mode, per interface */ - odp_pktin_queue_t pktin_queues[IF_MAX_NUM][PKTIO_MAX_IN_QUEUES]; - - /** plain event queues used in PLAIN_QUEUE-mode, per interface */ - odp_queue_t plain_queues[IF_MAX_NUM][PKTIO_MAX_IN_QUEUES]; - - /** scheduled event queues used in SCHED_...-mode, per interface */ - odp_queue_t sched_queues[IF_MAX_NUM][PKTIO_MAX_IN_QUEUES]; - /** scheduled EM event queues created from sched_queues[][] above */ - em_queue_t sched_em_queues[IF_MAX_NUM][PKTIO_MAX_IN_QUEUES]; - - /** A queue that contains pointers to the shared - * pktin_queues[][] in DIRECT_RECV-mode or to the shared - * plain_queues[][] in PLAIN_QUEUE-mode. - * Each core needs to dequeue one packet input queue to be - * able to use it to receive packets. - */ - odp_stash_t pktin_queue_stash; - } pktin; - - /** Packet output and related resources */ - struct { - /** Number of pktio output queues per interface */ - int num_queues[IF_MAX_NUM]; - - /** All pktio output queues used, per interface */ - odp_queue_t queues[IF_MAX_NUM][PKTIO_MAX_OUT_QUEUES]; - - /** A stash that contains the shared tx_burst[][] entries. - * Used when draining the available tx-burst buffers - */ - odp_stash_t tx_burst_stash; - } pktout; - - /** Info about the em-odp queues configured for pktio, store in hash */ - rx_pkt_queue_t rx_pkt_queues[MAX_RX_PKT_QUEUES]; - - /** Pkt lookup table, lookup destination em-odp queue for Rx pkts */ - struct { - table_ops_t ops; - table_t tbl; - int tbl_idx; - odp_ticketlock_t lock; - } tbl_lookup; - - /** Tx burst buffers per interface */ - tx_burst_t tx_burst[IF_MAX_NUM][MAX_TX_BURST_BUFS] ODP_ALIGNED_CACHE; -} pktio_shm_t; - -/** - * @brief Pktio core-local memory - * - * Collection of core local (not shared) data used by pktio Rx&Tx - */ -typedef struct { - /** Event contains the currently used pktio input queue */ - odp_event_t pktin_queue_event; - /** Determine need for timed drain of pktio Tx queues */ - uint64_t tx_prev_cycles; - /** Array of hash keys for the current received Rx pkt burst */ - pkt_q_hash_key_t keys[MAX_PKT_BURST_RX]; - /** Array of positions into rx_qbursts[], filled from hash lookup */ - int positions[MAX_PKT_BURST_RX]; - /** Grouping of Rx pkts per destination em-odp queue */ - rx_queue_burst_t rx_qbursts[MAX_RX_PKT_QUEUES + 1]; /* +1=default Q */ - /** Temporary storage of Tx pkt burst */ - odp_event_t ev_burst[MAX_PKT_BURST_TX]; -} pktio_locm_t; - -/** - * Reserve shared memory for pktio - * - * Must be called once at startup. Additionally each EM-core needs to call the - * pktio_mem_lookup() function before using any further pktio resources. - */ -void pktio_mem_reserve(void); - -/** - * Lookup shared memory for pktio - * - * Must be called once by each EM-core before using any further pktio resources. 
- * - * @param is_thread_per_core true: EM running in thread-per-core mode - * false: EM running in process-per-core mode - */ -void pktio_mem_lookup(bool is_thread_per_core); - -void pktio_mem_free(void); - -void pktio_pool_create(int if_count, bool pktpool_em, - bool pktin_vector, bool vecpool_em); -void pktio_pool_destroy(bool pktpool_em, bool pktin_vector, bool vecpool_em); - -void pktio_init(const appl_conf_t *appl_conf); -void pktio_deinit(const appl_conf_t *appl_conf); - -int pktio_create(const char *dev, pktin_mode_t in_mode, bool pktin_vector, - int if_count, int num_workers); -void pktio_start(void); -void pktio_halt(void); -void pktio_stop(void); -void pktio_close(void); - -const char *pktin_mode_str(pktin_mode_t in_mode); -bool pktin_polled_mode(pktin_mode_t in_mode); -bool pktin_sched_mode(pktin_mode_t in_mode); - -/** - * @brief Poll input resources for pkts/events in DIRECT_RECV-mode - * and enqueue into EM queues. - * - * Given to EM via 'em_conf.input.input_poll_fn' - EM will call this on - * each core in the dispatch loop. - * The function is of type 'em_input_poll_func_t' - * - * @return number of pkts/events received from input and enqueued into EM - */ -int pktin_pollfn_direct(void); - -/** - * @brief Poll input resources for pkts/events in PLAIN_QUEUE-mode - * and enqueue into EM queues. - * - * Given to EM via 'em_conf.input.input_poll_fn' - EM will call this on - * each core in the dispatch loop. - * The function is of type 'em_input_poll_func_t' - * - * @return number of pkts/events received from input and enqueued into EM - */ -int pktin_pollfn_plainqueue(void); - -/** - * @brief Drain buffered output - ensure low rate flows are also sent out. - * - * Useful in situations where output is buffered and sent out in bursts when - * enough output has been gathered - single events or low rate flows may, - * without this function, never be sent out (or too late) if the buffering - * threshold has not been reached. - * - * Given to EM via 'em_conf.output.output_drain_fn' - EM will call this on - * each core in the dispatch loop. - * The function is of type 'em_output_drain_func_t' - * - * @return number of events successfully drained and sent for output - */ -int pktout_drainfn(void); - -/** - * @brief User provided EM output-queue callback function ('em_output_func_t') - * - * Transmit events(pkts) using the given config onto Eth-tx - * - * Buffers the given 'events' in a Tx burst buffer and when full transmits - * the whole burst from the buffer at once. - * - * @param events[] Events to be sent - * @param num Number of entries in 'events[]' - * @param output_queue EM output queue the events were sent into (em_send*()) - * @param output_fn_args Function args specific to the output-queue - * Note: here it will be a 'pktio_tx_fn_args_t' pointer - * - * @return number of events successfully sent (equal to num if all successful) - */ -int pktio_tx(const em_event_t events[], const unsigned int num, - const em_queue_t output_queue, void *output_fn_args); -/** - * @typedef pktio_tx_fn_args_t - * User defined arguments to the EM output queue callback function - */ -typedef struct { - /** Pktio Tx interface ID */ - int if_id; - /* add more if needed */ -} pktio_tx_fn_args_t; - -/** - * Associate an EM-queue with a packet-I/O flow. - * - * Received packets matching the set destination IP-addr/port - * will end up in the EM-queue 'queue'. 
- */ -void pktio_add_queue(uint8_t proto, uint32_t ipv4_dst, uint16_t l4_port_dst, - em_queue_t queue); - -/** - * Remove the association between a packet-IO flow and an EM-queue. - * - * No further received frames will end up in the EM-queue 'queue' - */ -void pktio_rem_queue(uint8_t proto, uint32_t ipv4_dst, uint16_t l4_port_dst, - em_queue_t queue); - -/** - * Set the default EM-queue for packet I/O - */ -int pktio_default_queue(em_queue_t queue); - -/** - * Provide applications a way to do a hash-lookup (e.g. sanity check etc.) - */ -em_queue_t pktio_lookup_sw(uint8_t proto, uint32_t ipv4_dst, - uint16_t l4_port_dst); - -odp_pool_t pktio_pool_get(void); - -static inline int -pktio_input_port(em_event_t event) -{ - const odp_event_t odp_event = em_odp_event2odp(event); - const odp_packet_t pkt = odp_packet_from_event(odp_event); - const int input_port = odp_packet_input_index(pkt); - - if (unlikely(input_port < 0)) - return 0; - - return input_port; -} - -/** - * Get the protocol, IPv4 destination address and destination L4 port the - * packet-event was sent to. - */ -static inline void -pktio_get_dst(em_event_t pktev, uint8_t *proto__out, - uint32_t *ipv4_dst__out, uint16_t *l4_port_dst__out) -{ - /* if (odp_packet_has_ipv4(pkt)) { - * ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL); - * *proto__out = ip->proto; - * *ipv4_dst__out = ntohl(ip->dst_addr); - * } else { - * *proto__out = 0; - * *ipv4_dst__out = 0; - * } - * - * if (odp_packet_has_udp(pkt)) { - * udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL); - * *port_dst__out = ntohs(udp->dst_port); - * } else { - * *port_dst__out = 0; - * } - */ - - /* Note: no actual checks if the headers are present */ - void *pkt_data = em_packet_pointer(pktev); - odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)((uintptr_t)pkt_data + sizeof(odph_ethhdr_t)); - odph_udphdr_t *udp = (odph_udphdr_t *)((uintptr_t)ip + sizeof(odph_ipv4hdr_t)); - - *proto__out = ip->proto; - *ipv4_dst__out = ntohl(ip->dst_addr); - *l4_port_dst__out = ntohs(udp->dst_port); -} - -static inline void -pktio_swap_eth_addrs(em_event_t pktev) -{ - odph_ethhdr_t *const eth = em_packet_pointer(pktev); - const odph_ethaddr_t eth_tmp_addr = eth->dst; - - eth->dst = eth->src; - eth->src = eth_tmp_addr; -} - -static inline void -pktio_swap_addrs(em_event_t pktev) -{ - /* - * Needs odp_pktio_config_t::parser.layer = ODP_PROTO_LAYER_L2 - * if (odp_packet_has_eth(pkt)) { - * eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL); - * eth_tmp_addr = eth->dst; - * eth->dst = eth->src; - * eth->src = eth_tmp_addr; - * } - * - * Needs odp_pktio_config_t::parser.layer = ODP_PROTO_LAYER_L3 - * if (odp_packet_has_ipv4(pkt)) { - * ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL); - * ip_tmp_addr = ip->src_addr; - * ip->src_addr = ip->dst_addr; - * ip->dst_addr = ip_tmp_addr; - * } - * - * Needs odp_pktio_config_t::parser.layer = ODP_PROTO_LAYER_L4 - * if (odp_packet_has_udp(pkt)) { - * udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL); - * udp_tmp_port = udp->src_port; - * udp->src_port = udp->dst_port; - * udp->dst_port = udp_tmp_port; - * } - */ - - /* Note: no actual checks if headers are present */ - void *pkt_data = em_packet_pointer(pktev); - odph_ethhdr_t *eth = (odph_ethhdr_t *)pkt_data; - odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)((uintptr_t)pkt_data + sizeof(odph_ethhdr_t)); - odph_udphdr_t *udp = (odph_udphdr_t *)((uintptr_t)ip + sizeof(odph_ipv4hdr_t)); - - odph_ethaddr_t eth_tmp_addr = eth->dst; - odp_u32be_t ip_tmp_addr = ip->src_addr; - odp_u16be_t udp_tmp_port = udp->src_port; - - 
eth->dst = eth->src; - eth->src = eth_tmp_addr; - - ip->src_addr = ip->dst_addr; - ip->dst_addr = ip_tmp_addr; - - udp->src_port = udp->dst_port; - udp->dst_port = udp_tmp_port; -} - -static inline em_event_t -pktio_copy_event(em_event_t event) -{ - return em_event_clone(event, EM_POOL_UNDEF); -} - -/** - * Convert an IP-address to ascii string format. - */ -static inline void -ipaddr_tostr(uint32_t ip_addr, char *const ip_addr_str__out, int strlen) -{ - unsigned char *const ucp = (unsigned char *)&ip_addr; - -#if ODP_BYTE_ORDER == ODP_LITTLE_ENDIAN - snprintf(ip_addr_str__out, strlen, "%d.%d.%d.%d", - ucp[3] & 0xff, ucp[2] & 0xff, ucp[1] & 0xff, ucp[0] & 0xff); -#elif ODP_BYTE_ORDER == ODP_BIG_ENDIAN - snprintf(ip_addr_str__out, strlen, "%d.%d.%d.%d", - ucp[0] & 0xff, ucp[1] & 0xff, ucp[2] & 0xff, ucp[3] & 0xff); -#else - #error ODP_BYTE_ORDER invalid -#endif - - ip_addr_str__out[strlen - 1] = '\0'; -} - -#ifdef __cplusplus -} -#endif - -#endif /* CM_PKTIO_H */ +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2015-2024, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef CM_PKTIO_H
+#define CM_PKTIO_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include "table.h"
+#include "cuckootable.h"
+
+#define IPV4_PROTO_UDP ODPH_IPPROTO_UDP
+
+/**
+ * @def PKTIO_MAX_IN_QUEUES
+ * @brief Maximum number of odp pktio input queues per interface
+ */
+#define PKTIO_MAX_IN_QUEUES 32
+
+/**
+ * @def PKTIO_MAX_OUT_QUEUES
+ * @brief Maximum number of odp pktio output queues per interface
+ */
+#define PKTIO_MAX_OUT_QUEUES 16
+
+/**
+ * @def MAX_PKT_BURST_RX
+ * @brief Maximum number of packets received from a pktio input queue
+ *        in one burst in polled pktin-mode (DIRECT_RECV, PLAIN_QUEUE)
+ */
+#define MAX_PKT_BURST_RX 32
+
+/**
+ * @def MAX_PKT_BURST_TX
+ * @brief Maximum number of packets bursted onto a pktout queue
+ */
+#define MAX_PKT_BURST_TX 32
+
+/**
+ * @def MAX_TX_BURST_BUFS
+ * @brief Maximum number of tx burst buffers per interface
+ *
+ * Store Tx pkts in output buffers until a buffer has 'MAX_PKT_BURST_TX' pkts,
+ * then transmit the whole burst of pkts instead of one by one.
+ */
+#define MAX_TX_BURST_BUFS EM_MAX_CORES
+
+/**
+ * @def MAX_RX_PKT_QUEUES
+ * @brief Maximum number of EM queues that Rx pkts can be mapped onto
+ */
+#define MAX_RX_PKT_QUEUES (4 * 64)
+
+/**
+ * @def MAX_RX_POLL_ROUNDS
+ * @brief Maximum number of consecutive Rx poll rounds per input poll call
+ */
+#define MAX_RX_POLL_ROUNDS 4
+
+/**
+ * @def BURST_TX_DRAIN
+ * @brief The number of core cycles between timed TX buf drain operations
+ */
+#define BURST_TX_DRAIN (400000ULL) /* around 200us at 2 GHz */
+
+/**
+ * @brief pkt header fields to use as hash key
+ *
+ * Fields from incoming packets used for destination em-odp queue lookup.
+ */
+struct pkt_dst_tuple {
+	/* uint32_t ip_src; */
+	uint32_t ip_dst;
+	/* uint16_t port_src; */
+	uint16_t port_dst;
+	uint16_t proto;
+} __attribute__((__packed__));
+
+/** Use the struct pkt_dst_tuple as hash key for em-odp queue lookups */
+typedef struct pkt_dst_tuple pkt_q_hash_key_t;
+
+/* Keep the size a multiple of 32 bits for faster hash-crc32 calculation */
+ODP_STATIC_ASSERT(sizeof(pkt_q_hash_key_t) % sizeof(uint32_t) == 0,
+		  "HASH_KEY_NOT_MULTIP_OF_32__ERROR");
+
+/**
+ * @brief Info about the em-odp queue to use, returned by hash lookup
+ *
+ * Information about an em-odp queue used for pktio, stored in a hash table and
+ * used when doing a tbl lookup to determine the destination em-odp queue
+ * for a received packet.
+ */
+typedef struct {
+	int pos;
+	em_queue_t queue;
+} rx_pkt_queue_t;
+
+/**
+ * @brief Tx pkt burst buffer
+ *
+ * Buffer up to 'MAX_PKT_BURST_TX' pkts before bursting them all onto
+ * the associated 'pktout_queue' at once.
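+ *
+ * A buffer is flushed either by pktio_tx() once it fills up or, for low
+ * rate flows, periodically by pktout_drainfn() after 'BURST_TX_DRAIN'
+ * core cycles have passed.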
+ */
+typedef struct tx_burst {
+	/** store tx pkts temporarily in 'queue' before bursting onto tx */
+	odp_queue_t queue ODP_ALIGNED_CACHE;
+	/** count the number of events in 'queue', updated atomically */
+	odp_atomic_u64_t cnt;
+	/** lock needed when dequeueing from 'queue' */
+	odp_spinlock_t lock;
+	/** store the output interface port also here for easy access */
+	int if_port;
+	/** Transmit burst using this pktout_queue */
+	odp_queue_t pktout_queue;
+} tx_burst_t;
+
+/**
+ * @brief Rx pkt storage for pkts destined to the same em-odp queue
+ *
+ * Temporary storage for events to be enqueued onto the _same_ queue
+ * after receiving a packet burst on Rx
+ */
+typedef struct {
+	int sent;
+	int pkt_cnt;
+	em_queue_t queue;
+	odp_packet_t pkt_tbl[MAX_PKT_BURST_RX];
+} rx_queue_burst_t;
+
+/**
+ * @brief Pktio shared memory
+ *
+ * Collection of shared data used by pktio Rx&Tx
+ */
+typedef struct {
+	/** flag set after pktio_start() - prevent pktio rx&tx before started */
+	int pktio_started;
+
+	/** Default queue to use for incoming pkts without a dedicated queue */
+	em_queue_t default_queue;
+
+	struct {
+		/** EM pool for pktio, only used with '--pktpool-em' option */
+		em_pool_t pktpool_em;
+
+		/** ODP pool for pktio:
+		 * 1. Subpool of 'pktpool_em' when using '--pktpool-em' option
+		 * or
+		 * 2. Direct ODP pkt pool when using '--pktpool-odp' option
+		 */
+		odp_pool_t pktpool_odp;
+
+		/** EM vector pool for pktio, only used with '--pktin-vector' option */
+		em_pool_t vecpool_em;
+		/** ODP vector pool for pktio:
+		 * 1. Subpool of 'vecpool_em' when using '--vecpool-em' option
+		 * or
+		 * 2. Direct ODP vector pool when using '--vecpool-odp' option
+		 */
+		odp_pool_t vecpool_odp;
+	} pools;
+
+	/** Packet I/O Interfaces */
+	struct {
+		/** The number of pktio interfaces used */
+		int count;
+		/** Interfaces created so far (up to '.count'), startup only */
+		int num_created;
+		/** Interface indexes used */
+		int idx[IF_MAX_NUM];
+		/** ODP pktio handles, .pktio_hdl[idx] corresponds to idx=.idx[i] */
+		odp_pktio_t pktio_hdl[IF_MAX_NUM];
+	} ifs;
+
+	/** Packet input and related resources */
+	struct {
+		/* Packet input mode */
+		pktin_mode_t in_mode;
+
+		/** Number of input queues per interface */
+		int num_queues[IF_MAX_NUM];
+
+		/** pktin queues used in DIRECT_RECV-mode, per interface */
+		odp_pktin_queue_t pktin_queues[IF_MAX_NUM][PKTIO_MAX_IN_QUEUES];
+
+		/** plain event queues used in PLAIN_QUEUE-mode, per interface */
+		odp_queue_t plain_queues[IF_MAX_NUM][PKTIO_MAX_IN_QUEUES];
+
+		/** scheduled event queues used in SCHED_...-mode, per interface */
+		odp_queue_t sched_queues[IF_MAX_NUM][PKTIO_MAX_IN_QUEUES];
+		/** scheduled EM event queues created from sched_queues[][] above */
+		em_queue_t sched_em_queues[IF_MAX_NUM][PKTIO_MAX_IN_QUEUES];
+
+		/** A stash that contains pointers to the shared
+		 * pktin_queues[][] in DIRECT_RECV-mode or to the shared
+		 * plain_queues[][] in PLAIN_QUEUE-mode.
+		 * Each core needs to dequeue one packet input queue to be
+		 * able to use it to receive packets.
+		 */
+		odp_stash_t pktin_queue_stash;
+	} pktin;
+
+	/** Packet output and related resources */
+	struct {
+		/** Number of pktio output queues per interface */
+		int num_queues[IF_MAX_NUM];
+
+		/** All pktio output queues used, per interface */
+		odp_queue_t queues[IF_MAX_NUM][PKTIO_MAX_OUT_QUEUES];
+
+		/** A stash that contains the shared tx_burst[][] entries.
+		 * Used when draining the available tx-burst buffers
+		 */
+		odp_stash_t tx_burst_stash;
+	} pktout;
+
+	/** Info about the em-odp queues configured for pktio, stored in a hash */
+	rx_pkt_queue_t rx_pkt_queues[MAX_RX_PKT_QUEUES];
+
+	/** Pkt lookup table: look up the destination em-odp queue for Rx pkts */
+	struct {
+		table_ops_t ops;
+		table_t tbl;
+		int tbl_idx;
+		odp_ticketlock_t lock;
+	} tbl_lookup;
+
+	/** Tx burst buffers per interface */
+	tx_burst_t tx_burst[IF_MAX_NUM][MAX_TX_BURST_BUFS] ODP_ALIGNED_CACHE;
+} pktio_shm_t;
+
+/**
+ * @brief Pktio core-local memory
+ *
+ * Collection of core-local (not shared) data used by pktio Rx&Tx
+ */
+typedef struct {
+	/** Event containing the currently used pktio input queue */
+	odp_event_t pktin_queue_event;
+	/** Determine the need for timed drain of the pktio Tx queues */
+	uint64_t tx_prev_cycles;
+	/** Array of hash keys for the currently received Rx pkt burst */
+	pkt_q_hash_key_t keys[MAX_PKT_BURST_RX];
+	/** Array of positions into rx_qbursts[], filled from hash lookup */
+	int positions[MAX_PKT_BURST_RX];
+	/** Grouping of Rx pkts per destination em-odp queue */
+	rx_queue_burst_t rx_qbursts[MAX_RX_PKT_QUEUES + 1]; /* +1=default Q */
+	/** Temporary storage of a Tx pkt burst */
+	odp_event_t ev_burst[MAX_PKT_BURST_TX];
+} pktio_locm_t;
+
+/**
+ * Reserve shared memory for pktio
+ *
+ * Must be called once at startup. Additionally, each EM-core needs to call
+ * the pktio_mem_lookup() function before using any further pktio resources.
+ */
+void pktio_mem_reserve(void);
+
+/**
+ * Lookup shared memory for pktio
+ *
+ * Must be called once by each EM-core before using any further pktio
+ * resources.
+ *
+ * @param is_thread_per_core true: EM running in thread-per-core mode
+ *                           false: EM running in process-per-core mode
+ */
+void pktio_mem_lookup(bool is_thread_per_core);
+
+void pktio_mem_free(void);
+
+void pktio_pool_create(int if_count, bool pktpool_em,
+		       bool pktin_vector, bool vecpool_em);
+void pktio_pool_destroy(bool pktpool_em, bool pktin_vector, bool vecpool_em);
+
+void pktio_init(const appl_conf_t *appl_conf);
+void pktio_deinit(const appl_conf_t *appl_conf);
+
+int pktio_create(const char *dev, pktin_mode_t in_mode, bool pktin_vector,
+		 int if_count, int num_workers);
+void pktio_start(void);
+void pktio_halt(void);
+void pktio_stop(void);
+void pktio_close(void);
+
+const char *pktin_mode_str(pktin_mode_t in_mode);
+bool pktin_polled_mode(pktin_mode_t in_mode);
+bool pktin_sched_mode(pktin_mode_t in_mode);
+
+/**
+ * @brief Poll input resources for pkts/events in DIRECT_RECV-mode
+ *        and enqueue into EM queues.
+ *
+ * Given to EM via 'em_conf.input.input_poll_fn' - EM will call this on
+ * each core in the dispatch loop.
+ * The function is of type 'em_input_poll_func_t'
+ *
+ * @return number of pkts/events received from input and enqueued into EM
+ */
+int pktin_pollfn_direct(void);
+
+/**
+ * @brief Poll input resources for pkts/events in PLAIN_QUEUE-mode
+ *        and enqueue into EM queues.
+ *
+ * Given to EM via 'em_conf.input.input_poll_fn' - EM will call this on
+ * each core in the dispatch loop.
+ * The function is of type 'em_input_poll_func_t'
+ *
+ * @return number of pkts/events received from input and enqueued into EM
+ */
+int pktin_pollfn_plainqueue(void);
+
+/**
+ * @brief Drain buffered output - ensure low rate flows are also sent out.
+ *
+ * Useful in situations where output is buffered and sent out in bursts
+ * once enough output has been gathered: single events or low-rate flows
+ * might otherwise never be sent out (or be sent too late) because the
+ * buffering threshold is not reached.
+ *
+ * Given to EM via 'em_conf.output.output_drain_fn' - EM will call this on
+ * each core in the dispatch loop.
+ * The function is of type 'em_output_drain_func_t'
+ *
+ * @return number of events successfully drained and sent for output
+ */
+int pktout_drainfn(void);
+
+/**
+ * @brief User provided EM output-queue callback function ('em_output_func_t')
+ *
+ * Transmit events (pkts) using the given config onto Eth-Tx.
+ *
+ * Buffers the given 'events' in a Tx burst buffer and, when full, transmits
+ * the whole burst from the buffer at once.
+ *
+ * @param events[]        Events to be sent
+ * @param num             Number of entries in 'events[]'
+ * @param output_queue    EM output queue the events were sent into (em_send*())
+ * @param output_fn_args  Function args specific to the output-queue
+ *                        Note: here it will be a 'pktio_tx_fn_args_t' pointer
+ *
+ * @return number of events successfully sent (equal to num if all successful)
+ */
+int pktio_tx(const em_event_t events[], const unsigned int num,
+	     const em_queue_t output_queue, void *output_fn_args);
+
+/**
+ * @typedef pktio_tx_fn_args_t
+ * User defined arguments to the EM output queue callback function
+ */
+typedef struct {
+	/** Pktio Tx interface ID */
+	int if_id;
+	/* add more if needed */
+} pktio_tx_fn_args_t;
+
+/**
+ * Associate an EM-queue with a packet-I/O flow.
+ *
+ * Received packets matching the set destination IP-addr/port
+ * will end up in the EM-queue 'queue'.
+ */
+void pktio_add_queue(uint8_t proto, uint32_t ipv4_dst, uint16_t l4_port_dst,
+		     em_queue_t queue);
+
+/**
+ * Remove the association between a packet-I/O flow and an EM-queue.
+ *
+ * No further received frames will end up in the EM-queue 'queue'
+ */
+void pktio_rem_queue(uint8_t proto, uint32_t ipv4_dst, uint16_t l4_port_dst,
+		     em_queue_t queue);
+
+/**
+ * Set the default EM-queue for packet I/O
+ */
+int pktio_default_queue(em_queue_t queue);
+
+/**
+ * Provide applications a way to do a hash-lookup (e.g. for sanity checks)
+ */
+em_queue_t pktio_lookup_sw(uint8_t proto, uint32_t ipv4_dst,
+			   uint16_t l4_port_dst);
+
+odp_pool_t pktio_pool_get(void);
+
+static inline int
+pktio_input_port(em_event_t event)
+{
+	const odp_event_t odp_event = em_odp_event2odp(event);
+	const odp_packet_t pkt = odp_packet_from_event(odp_event);
+	const int input_port = odp_packet_input_index(pkt);
+
+	if (unlikely(input_port < 0))
+		return 0;
+
+	return input_port;
+}
+
+/**
+ * Get the protocol, IPv4 destination address and destination L4 port the
+ * packet-event was sent to.
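+ *
+ * Usage sketch:
+ *
+ *	uint8_t proto;
+ *	uint32_t ipv4_dst;
+ *	uint16_t l4_port_dst;
+ *
+ *	pktio_get_dst(pktev, &proto, &ipv4_dst, &l4_port_dst);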
+ */ +static inline void +pktio_get_dst(em_event_t pktev, uint8_t *proto__out, + uint32_t *ipv4_dst__out, uint16_t *l4_port_dst__out) +{ + /* if (odp_packet_has_ipv4(pkt)) { + * ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL); + * *proto__out = ip->proto; + * *ipv4_dst__out = ntohl(ip->dst_addr); + * } else { + * *proto__out = 0; + * *ipv4_dst__out = 0; + * } + * + * if (odp_packet_has_udp(pkt)) { + * udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL); + * *port_dst__out = ntohs(udp->dst_port); + * } else { + * *port_dst__out = 0; + * } + */ + + /* Note: no actual checks if the headers are present */ + void *pkt_data = em_packet_pointer(pktev); + odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)((uintptr_t)pkt_data + sizeof(odph_ethhdr_t)); + odph_udphdr_t *udp = (odph_udphdr_t *)((uintptr_t)ip + sizeof(odph_ipv4hdr_t)); + + *proto__out = ip->proto; + *ipv4_dst__out = ntohl(ip->dst_addr); + *l4_port_dst__out = ntohs(udp->dst_port); +} + +static inline void +pktio_swap_eth_addrs(em_event_t pktev) +{ + odph_ethhdr_t *const eth = em_packet_pointer(pktev); + const odph_ethaddr_t eth_tmp_addr = eth->dst; + + eth->dst = eth->src; + eth->src = eth_tmp_addr; +} + +static inline void +pktio_swap_addrs(em_event_t pktev) +{ + /* + * Needs odp_pktio_config_t::parser.layer = ODP_PROTO_LAYER_L2 + * if (odp_packet_has_eth(pkt)) { + * eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL); + * eth_tmp_addr = eth->dst; + * eth->dst = eth->src; + * eth->src = eth_tmp_addr; + * } + * + * Needs odp_pktio_config_t::parser.layer = ODP_PROTO_LAYER_L3 + * if (odp_packet_has_ipv4(pkt)) { + * ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL); + * ip_tmp_addr = ip->src_addr; + * ip->src_addr = ip->dst_addr; + * ip->dst_addr = ip_tmp_addr; + * } + * + * Needs odp_pktio_config_t::parser.layer = ODP_PROTO_LAYER_L4 + * if (odp_packet_has_udp(pkt)) { + * udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL); + * udp_tmp_port = udp->src_port; + * udp->src_port = udp->dst_port; + * udp->dst_port = udp_tmp_port; + * } + */ + + /* Note: no actual checks if headers are present */ + void *pkt_data = em_packet_pointer(pktev); + odph_ethhdr_t *eth = (odph_ethhdr_t *)pkt_data; + odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)((uintptr_t)pkt_data + sizeof(odph_ethhdr_t)); + odph_udphdr_t *udp = (odph_udphdr_t *)((uintptr_t)ip + sizeof(odph_ipv4hdr_t)); + + odph_ethaddr_t eth_tmp_addr = eth->dst; + odp_u32be_t ip_tmp_addr = ip->src_addr; + odp_u16be_t udp_tmp_port = udp->src_port; + + eth->dst = eth->src; + eth->src = eth_tmp_addr; + + ip->src_addr = ip->dst_addr; + ip->dst_addr = ip_tmp_addr; + + udp->src_port = udp->dst_port; + udp->dst_port = udp_tmp_port; +} + +static inline em_event_t +pktio_copy_event(em_event_t event) +{ + return em_event_clone(event, EM_POOL_UNDEF); +} + +/** + * Convert an IP-address to ascii string format. 
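+ *
+ * Usage sketch:
+ *
+ *	char ip_str[sizeof("255.255.255.255")];
+ *
+ *	ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str));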
+ */ +static inline void +ipaddr_tostr(uint32_t ip_addr, char *const ip_addr_str__out, int strlen) +{ + unsigned char *const ucp = (unsigned char *)&ip_addr; + +#if ODP_BYTE_ORDER == ODP_LITTLE_ENDIAN + snprintf(ip_addr_str__out, strlen, "%d.%d.%d.%d", + ucp[3] & 0xff, ucp[2] & 0xff, ucp[1] & 0xff, ucp[0] & 0xff); +#elif ODP_BYTE_ORDER == ODP_BIG_ENDIAN + snprintf(ip_addr_str__out, strlen, "%d.%d.%d.%d", + ucp[0] & 0xff, ucp[1] & 0xff, ucp[2] & 0xff, ucp[3] & 0xff); +#else + #error ODP_BYTE_ORDER invalid +#endif + + ip_addr_str__out[strlen - 1] = '\0'; +} + +#ifdef __cplusplus +} +#endif + +#endif /* CM_PKTIO_H */ diff --git a/programs/common/cm_setup.h b/programs/common/cm_setup.h index dcc18f7d..e7ca874a 100644 --- a/programs/common/cm_setup.h +++ b/programs/common/cm_setup.h @@ -1,248 +1,248 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef CM_SETUP_H -#define CM_SETUP_H - -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef _GNU_SOURCE -#define _GNU_SOURCE -#endif -#include -#include -#include -#include -#include -#include - -#define APPL_NAME_LEN (64) - -#define APPL_POOLS_MAX (16) - -#define PLAT_PARAM_SIZE (8) - -#define MAX_THREADS (128) - -#define IF_NAME_LEN (16) - -#define IF_MAX_NUM (8) - -/** Get rid of path in filename - only for unix-type paths using '/' */ -#define NO_PATH(file_name) (strrchr((file_name), '/') ? \ - strrchr((file_name), '/') + 1 : (file_name)) - -#define APPL_LOG(level, ...) appl_log((level), ## __VA_ARGS__) -#define APPL_VLOG(level, fmt, args) appl_vlog((level), (fmt), (args)) -#define APPL_PRINT(...) APPL_LOG(EM_LOG_PRINT, ## __VA_ARGS__) - -/** Simple appl error handling: log & exit */ -#define APPL_EXIT_FAILURE(...) do { \ - appl_log(EM_LOG_ERR, \ - "Appl Error: %s:%i, %s() - ", \ - NO_PATH(__FILE__), __LINE__, __func__); \ - appl_log(EM_LOG_ERR, ## __VA_ARGS__); \ - appl_log(EM_LOG_ERR, "\n\n"); \ - exit(EXIT_FAILURE); \ -} while (0) - -#define APPL_ERROR(...) 
do { \ - appl_log(EM_LOG_ERR, \ - "Appl Error: %s:%i, %s() - ", \ - NO_PATH(__FILE__), __LINE__, __func__); \ - appl_log(EM_LOG_ERR, ## __VA_ARGS__); \ - appl_log(EM_LOG_ERR, "\n\n"); \ -} while (0) - -/** - * Application synchronization - */ -typedef struct { - /** Startup synchronization barrier */ - odp_barrier_t start_barrier; - /** Exit / termination synchronization barrier */ - odp_barrier_t exit_barrier; - /** Enter counter for tracking core / odp-thread startup */ - env_atomic64_t enter_count; - /** Exit counter for tracking core / odp-thread exit */ - env_atomic64_t exit_count; -} sync_t; - -/** - * @brief Application startup mode - * - * Enables testing of different startup scenarios. - */ -typedef enum startup_mode { - /** - * Start up & initialize all EM cores before setting up the - * application using EM APIs. The em_init() function has been run and - * all EM-cores have run em_init_core() before application setup. - * Option: -s, --startup-mode = 0 (All EM-cores before application) - */ - STARTUP_ALL_CORES = 0, - /** - * Start up & initialize only one EM core before setting up the - * application using EM APIs. The em_init() function has been run and - * only one EM-core has run em_init_core() before application setup. - * Option: -s, --startup-mode = 1 (One EM-core before application...)) - */ - STARTUP_ONE_CORE_FIRST -} startup_mode_t; - -/** - * @brief Packet input mode - * - * Enables testing different packet-IO input modes - */ -typedef enum pktin_mode_t { - DIRECT_RECV, - PLAIN_QUEUE, - SCHED_PARALLEL, - SCHED_ATOMIC, - SCHED_ORDERED -} pktin_mode_t; - -/** - * @brief Application packet I/O configuration - */ -typedef struct { - /** Packet input mode */ - pktin_mode_t in_mode; - /** Interface count */ - int if_count; - /** Interface names + placeholder for '\0' */ - char if_name[IF_MAX_NUM][IF_NAME_LEN + 1]; - /** Interface identifiers corresponding to 'if_name[]' */ - int if_ids[IF_MAX_NUM]; - /** - * Pktio is setup with an EM event-pool: 'true' - * Pktio is setup with an ODP pkt-pool: 'false' - */ - bool pktpool_em; - - /** Packet input vectors enabled (true/false) */ - bool pktin_vector; - /** - * If pktin_vector: - * Pktio is setup with an EM vector-pool: 'true' - * Pktio is setup with an ODP vector-pool: 'false' - */ - bool vecpool_em; -} pktio_conf_t; - -/** - * @brief Application configuration - */ -typedef struct { - /** application name */ - char name[APPL_NAME_LEN]; - /** Number of EM cores to create */ - unsigned int core_count; - /** number of processes */ - unsigned int num_procs; - /** number of threads */ - unsigned int num_threads; - /** Start-up mode */ - startup_mode_t startup_mode; - - /** dispatch rounds before returning: using em_dispatch(dispatch_rounds) */ - uint64_t dispatch_rounds; - - /** dispatch with options using em_dispatch_duration(duration, opt, ...) 
*/ - struct { - /** Use em_dispatch_duration() function: true/false */ - bool in_use; - em_dispatch_duration_t duration; - em_dispatch_opt_t opt; - } dispatch_duration; - - /** number of memory pools set up for the application */ - unsigned int num_pools; - /** pool ids of the created application pools */ - em_pool_t pools[APPL_POOLS_MAX]; - - /** Packet I/O parameters */ - pktio_conf_t pktio; -} appl_conf_t; - -/** Application shared memory - allocate in single chunk */ -typedef struct { - /** EM configuration*/ - em_conf_t em_conf; - /** Application configuration */ - appl_conf_t appl_conf; - /** Exit the EM-core dispatch loop if set to 1, set by SIGINT handler */ - sig_atomic_t exit_flag; - /** ODP-thread table (from shared memory for process-per-core mode) */ - odph_thread_t thread_tbl[MAX_THREADS]; - /** Application synchronization vars */ - sync_t sync ENV_CACHE_LINE_ALIGNED; - /* Pad size to a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} appl_shm_t; - -/** - * Global pointer to common application shared memory - */ -extern appl_shm_t *appl_shm; - -/** - * Common setup function for the appliations, - * usually called directly from main(). - */ -int cm_setup(int argc, char *argv[]); - -/** - * All examples implement the test_init(), test_start(), test_stop() and - * test_term() functions to keep common main() function. - */ -void test_init(const appl_conf_t *appl_conf); - -void test_start(const appl_conf_t *appl_conf); - -void test_stop(const appl_conf_t *appl_conf); - -void test_term(const appl_conf_t *appl_conf); - -int appl_vlog(em_log_level_t level, const char *fmt, va_list args); - -__attribute__((format(printf, 2, 3))) -int appl_log(em_log_level_t level, const char *fmt, ...); - -void delay_spin(const uint64_t spin_count); - -#ifdef __cplusplus -} -#endif - -#endif +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef CM_SETUP_H +#define CM_SETUP_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include +#include +#include +#include +#include +#include + +#define APPL_NAME_LEN (64) + +#define APPL_POOLS_MAX (16) + +#define PLAT_PARAM_SIZE (8) + +#define MAX_THREADS (128) + +#define IF_NAME_LEN (16) + +#define IF_MAX_NUM (8) + +/** Get rid of path in filename - only for unix-type paths using '/' */ +#define NO_PATH(file_name) (strrchr((file_name), '/') ? \ + strrchr((file_name), '/') + 1 : (file_name)) + +#define APPL_LOG(level, ...) appl_log((level), ## __VA_ARGS__) +#define APPL_VLOG(level, fmt, args) appl_vlog((level), (fmt), (args)) +#define APPL_PRINT(...) APPL_LOG(EM_LOG_PRINT, ## __VA_ARGS__) + +/** Simple appl error handling: log & exit */ +#define APPL_EXIT_FAILURE(...) do { \ + appl_log(EM_LOG_ERR, \ + "Appl Error: %s:%i, %s() - ", \ + NO_PATH(__FILE__), __LINE__, __func__); \ + appl_log(EM_LOG_ERR, ## __VA_ARGS__); \ + appl_log(EM_LOG_ERR, "\n\n"); \ + exit(EXIT_FAILURE); \ +} while (0) + +#define APPL_ERROR(...) do { \ + appl_log(EM_LOG_ERR, \ + "Appl Error: %s:%i, %s() - ", \ + NO_PATH(__FILE__), __LINE__, __func__); \ + appl_log(EM_LOG_ERR, ## __VA_ARGS__); \ + appl_log(EM_LOG_ERR, "\n\n"); \ +} while (0) + +/** + * Application synchronization + */ +typedef struct { + /** Startup synchronization barrier */ + odp_barrier_t start_barrier; + /** Exit / termination synchronization barrier */ + odp_barrier_t exit_barrier; + /** Enter counter for tracking core / odp-thread startup */ + env_atomic64_t enter_count; + /** Exit counter for tracking core / odp-thread exit */ + env_atomic64_t exit_count; +} sync_t; + +/** + * @brief Application startup mode + * + * Enables testing of different startup scenarios. + */ +typedef enum startup_mode { + /** + * Start up & initialize all EM cores before setting up the + * application using EM APIs. The em_init() function has been run and + * all EM-cores have run em_init_core() before application setup. + * Option: -s, --startup-mode = 0 (All EM-cores before application) + */ + STARTUP_ALL_CORES = 0, + /** + * Start up & initialize only one EM core before setting up the + * application using EM APIs. The em_init() function has been run and + * only one EM-core has run em_init_core() before application setup. 
+	 * Option: -s, --startup-mode = 1 (One EM-core before application...)
+	 */
+	STARTUP_ONE_CORE_FIRST
+} startup_mode_t;
+
+/**
+ * @brief Packet input mode
+ *
+ * Enables testing different packet-IO input modes
+ */
+typedef enum pktin_mode_t {
+	DIRECT_RECV,
+	PLAIN_QUEUE,
+	SCHED_PARALLEL,
+	SCHED_ATOMIC,
+	SCHED_ORDERED
+} pktin_mode_t;
+
+/**
+ * @brief Application packet I/O configuration
+ */
+typedef struct {
+	/** Packet input mode */
+	pktin_mode_t in_mode;
+	/** Interface count */
+	int if_count;
+	/** Interface names + placeholder for '\0' */
+	char if_name[IF_MAX_NUM][IF_NAME_LEN + 1];
+	/** Interface identifiers corresponding to 'if_name[]' */
+	int if_ids[IF_MAX_NUM];
+	/**
+	 * Pktio is set up with an EM event-pool: 'true'
+	 * Pktio is set up with an ODP pkt-pool: 'false'
+	 */
+	bool pktpool_em;
+
+	/** Packet input vectors enabled (true/false) */
+	bool pktin_vector;
+	/**
+	 * If pktin_vector:
+	 * Pktio is set up with an EM vector-pool: 'true'
+	 * Pktio is set up with an ODP vector-pool: 'false'
+	 */
+	bool vecpool_em;
+} pktio_conf_t;
+
+/**
+ * @brief Application configuration
+ */
+typedef struct {
+	/** application name */
+	char name[APPL_NAME_LEN];
+	/** Number of EM cores to create */
+	unsigned int core_count;
+	/** number of processes */
+	unsigned int num_procs;
+	/** number of threads */
+	unsigned int num_threads;
+	/** Start-up mode */
+	startup_mode_t startup_mode;
+
+	/** dispatch rounds before returning: using em_dispatch(dispatch_rounds) */
+	uint64_t dispatch_rounds;
+
+	/** dispatch with options using em_dispatch_duration(duration, opt, ...) */
+	struct {
+		/** Use em_dispatch_duration() function: true/false */
+		bool in_use;
+		em_dispatch_duration_t duration;
+		em_dispatch_opt_t opt;
+	} dispatch_duration;
+
+	/** number of memory pools set up for the application */
+	unsigned int num_pools;
+	/** pool ids of the created application pools */
+	em_pool_t pools[APPL_POOLS_MAX];
+
+	/** Packet I/O parameters */
+	pktio_conf_t pktio;
+} appl_conf_t;
+
+/** Application shared memory - allocate in a single chunk */
+typedef struct {
+	/** EM configuration */
+	em_conf_t em_conf;
+	/** Application configuration */
+	appl_conf_t appl_conf;
+	/** Exit the EM-core dispatch loop if set to 1, set by SIGINT handler */
+	sig_atomic_t exit_flag;
+	/** ODP-thread table (from shared memory for process-per-core mode) */
+	odph_thread_t thread_tbl[MAX_THREADS];
+	/** Application synchronization vars */
+	sync_t sync ENV_CACHE_LINE_ALIGNED;
+	/* Pad size to a multiple of cache line size */
+	void *end[0] ENV_CACHE_LINE_ALIGNED;
+} appl_shm_t;
+
+/**
+ * Global pointer to common application shared memory
+ */
+extern appl_shm_t *appl_shm;
+
+/**
+ * Common setup function for the applications,
+ * usually called directly from main().
+ */
+int cm_setup(int argc, char *argv[]);
+
+/**
+ * All examples implement the test_init(), test_start(), test_stop() and
+ * test_term() functions to keep a common main() function.
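+ *
+ * A typical example main() is then just (sketch):
+ *
+ *	int main(int argc, char *argv[])
+ *	{
+ *		return cm_setup(argc, argv);
+ *	}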
+ */ +void test_init(const appl_conf_t *appl_conf); + +void test_start(const appl_conf_t *appl_conf); + +void test_stop(const appl_conf_t *appl_conf); + +void test_term(const appl_conf_t *appl_conf); + +int appl_vlog(em_log_level_t level, const char *fmt, va_list args); + +__attribute__((format(printf, 2, 3))) +int appl_log(em_log_level_t level, const char *fmt, ...); + +void delay_spin(const uint64_t spin_count); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/programs/common/cuckootable.c b/programs/common/cuckootable.c index 0c7ada22..c36c59d0 100644 --- a/programs/common/cuckootable.c +++ b/programs/common/cuckootable.c @@ -1,757 +1,757 @@ -/* - * Copyright (c) 2024, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright (c) 2016-2018 Linaro Limited - */ - -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <stdint.h>
-#include <errno.h>
-
-#include <odp_api.h>
-#include <odp/helper/odph_api.h>
-
-#include "table.h"
-#include "cuckootable.h"
-
-/**
- * @magic word, write to the first byte of the memory block
- * to indicate this block is used by a cuckoo hash table
- */
-#define CUCKOO_TABLE_MAGIC_WORD 0xDFDFFDFD
-
-/** Number of items per bucket. */
-#define HASH_BUCKET_ENTRIES 4
-
-#define NULL_SIGNATURE 0
-#define KEY_ALIGNMENT 16
-
-/** Maximum size of hash table that can be created. */
-#define HASH_ENTRIES_MAX 1048576
-
-/**
- * @internal signature struct
- * Structure storing both primary and secondary hashes
- */
-struct cuckoo_table_signatures {
-	union {
-		struct {
-			uint32_t current;
-			uint32_t alt;
-		};
-		uint64_t sig;
-	};
-};
-
-/**
- * @internal key-value struct
- * Structure that stores key-value pair
- */
-struct cuckoo_table_key_value {
-	uint8_t *key;
-	uint8_t *value;
-};
-
-/**
- * @internal bucket structure
- * Put the elements with different keys but same signature into a bucket.
- * Each bucket has at most HASH_BUCKET_ENTRIES elements.
- */
-struct ODP_ALIGNED_CACHE cuckoo_table_bucket {
-	struct cuckoo_table_signatures signatures[HASH_BUCKET_ENTRIES];
-	/* Includes dummy key index that always contains index 0 */
-	odp_buffer_t key_buf[HASH_BUCKET_ENTRIES + 1];
-	uint8_t flag[HASH_BUCKET_ENTRIES];
-};
-
-/** A hash table structure. */
-typedef struct ODP_ALIGNED_CACHE {
-	/** for check */
-	uint32_t magicword;
-	/** Name of the hash. */
-	char name[TABLE_NAME_LEN];
-	/** Total table entries. */
-	uint32_t entries;
-	/** Number of buckets in table. */
-	uint32_t num_buckets;
-	/** Length of hash key. */
-	uint32_t key_len;
-	/** Length of value. */
-	uint32_t value_len;
-	/** Bitmask for getting bucket index from hash signature. 
*/ - uint32_t bucket_bitmask; - /** Queue that stores all free key-value slots*/ - odp_queue_t free_slots; - /** Table with buckets storing all the hash values and key indexes to the key table */ - struct cuckoo_table_bucket *buckets; -} cuckoo_table_impl; - -/** - * Aligns input parameter to the next power of 2 - * - * @param x - * The integer value to algin - * - * @return - * Input parameter aligned to the next power of 2 - */ -static inline uint32_t -align32pow2(uint32_t x) -{ - x--; - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - x |= x >> 8; - x |= x >> 16; - - return x + 1; -} - -table_t cuckoo_table_lookup(const char *name) -{ - cuckoo_table_impl *tbl = NULL; - odp_shm_t shm; - - if (name == NULL || strlen(name) >= TABLE_NAME_LEN) - return NULL; - - shm = odp_shm_lookup(name); - if (shm != ODP_SHM_INVALID) - tbl = odp_shm_addr(shm); - if (!tbl || tbl->magicword != CUCKOO_TABLE_MAGIC_WORD) - return NULL; - - if (strcmp(tbl->name, name)) - return NULL; - - return (table_t)tbl; -} - -table_t cuckoo_table_create(const char *name, uint32_t capacity, - uint32_t key_size, uint32_t value_size) -{ - cuckoo_table_impl *tbl; - odp_shm_t shm_tbl; - - odp_pool_t pool; - odp_pool_param_t param; - - odp_queue_t queue; - odp_queue_param_t qparam; - odp_queue_capability_t qcapa; - odp_pool_capability_t pcapa; - - char pool_name[TABLE_NAME_LEN + 3]; - char queue_name[TABLE_NAME_LEN + 3]; - uint32_t impl_size; - uint32_t kv_entry_size; - uint32_t bucket_num; - uint32_t bucket_size; - - if (odp_queue_capability(&qcapa)) { - ODPH_DBG("queue capa failed\n"); - return NULL; - } - - if (qcapa.plain.max_size && qcapa.plain.max_size < capacity) { - ODPH_DBG("queue max_size too small\n"); - return NULL; - } - - if (odp_pool_capability(&pcapa)) { - ODPH_DBG("pool capa failed\n"); - return NULL; - } - - if (pcapa.buf.max_num && pcapa.buf.max_num < capacity) { - ODPH_DBG("pool max_num too small\n"); - return NULL; - } - - /* Check for valid parameters */ - if (capacity > HASH_ENTRIES_MAX || capacity < HASH_BUCKET_ENTRIES || - key_size == 0 || strlen(name) == 0) { - ODPH_DBG("invalid parameters\n"); - return NULL; - } - - /* Guarantee there's no existing */ - tbl = (cuckoo_table_impl *)(void *)cuckoo_table_lookup(name); - if (tbl != NULL) { - ODPH_DBG("cuckoo hash table %s already exists\n", name); - return NULL; - } - - /* Calculate the sizes of different parts of cuckoo hash table */ - impl_size = sizeof(cuckoo_table_impl); - kv_entry_size = sizeof(struct cuckoo_table_key_value) + key_size + value_size; - - bucket_num = align32pow2(capacity) / HASH_BUCKET_ENTRIES; - bucket_size = bucket_num * sizeof(struct cuckoo_table_bucket); - - shm_tbl = odp_shm_reserve(name, impl_size + bucket_size, ODP_CACHE_LINE_SIZE, 0); - if (shm_tbl == ODP_SHM_INVALID) { - ODPH_DBG("shm allocation failed for cuckoo_table_impl %s\n", name); - return NULL; - } - - tbl = odp_shm_addr(shm_tbl); - memset(tbl, 0, impl_size + bucket_size); - - /* header of this mem block is the table impl struct, - * then the bucket pool. 
- */ - tbl->buckets = (void *)((char *)tbl + impl_size); - - /* initialize key-value buffer pool */ - snprintf(pool_name, sizeof(pool_name), "kv_%s", name); - pool = odp_pool_lookup(pool_name); - - if (pool != ODP_POOL_INVALID) { - if (odp_pool_destroy(pool)) { - odp_shm_free(shm_tbl); - ODPH_DBG("failed to destroy pre-existing pool\n"); - return NULL; - } - } - - odp_pool_param_init(¶m); - param.type = ODP_POOL_BUFFER; - param.buf.size = kv_entry_size; - if (pcapa.buf.max_align >= ODP_CACHE_LINE_SIZE) - param.buf.align = ODP_CACHE_LINE_SIZE; - param.buf.num = capacity; - - pool = odp_pool_create(pool_name, ¶m); - - if (pool == ODP_POOL_INVALID) { - ODPH_DBG("failed to create key-value pool\n"); - odp_shm_free(shm_tbl); - return NULL; - } - - /* initialize free_slots queue */ - odp_queue_param_init(&qparam); - qparam.type = ODP_QUEUE_TYPE_PLAIN; - qparam.size = capacity; - - snprintf(queue_name, sizeof(queue_name), "fs_%s", name); - queue = odp_queue_create(queue_name, &qparam); - if (queue == ODP_QUEUE_INVALID) { - ODPH_DBG("failed to create free_slots queue\n"); - (void)odp_pool_destroy(pool); - odp_shm_free(shm_tbl); - return NULL; - } - - /* Setup hash context */ - snprintf(tbl->name, sizeof(tbl->name), "%s", name); - tbl->magicword = CUCKOO_TABLE_MAGIC_WORD; - tbl->entries = capacity; - tbl->key_len = key_size; - tbl->value_len = value_size; - tbl->num_buckets = bucket_num; - tbl->bucket_bitmask = bucket_num - 1; - tbl->free_slots = queue; - - /* generate all free buffers, and put into queue */ - for (uint32_t i = 0; i < capacity; i++) { - odp_event_t ev = odp_buffer_to_event(odp_buffer_alloc(pool)); - - if (ev == ODP_EVENT_INVALID) { - ODPH_DBG("failed to generate free slots\n"); - cuckoo_table_destroy((table_t)tbl); - return NULL; - } - - if (odp_queue_enq(queue, ev) < 0) { - ODPH_DBG("failed to enqueue free slots\n"); - cuckoo_table_destroy((table_t)tbl); - return NULL; - } - } - - return (table_t)tbl; -} - -int cuckoo_table_destroy(table_t tbl) -{ - int ret; - cuckoo_table_impl *impl = NULL; - char pool_name[TABLE_NAME_LEN + 3]; - odp_event_t ev; - odp_shm_t shm; - odp_pool_t pool; - uint32_t i, j; - - if (tbl == NULL) - return -1; - - impl = (cuckoo_table_impl *)(void *)tbl; - - /* check magic word */ - if (impl->magicword != CUCKOO_TABLE_MAGIC_WORD) { - ODPH_DBG("wrong magicword for cuckoo table\n"); - return -1; - } - - /* free all used buffers*/ - for (i = 0; i < impl->num_buckets; i++) { - for (j = 0; j < HASH_BUCKET_ENTRIES; j++) { - if (impl->buckets[i].signatures[j].current != NULL_SIGNATURE) - odp_buffer_free(impl->buckets[i].key_buf[j]); - } - } - - /* free all free buffers */ - while ((ev = odp_queue_deq(impl->free_slots)) != ODP_EVENT_INVALID) - odp_buffer_free(odp_buffer_from_event(ev)); - - /* destroy free_slots queue */ - ret = odp_queue_destroy(impl->free_slots); - if (ret < 0) - ODPH_DBG("failed to destroy free_slots queue\n"); - - /* destroy key-value pool */ - snprintf(pool_name, sizeof(pool_name), "kv_%s", impl->name); - pool = odp_pool_lookup(pool_name); - if (pool == ODP_POOL_INVALID) { - ODPH_DBG("invalid pool\n"); - return -1; - } - - ret = odp_pool_destroy(pool); - if (ret != 0) { - ODPH_DBG("failed to destroy key-value buffer pool\n"); - return -1; - } - - /* free impl */ - shm = odp_shm_lookup(impl->name); - if (shm == ODP_SHM_INVALID) { - ODPH_DBG("unable look up shm\n"); - return -1; - } - - return odp_shm_free(shm); -} - -static uint32_t hash(const cuckoo_table_impl *h, const void *key) -{ - /* calc hash result by key */ - return odp_hash_crc32c(key, 
h->key_len, 0); -} - -/* Calc the secondary hash value from the primary hash value of a given key */ -static inline uint32_t -hash_secondary(const uint32_t primary_hash) -{ - static const unsigned int all_bits_shift = 12; - static const unsigned int alt_bits_xor = 0x5bd1e995; - - uint32_t tag = primary_hash >> all_bits_shift; - - return (primary_hash ^ ((tag + 1) * alt_bits_xor)); -} - -/* Search for an entry that can be pushed to its alternative location */ -static inline int -make_space_bucket(const cuckoo_table_impl *impl, - struct cuckoo_table_bucket *bkt) -{ - unsigned int i; - unsigned int j; - int ret; - uint32_t next_bucket_idx; - struct cuckoo_table_bucket *next_bkt[HASH_BUCKET_ENTRIES]; - - /* - * Push existing item (search for bucket with space in - * alternative locations) to its alternative location - */ - for (i = 0; i < HASH_BUCKET_ENTRIES; i++) { - /* Search for space in alternative locations */ - next_bucket_idx = bkt->signatures[i].alt & impl->bucket_bitmask; - next_bkt[i] = &impl->buckets[next_bucket_idx]; - for (j = 0; j < HASH_BUCKET_ENTRIES; j++) { - if (next_bkt[i]->signatures[j].sig == NULL_SIGNATURE) - break; - } - - if (j != HASH_BUCKET_ENTRIES) - break; - } - - /* Alternative location has spare room (end of recursive function) */ - if (i != HASH_BUCKET_ENTRIES) { - next_bkt[i]->signatures[j].alt = bkt->signatures[i].current; - next_bkt[i]->signatures[j].current = bkt->signatures[i].alt; - next_bkt[i]->key_buf[j] = bkt->key_buf[i]; - return i; - } - - /* Pick entry that has not been pushed yet */ - for (i = 0; i < HASH_BUCKET_ENTRIES; i++) - if (bkt->flag[i] == 0) - break; - - /* All entries have been pushed, so entry cannot be added */ - if (i == HASH_BUCKET_ENTRIES) - return -ENOSPC; - - /* Set flag to indicate that this entry is going to be pushed */ - bkt->flag[i] = 1; - /* Need room in alternative bucket to insert the pushed entry */ - ret = make_space_bucket(impl, next_bkt[i]); - /* - * After recursive function. 
- * Clear flags and insert the pushed entry - * in its alternative location if successful, - * or return error - */ - bkt->flag[i] = 0; - if (ret >= 0) { - next_bkt[i]->signatures[ret].alt = bkt->signatures[i].current; - next_bkt[i]->signatures[ret].current = bkt->signatures[i].alt; - next_bkt[i]->key_buf[ret] = bkt->key_buf[i]; - return i; - } - - return ret; -} - -static inline int32_t -cuckoo_table_add_key_with_hash(const cuckoo_table_impl *h, - const void *key, uint32_t sig, void *data) -{ - uint32_t alt_hash; - uint32_t prim_bucket_idx, sec_bucket_idx; - unsigned int i; - struct cuckoo_table_bucket *prim_bkt, *sec_bkt; - struct cuckoo_table_key_value *new_kv, *kv; - - odp_buffer_t new_buf; - int ret; - - prim_bucket_idx = sig & h->bucket_bitmask; - prim_bkt = &h->buckets[prim_bucket_idx]; - __builtin_prefetch((const void *)(uintptr_t)prim_bkt, 0, 3); - - alt_hash = hash_secondary(sig); - sec_bucket_idx = alt_hash & h->bucket_bitmask; - sec_bkt = &h->buckets[sec_bucket_idx]; - __builtin_prefetch((const void *)(uintptr_t)sec_bkt, 0, 3); - - /* Get a new slot for storing the new key */ - new_buf = odp_buffer_from_event(odp_queue_deq(h->free_slots)); - if (new_buf == ODP_BUFFER_INVALID) - return -ENOSPC; - - /* Check if key is already inserted in primary location */ - for (i = 0; i < HASH_BUCKET_ENTRIES; i++) { - if (prim_bkt->signatures[i].current == sig && - prim_bkt->signatures[i].alt == alt_hash) { - kv = (struct cuckoo_table_key_value *)odp_buffer_addr(prim_bkt->key_buf[i]); - if (memcmp(key, kv->key, h->key_len) == 0) { - odp_queue_enq(h->free_slots, odp_buffer_to_event(new_buf)); - /* Update data */ - if (kv->value != NULL) - memcpy(kv->value, data, h->value_len); - /* Return bucket index */ - return prim_bucket_idx; - } - } - } - - /* Check if key is already inserted in secondary location */ - for (i = 0; i < HASH_BUCKET_ENTRIES; i++) { - if (sec_bkt->signatures[i].alt == sig && - sec_bkt->signatures[i].current == alt_hash) { - kv = (struct cuckoo_table_key_value *)odp_buffer_addr(sec_bkt->key_buf[i]); - if (memcmp(key, kv->key, h->key_len) == 0) { - odp_queue_enq(h->free_slots, odp_buffer_to_event(new_buf)); - /* Update data */ - if (kv->value != NULL) - memcpy(kv->value, data, h->value_len); - /* Return bucket index */ - return sec_bucket_idx; - } - } - } - - new_kv = (struct cuckoo_table_key_value *)odp_buffer_addr(new_buf); - __builtin_prefetch((const void *)(uintptr_t)new_kv, 0, 3); - - /* Copy key and value. - * key-value mem block : struct cuckoo_table_key_value - * + key (key_len) + value (value_len) - */ - new_kv->key = (uint8_t *)new_kv + sizeof(struct cuckoo_table_key_value); - memcpy(new_kv->key, key, h->key_len); - - if (h->value_len > 0) { - new_kv->value = new_kv->key + h->key_len; - memcpy(new_kv->value, data, h->value_len); - } else { - new_kv->value = NULL; - } - - /* Insert new entry is there is room in the primary bucket */ - for (i = 0; i < HASH_BUCKET_ENTRIES; i++) { - /* Check if slot is available */ - if (odp_likely(prim_bkt->signatures[i].sig == NULL_SIGNATURE)) { - prim_bkt->signatures[i].current = sig; - prim_bkt->signatures[i].alt = alt_hash; - prim_bkt->key_buf[i] = new_buf; - return prim_bucket_idx; - } - } - - /* Primary bucket is full, so we need to make space for new entry */ - ret = make_space_bucket(h, prim_bkt); - - /* - * After recursive function. 
- * Insert the new entry in the position of the pushed entry - * if successful or return error and - * store the new slot back in the pool - */ - if (ret >= 0) { - prim_bkt->signatures[ret].current = sig; - prim_bkt->signatures[ret].alt = alt_hash; - prim_bkt->key_buf[ret] = new_buf; - return prim_bucket_idx; - } - - /* Error in addition, store new slot back in the free_slots */ - odp_queue_enq(h->free_slots, odp_buffer_to_event(new_buf)); - - return ret; -} - -int cuckoo_table_put_value(table_t tbl, void *key, void *value) -{ - cuckoo_table_impl *impl; - int ret; - - if (tbl == NULL || key == NULL) - return -EINVAL; - - impl = (cuckoo_table_impl *)(void *)tbl; - ret = cuckoo_table_add_key_with_hash(impl, key, hash(impl, key), value); - - if (ret < 0) - return -1; - - return 0; -} - -static inline int32_t -cuckoo_table_lookup_with_hash(const cuckoo_table_impl *h, const void *key, - uint32_t sig, void **data_ptr) -{ - uint32_t bucket_idx; - uint32_t alt_hash; - unsigned int i; - struct cuckoo_table_bucket *bkt; - struct cuckoo_table_key_value *kv; - - bucket_idx = sig & h->bucket_bitmask; - bkt = &h->buckets[bucket_idx]; - - /* Check if key is in primary location */ - for (i = 0; i < HASH_BUCKET_ENTRIES; i++) { - if (bkt->signatures[i].current == sig && - bkt->signatures[i].sig != NULL_SIGNATURE) { - kv = (struct cuckoo_table_key_value *)odp_buffer_addr(bkt->key_buf[i]); - if (memcmp(key, kv->key, h->key_len) == 0) { - if (data_ptr != NULL) - *data_ptr = kv->value; - /* - * Return index where key is stored, - * subtracting the first dummy index - */ - return bucket_idx; - } - } - } - - /* Calculate secondary hash */ - alt_hash = hash_secondary(sig); - bucket_idx = alt_hash & h->bucket_bitmask; - bkt = &h->buckets[bucket_idx]; - - /* Check if key is in secondary location */ - for (i = 0; i < HASH_BUCKET_ENTRIES; i++) { - if (bkt->signatures[i].current == alt_hash && - bkt->signatures[i].alt == sig) { - kv = (struct cuckoo_table_key_value *)odp_buffer_addr(bkt->key_buf[i]); - if (memcmp(key, kv->key, h->key_len) == 0) { - if (data_ptr != NULL) - *data_ptr = kv->value; - /* - * Return index where key is stored, - * subtracting the first dummy index - */ - return bucket_idx; - } - } - } - - return -ENOENT; -} - -int cuckoo_table_get_value(table_t tbl, void *key, void *buffer, - uint32_t buffer_size ODP_UNUSED) -{ - cuckoo_table_impl *impl = (cuckoo_table_impl *)(void *)tbl; - void *tmp = NULL; - int ret; - - if (tbl == NULL || key == NULL) - return -EINVAL; - - ret = cuckoo_table_lookup_with_hash(impl, key, hash(impl, key), &tmp); - - if (ret < 0) - return -1; - - if (impl->value_len > 0) - memcpy(buffer, tmp, impl->value_len); - - return 0; -} - -static inline int32_t -cuckoo_table_del_key_with_hash(const cuckoo_table_impl *h, - const void *key, uint32_t sig) -{ - uint32_t bucket_idx; - uint32_t alt_hash; - unsigned int i; - struct cuckoo_table_bucket *bkt; - struct cuckoo_table_key_value *kv; - - bucket_idx = sig & h->bucket_bitmask; - bkt = &h->buckets[bucket_idx]; - - /* Check if key is in primary location */ - for (i = 0; i < HASH_BUCKET_ENTRIES; i++) { - if (bkt->signatures[i].current == sig && - bkt->signatures[i].sig != NULL_SIGNATURE) { - kv = (struct cuckoo_table_key_value *)odp_buffer_addr(bkt->key_buf[i]); - if (memcmp(key, kv->key, h->key_len) == 0) { - bkt->signatures[i].sig = NULL_SIGNATURE; - odp_queue_enq(h->free_slots, odp_buffer_to_event(bkt->key_buf[i])); - return bucket_idx; - } - } - } - - /* Calculate secondary hash */ - alt_hash = hash_secondary(sig); - bucket_idx = 
alt_hash & h->bucket_bitmask; - bkt = &h->buckets[bucket_idx]; - - /* Check if key is in secondary location */ - for (i = 0; i < HASH_BUCKET_ENTRIES; i++) { - if (bkt->signatures[i].current == alt_hash && - bkt->signatures[i].sig != NULL_SIGNATURE) { - kv = (struct cuckoo_table_key_value *)odp_buffer_addr(bkt->key_buf[i]); - if (memcmp(key, kv->key, h->key_len) == 0) { - bkt->signatures[i].sig = NULL_SIGNATURE; - odp_queue_enq(h->free_slots, odp_buffer_to_event(bkt->key_buf[i])); - return bucket_idx; - } - } - } - - return -ENOENT; -} - -int cuckoo_table_remove_value(table_t tbl, void *key) -{ - cuckoo_table_impl *impl = (void *)tbl; - int ret; - - if (tbl == NULL || key == NULL) - return -EINVAL; - - ret = cuckoo_table_del_key_with_hash(impl, key, hash(impl, key)); - if (ret < 0) - return -1; - - return 0; -} - -table_ops_t cuckoo_table_ops = { - cuckoo_table_create, - cuckoo_table_lookup, - cuckoo_table_destroy, - cuckoo_table_put_value, - cuckoo_table_get_value, - cuckoo_table_remove_value -}; +/* + * Copyright (c) 2024, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016-2018 Linaro Limited + */ + +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#include "table.h"
+#include "cuckootable.h"
+
+/**
+ * Magic word, written to the start of the memory block
+ * to indicate that the block is used by a cuckoo hash table
+ */
+#define CUCKOO_TABLE_MAGIC_WORD 0xDFDFFDFD
+
+/** Number of items per bucket. */
+#define HASH_BUCKET_ENTRIES 4
+
+#define NULL_SIGNATURE 0
+#define KEY_ALIGNMENT 16
+
+/** Maximum size of hash table that can be created. */
+#define HASH_ENTRIES_MAX 1048576
+
+/**
+ * @internal signature struct
+ * Structure storing both primary and secondary hashes
+ */
+struct cuckoo_table_signatures {
+	union {
+		struct {
+			uint32_t current;
+			uint32_t alt;
+		};
+		uint64_t sig;
+	};
+};
+
+/**
+ * @internal key-value struct
+ * Structure that stores key-value pair
+ */
+struct cuckoo_table_key_value {
+	uint8_t *key;
+	uint8_t *value;
+};
+
+/**
+ * @internal bucket structure
+ * Put the elements with different keys but same signature into a bucket.
+ * Each bucket has at most HASH_BUCKET_ENTRIES elements.
+ */
+struct ODP_ALIGNED_CACHE cuckoo_table_bucket {
+	struct cuckoo_table_signatures signatures[HASH_BUCKET_ENTRIES];
+	/* Includes dummy key index that always contains index 0 */
+	odp_buffer_t key_buf[HASH_BUCKET_ENTRIES + 1];
+	uint8_t flag[HASH_BUCKET_ENTRIES];
+};
+
+/** A hash table structure. */
+typedef struct ODP_ALIGNED_CACHE {
+	/** for check */
+	uint32_t magicword;
+	/** Name of the hash. */
+	char name[TABLE_NAME_LEN];
+	/** Total table entries. */
+	uint32_t entries;
+	/** Number of buckets in table. */
+	uint32_t num_buckets;
+	/** Length of hash key. */
+	uint32_t key_len;
+	/** Length of value. */
+	uint32_t value_len;
+	/** Bitmask for getting bucket index from hash signature. 
*/
+	uint32_t bucket_bitmask;
+	/** Queue that stores all free key-value slots */
+	odp_queue_t free_slots;
+	/** Table with buckets storing all the hash values and key indexes to the key table */
+	struct cuckoo_table_bucket *buckets;
+} cuckoo_table_impl;
+
+/**
+ * Aligns input parameter to the next power of 2
+ *
+ * @param x
+ *   The integer value to align
+ *
+ * @return
+ *   Input parameter aligned to the next power of 2
+ */
+static inline uint32_t
+align32pow2(uint32_t x)
+{
+	x--;
+	x |= x >> 1;
+	x |= x >> 2;
+	x |= x >> 4;
+	x |= x >> 8;
+	x |= x >> 16;
+
+	return x + 1;
+}
+
+table_t cuckoo_table_lookup(const char *name)
+{
+	cuckoo_table_impl *tbl = NULL;
+	odp_shm_t shm;
+
+	if (name == NULL || strlen(name) >= TABLE_NAME_LEN)
+		return NULL;
+
+	shm = odp_shm_lookup(name);
+	if (shm != ODP_SHM_INVALID)
+		tbl = odp_shm_addr(shm);
+	if (!tbl || tbl->magicword != CUCKOO_TABLE_MAGIC_WORD)
+		return NULL;
+
+	if (strcmp(tbl->name, name))
+		return NULL;
+
+	return (table_t)tbl;
+}
+
+table_t cuckoo_table_create(const char *name, uint32_t capacity,
+			    uint32_t key_size, uint32_t value_size)
+{
+	cuckoo_table_impl *tbl;
+	odp_shm_t shm_tbl;
+
+	odp_pool_t pool;
+	odp_pool_param_t param;
+
+	odp_queue_t queue;
+	odp_queue_param_t qparam;
+	odp_queue_capability_t qcapa;
+	odp_pool_capability_t pcapa;
+
+	char pool_name[TABLE_NAME_LEN + 3];
+	char queue_name[TABLE_NAME_LEN + 3];
+	uint32_t impl_size;
+	uint32_t kv_entry_size;
+	uint32_t bucket_num;
+	uint32_t bucket_size;
+
+	if (odp_queue_capability(&qcapa)) {
+		ODPH_DBG("queue capa failed\n");
+		return NULL;
+	}
+
+	if (qcapa.plain.max_size && qcapa.plain.max_size < capacity) {
+		ODPH_DBG("queue max_size too small\n");
+		return NULL;
+	}
+
+	if (odp_pool_capability(&pcapa)) {
+		ODPH_DBG("pool capa failed\n");
+		return NULL;
+	}
+
+	if (pcapa.buf.max_num && pcapa.buf.max_num < capacity) {
+		ODPH_DBG("pool max_num too small\n");
+		return NULL;
+	}
+
+	/* Check for valid parameters */
+	if (capacity > HASH_ENTRIES_MAX || capacity < HASH_BUCKET_ENTRIES ||
+	    key_size == 0 || strlen(name) == 0) {
+		ODPH_DBG("invalid parameters\n");
+		return NULL;
+	}
+
+	/* Guarantee there's no existing table with the same name */
+	tbl = (cuckoo_table_impl *)(void *)cuckoo_table_lookup(name);
+	if (tbl != NULL) {
+		ODPH_DBG("cuckoo hash table %s already exists\n", name);
+		return NULL;
+	}
+
+	/* Calculate the sizes of different parts of cuckoo hash table */
+	impl_size = sizeof(cuckoo_table_impl);
+	kv_entry_size = sizeof(struct cuckoo_table_key_value) + key_size + value_size;
+
+	bucket_num = align32pow2(capacity) / HASH_BUCKET_ENTRIES;
+	bucket_size = bucket_num * sizeof(struct cuckoo_table_bucket);
+
+	shm_tbl = odp_shm_reserve(name, impl_size + bucket_size, ODP_CACHE_LINE_SIZE, 0);
+	if (shm_tbl == ODP_SHM_INVALID) {
+		ODPH_DBG("shm allocation failed for cuckoo_table_impl %s\n", name);
+		return NULL;
+	}
+
+	tbl = odp_shm_addr(shm_tbl);
+	memset(tbl, 0, impl_size + bucket_size);
+
+	/* header of this mem block is the table impl struct,
+	 * then the bucket pool. 
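+	 *
+	 * Memory layout of the reserved shm block:
+	 *   [cuckoo_table_impl][bucket_num x struct cuckoo_table_bucket]
+	 *   |<-- impl_size  -->|<--         bucket_size             -->|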
+	 */
+	tbl->buckets = (void *)((char *)tbl + impl_size);
+
+	/* initialize key-value buffer pool */
+	snprintf(pool_name, sizeof(pool_name), "kv_%s", name);
+	pool = odp_pool_lookup(pool_name);
+
+	if (pool != ODP_POOL_INVALID) {
+		if (odp_pool_destroy(pool)) {
+			odp_shm_free(shm_tbl);
+			ODPH_DBG("failed to destroy pre-existing pool\n");
+			return NULL;
+		}
+	}
+
+	odp_pool_param_init(&param);
+	param.type = ODP_POOL_BUFFER;
+	param.buf.size = kv_entry_size;
+	if (pcapa.buf.max_align >= ODP_CACHE_LINE_SIZE)
+		param.buf.align = ODP_CACHE_LINE_SIZE;
+	param.buf.num = capacity;
+
+	pool = odp_pool_create(pool_name, &param);
+
+	if (pool == ODP_POOL_INVALID) {
+		ODPH_DBG("failed to create key-value pool\n");
+		odp_shm_free(shm_tbl);
+		return NULL;
+	}
+
+	/* initialize free_slots queue */
+	odp_queue_param_init(&qparam);
+	qparam.type = ODP_QUEUE_TYPE_PLAIN;
+	qparam.size = capacity;
+
+	snprintf(queue_name, sizeof(queue_name), "fs_%s", name);
+	queue = odp_queue_create(queue_name, &qparam);
+	if (queue == ODP_QUEUE_INVALID) {
+		ODPH_DBG("failed to create free_slots queue\n");
+		(void)odp_pool_destroy(pool);
+		odp_shm_free(shm_tbl);
+		return NULL;
+	}
+
+	/* Setup hash context */
+	snprintf(tbl->name, sizeof(tbl->name), "%s", name);
+	tbl->magicword = CUCKOO_TABLE_MAGIC_WORD;
+	tbl->entries = capacity;
+	tbl->key_len = key_size;
+	tbl->value_len = value_size;
+	tbl->num_buckets = bucket_num;
+	tbl->bucket_bitmask = bucket_num - 1;
+	tbl->free_slots = queue;
+
+	/* generate all free buffers and put them into the queue */
+	for (uint32_t i = 0; i < capacity; i++) {
+		odp_event_t ev = odp_buffer_to_event(odp_buffer_alloc(pool));
+
+		if (ev == ODP_EVENT_INVALID) {
+			ODPH_DBG("failed to generate free slots\n");
+			cuckoo_table_destroy((table_t)tbl);
+			return NULL;
+		}
+
+		if (odp_queue_enq(queue, ev) < 0) {
+			ODPH_DBG("failed to enqueue free slots\n");
+			cuckoo_table_destroy((table_t)tbl);
+			return NULL;
+		}
+	}
+
+	return (table_t)tbl;
+}
+
+int cuckoo_table_destroy(table_t tbl)
+{
+	int ret;
+	cuckoo_table_impl *impl = NULL;
+	char pool_name[TABLE_NAME_LEN + 3];
+	odp_event_t ev;
+	odp_shm_t shm;
+	odp_pool_t pool;
+	uint32_t i, j;
+
+	if (tbl == NULL)
+		return -1;
+
+	impl = (cuckoo_table_impl *)(void *)tbl;
+
+	/* check magic word */
+	if (impl->magicword != CUCKOO_TABLE_MAGIC_WORD) {
+		ODPH_DBG("wrong magicword for cuckoo table\n");
+		return -1;
+	}
+
+	/* free all used buffers */
+	for (i = 0; i < impl->num_buckets; i++) {
+		for (j = 0; j < HASH_BUCKET_ENTRIES; j++) {
+			if (impl->buckets[i].signatures[j].current != NULL_SIGNATURE)
+				odp_buffer_free(impl->buckets[i].key_buf[j]);
+		}
+	}
+
+	/* free all free buffers */
+	while ((ev = odp_queue_deq(impl->free_slots)) != ODP_EVENT_INVALID)
+		odp_buffer_free(odp_buffer_from_event(ev));
+
+	/* destroy free_slots queue */
+	ret = odp_queue_destroy(impl->free_slots);
+	if (ret < 0)
+		ODPH_DBG("failed to destroy free_slots queue\n");
+
+	/* destroy key-value pool */
+	snprintf(pool_name, sizeof(pool_name), "kv_%s", impl->name);
+	pool = odp_pool_lookup(pool_name);
+	if (pool == ODP_POOL_INVALID) {
+		ODPH_DBG("invalid pool\n");
+		return -1;
+	}
+
+	ret = odp_pool_destroy(pool);
+	if (ret != 0) {
+		ODPH_DBG("failed to destroy key-value buffer pool\n");
+		return -1;
+	}
+
+	/* free impl */
+	shm = odp_shm_lookup(impl->name);
+	if (shm == ODP_SHM_INVALID) {
+		ODPH_DBG("unable to look up shm\n");
+		return -1;
+	}
+
+	return odp_shm_free(shm);
+}
+
+static uint32_t hash(const cuckoo_table_impl *h, const void *key)
+{
+	/* calc hash result by key */
+	return odp_hash_crc32c(key, 
h->key_len, 0); +} + +/* Calc the secondary hash value from the primary hash value of a given key */ +static inline uint32_t +hash_secondary(const uint32_t primary_hash) +{ + static const unsigned int all_bits_shift = 12; + static const unsigned int alt_bits_xor = 0x5bd1e995; + + uint32_t tag = primary_hash >> all_bits_shift; + + return (primary_hash ^ ((tag + 1) * alt_bits_xor)); +} + +/* Search for an entry that can be pushed to its alternative location */ +static inline int +make_space_bucket(const cuckoo_table_impl *impl, + struct cuckoo_table_bucket *bkt) +{ + unsigned int i; + unsigned int j; + int ret; + uint32_t next_bucket_idx; + struct cuckoo_table_bucket *next_bkt[HASH_BUCKET_ENTRIES]; + + /* + * Push existing item (search for bucket with space in + * alternative locations) to its alternative location + */ + for (i = 0; i < HASH_BUCKET_ENTRIES; i++) { + /* Search for space in alternative locations */ + next_bucket_idx = bkt->signatures[i].alt & impl->bucket_bitmask; + next_bkt[i] = &impl->buckets[next_bucket_idx]; + for (j = 0; j < HASH_BUCKET_ENTRIES; j++) { + if (next_bkt[i]->signatures[j].sig == NULL_SIGNATURE) + break; + } + + if (j != HASH_BUCKET_ENTRIES) + break; + } + + /* Alternative location has spare room (end of recursive function) */ + if (i != HASH_BUCKET_ENTRIES) { + next_bkt[i]->signatures[j].alt = bkt->signatures[i].current; + next_bkt[i]->signatures[j].current = bkt->signatures[i].alt; + next_bkt[i]->key_buf[j] = bkt->key_buf[i]; + return i; + } + + /* Pick entry that has not been pushed yet */ + for (i = 0; i < HASH_BUCKET_ENTRIES; i++) + if (bkt->flag[i] == 0) + break; + + /* All entries have been pushed, so entry cannot be added */ + if (i == HASH_BUCKET_ENTRIES) + return -ENOSPC; + + /* Set flag to indicate that this entry is going to be pushed */ + bkt->flag[i] = 1; + /* Need room in alternative bucket to insert the pushed entry */ + ret = make_space_bucket(impl, next_bkt[i]); + /* + * After recursive function. 
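+	 * make_space_bucket() either freed slot 'ret' in next_bkt[i] by
+	 * pushing entries further along their alternative buckets, or it
+	 * returned -ENOSPC.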
+	 * Clear flags and insert the pushed entry
+	 * in its alternative location if successful,
+	 * or return error
+	 */
+	bkt->flag[i] = 0;
+	if (ret >= 0) {
+		next_bkt[i]->signatures[ret].alt = bkt->signatures[i].current;
+		next_bkt[i]->signatures[ret].current = bkt->signatures[i].alt;
+		next_bkt[i]->key_buf[ret] = bkt->key_buf[i];
+		return i;
+	}
+
+	return ret;
+}
+
+static inline int32_t
+cuckoo_table_add_key_with_hash(const cuckoo_table_impl *h,
+			       const void *key, uint32_t sig, void *data)
+{
+	uint32_t alt_hash;
+	uint32_t prim_bucket_idx, sec_bucket_idx;
+	unsigned int i;
+	struct cuckoo_table_bucket *prim_bkt, *sec_bkt;
+	struct cuckoo_table_key_value *new_kv, *kv;
+
+	odp_buffer_t new_buf;
+	int ret;
+
+	prim_bucket_idx = sig & h->bucket_bitmask;
+	prim_bkt = &h->buckets[prim_bucket_idx];
+	__builtin_prefetch((const void *)(uintptr_t)prim_bkt, 0, 3);
+
+	alt_hash = hash_secondary(sig);
+	sec_bucket_idx = alt_hash & h->bucket_bitmask;
+	sec_bkt = &h->buckets[sec_bucket_idx];
+	__builtin_prefetch((const void *)(uintptr_t)sec_bkt, 0, 3);
+
+	/* Get a new slot for storing the new key */
+	new_buf = odp_buffer_from_event(odp_queue_deq(h->free_slots));
+	if (new_buf == ODP_BUFFER_INVALID)
+		return -ENOSPC;
+
+	/* Check if key is already inserted in primary location */
+	for (i = 0; i < HASH_BUCKET_ENTRIES; i++) {
+		if (prim_bkt->signatures[i].current == sig &&
+		    prim_bkt->signatures[i].alt == alt_hash) {
+			kv = (struct cuckoo_table_key_value *)odp_buffer_addr(prim_bkt->key_buf[i]);
+			if (memcmp(key, kv->key, h->key_len) == 0) {
+				odp_queue_enq(h->free_slots, odp_buffer_to_event(new_buf));
+				/* Update data */
+				if (kv->value != NULL)
+					memcpy(kv->value, data, h->value_len);
+				/* Return bucket index */
+				return prim_bucket_idx;
+			}
+		}
+	}
+
+	/* Check if key is already inserted in secondary location */
+	for (i = 0; i < HASH_BUCKET_ENTRIES; i++) {
+		if (sec_bkt->signatures[i].alt == sig &&
+		    sec_bkt->signatures[i].current == alt_hash) {
+			kv = (struct cuckoo_table_key_value *)odp_buffer_addr(sec_bkt->key_buf[i]);
+			if (memcmp(key, kv->key, h->key_len) == 0) {
+				odp_queue_enq(h->free_slots, odp_buffer_to_event(new_buf));
+				/* Update data */
+				if (kv->value != NULL)
+					memcpy(kv->value, data, h->value_len);
+				/* Return bucket index */
+				return sec_bucket_idx;
+			}
+		}
+	}
+
+	new_kv = (struct cuckoo_table_key_value *)odp_buffer_addr(new_buf);
+	__builtin_prefetch((const void *)(uintptr_t)new_kv, 0, 3);
+
+	/* Copy key and value.
+	 * key-value mem block : struct cuckoo_table_key_value
+	 * + key (key_len) + value (value_len)
+	 */
+	new_kv->key = (uint8_t *)new_kv + sizeof(struct cuckoo_table_key_value);
+	memcpy(new_kv->key, key, h->key_len);
+
+	if (h->value_len > 0) {
+		new_kv->value = new_kv->key + h->key_len;
+		memcpy(new_kv->value, data, h->value_len);
+	} else {
+		new_kv->value = NULL;
+	}
+
+	/* Insert new entry if there is room in the primary bucket */
+	for (i = 0; i < HASH_BUCKET_ENTRIES; i++) {
+		/* Check if slot is available */
+		if (odp_likely(prim_bkt->signatures[i].sig == NULL_SIGNATURE)) {
+			prim_bkt->signatures[i].current = sig;
+			prim_bkt->signatures[i].alt = alt_hash;
+			prim_bkt->key_buf[i] = new_buf;
+			return prim_bucket_idx;
+		}
+	}
+
+	/* Primary bucket is full, so we need to make space for new entry */
+	ret = make_space_bucket(h, prim_bkt);
+
+	/*
+	 * After recursive function. 
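+	 * A return value >= 0 is the index of the entry in prim_bkt that
+	 * was pushed to its alternative bucket, i.e. that slot is now free.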
+ * Insert the new entry in the position of the pushed entry + * if successful or return error and + * store the new slot back in the pool + */ + if (ret >= 0) { + prim_bkt->signatures[ret].current = sig; + prim_bkt->signatures[ret].alt = alt_hash; + prim_bkt->key_buf[ret] = new_buf; + return prim_bucket_idx; + } + + /* Error in addition, store new slot back in the free_slots */ + odp_queue_enq(h->free_slots, odp_buffer_to_event(new_buf)); + + return ret; +} + +int cuckoo_table_put_value(table_t tbl, void *key, void *value) +{ + cuckoo_table_impl *impl; + int ret; + + if (tbl == NULL || key == NULL) + return -EINVAL; + + impl = (cuckoo_table_impl *)(void *)tbl; + ret = cuckoo_table_add_key_with_hash(impl, key, hash(impl, key), value); + + if (ret < 0) + return -1; + + return 0; +} + +static inline int32_t +cuckoo_table_lookup_with_hash(const cuckoo_table_impl *h, const void *key, + uint32_t sig, void **data_ptr) +{ + uint32_t bucket_idx; + uint32_t alt_hash; + unsigned int i; + struct cuckoo_table_bucket *bkt; + struct cuckoo_table_key_value *kv; + + bucket_idx = sig & h->bucket_bitmask; + bkt = &h->buckets[bucket_idx]; + + /* Check if key is in primary location */ + for (i = 0; i < HASH_BUCKET_ENTRIES; i++) { + if (bkt->signatures[i].current == sig && + bkt->signatures[i].sig != NULL_SIGNATURE) { + kv = (struct cuckoo_table_key_value *)odp_buffer_addr(bkt->key_buf[i]); + if (memcmp(key, kv->key, h->key_len) == 0) { + if (data_ptr != NULL) + *data_ptr = kv->value; + /* + * Return index where key is stored, + * subtracting the first dummy index + */ + return bucket_idx; + } + } + } + + /* Calculate secondary hash */ + alt_hash = hash_secondary(sig); + bucket_idx = alt_hash & h->bucket_bitmask; + bkt = &h->buckets[bucket_idx]; + + /* Check if key is in secondary location */ + for (i = 0; i < HASH_BUCKET_ENTRIES; i++) { + if (bkt->signatures[i].current == alt_hash && + bkt->signatures[i].alt == sig) { + kv = (struct cuckoo_table_key_value *)odp_buffer_addr(bkt->key_buf[i]); + if (memcmp(key, kv->key, h->key_len) == 0) { + if (data_ptr != NULL) + *data_ptr = kv->value; + /* + * Return index where key is stored, + * subtracting the first dummy index + */ + return bucket_idx; + } + } + } + + return -ENOENT; +} + +int cuckoo_table_get_value(table_t tbl, void *key, void *buffer, + uint32_t buffer_size ODP_UNUSED) +{ + cuckoo_table_impl *impl = (cuckoo_table_impl *)(void *)tbl; + void *tmp = NULL; + int ret; + + if (tbl == NULL || key == NULL) + return -EINVAL; + + ret = cuckoo_table_lookup_with_hash(impl, key, hash(impl, key), &tmp); + + if (ret < 0) + return -1; + + if (impl->value_len > 0) + memcpy(buffer, tmp, impl->value_len); + + return 0; +} + +static inline int32_t +cuckoo_table_del_key_with_hash(const cuckoo_table_impl *h, + const void *key, uint32_t sig) +{ + uint32_t bucket_idx; + uint32_t alt_hash; + unsigned int i; + struct cuckoo_table_bucket *bkt; + struct cuckoo_table_key_value *kv; + + bucket_idx = sig & h->bucket_bitmask; + bkt = &h->buckets[bucket_idx]; + + /* Check if key is in primary location */ + for (i = 0; i < HASH_BUCKET_ENTRIES; i++) { + if (bkt->signatures[i].current == sig && + bkt->signatures[i].sig != NULL_SIGNATURE) { + kv = (struct cuckoo_table_key_value *)odp_buffer_addr(bkt->key_buf[i]); + if (memcmp(key, kv->key, h->key_len) == 0) { + bkt->signatures[i].sig = NULL_SIGNATURE; + odp_queue_enq(h->free_slots, odp_buffer_to_event(bkt->key_buf[i])); + return bucket_idx; + } + } + } + + /* Calculate secondary hash */ + alt_hash = hash_secondary(sig); + bucket_idx = 
alt_hash & h->bucket_bitmask; + bkt = &h->buckets[bucket_idx]; + + /* Check if key is in secondary location */ + for (i = 0; i < HASH_BUCKET_ENTRIES; i++) { + if (bkt->signatures[i].current == alt_hash && + bkt->signatures[i].sig != NULL_SIGNATURE) { + kv = (struct cuckoo_table_key_value *)odp_buffer_addr(bkt->key_buf[i]); + if (memcmp(key, kv->key, h->key_len) == 0) { + bkt->signatures[i].sig = NULL_SIGNATURE; + odp_queue_enq(h->free_slots, odp_buffer_to_event(bkt->key_buf[i])); + return bucket_idx; + } + } + } + + return -ENOENT; +} + +int cuckoo_table_remove_value(table_t tbl, void *key) +{ + cuckoo_table_impl *impl = (void *)tbl; + int ret; + + if (tbl == NULL || key == NULL) + return -EINVAL; + + ret = cuckoo_table_del_key_with_hash(impl, key, hash(impl, key)); + if (ret < 0) + return -1; + + return 0; +} + +table_ops_t cuckoo_table_ops = { + cuckoo_table_create, + cuckoo_table_lookup, + cuckoo_table_destroy, + cuckoo_table_put_value, + cuckoo_table_get_value, + cuckoo_table_remove_value +}; diff --git a/programs/example/error/error.c b/programs/example/error/error.c index 1953c34e..d63e6db8 100644 --- a/programs/example/error/error.c +++ b/programs/example/error/error.c @@ -1,667 +1,667 @@ -/* - * Copyright (c) 2012, Nokia Siemens Networks - * Copyright (c) 2014, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine error handler example. - * - * Demonstrate and test the Event Machine error handling functionality, - * see the API calls em_error(), em_register_error_handler(), - * em_eo_register_error_handler() etc. - * - * Three application EOs are created, each with a dedicated queue. - * An application specific global error handler is registered (thus replacing - * the EM default). Additionally EO A will register an EO specific error - * handler. - * When the EOs receive events (error_receive) they will generate errors by - * explicit calls to em_error() and by calling EM-API functions with invalid - * arguments. 
The registered error handlers simply print the error information - * on screen. - * - * Note: Lots of the API-call return values are left unchecked for errors - * (especially in setup) since the error handler demonstrated in this example - * is not designed to handle 'real' errors. - */ - -#include -#include -#include - -#include -#include -#include - -#include "cm_setup.h" -#include "cm_error_handler.h" - -#define APPL_ESCOPE_INIT 1 -#define APPL_ESCOPE_OTHER 2 -#define APPL_ESCOPE_STR 3 -#define APPL_ESCOPE_STR_Q 4 -#define APPL_ESCOPE_STR_Q_SEQ 5 - -#define DELAY_SPIN_COUNT 50000000 - -#define TEST_ERROR_FATAL EM_ERROR_SET_FATAL(0xdead) -#define TEST_ERROR_1 0x1111 -#define TEST_ERROR_2 0x2222 -#define TEST_ERROR_3 0x3333 -#define TEST_ERROR_4 0x4444 - -/** - * Error test event - */ -typedef struct { - /* Destination queue for the reply event */ - em_queue_t dest; - /* Sequence number */ - unsigned int seq; - /* Indicate whether to report a fatal error or not */ - int fatal; -} error_event_t; - -/** - * EO context of error test application - */ -typedef union { - struct { - /* EO Id */ - em_eo_t eo; - /* EO name */ - char name[16]; - }; - /* Pad to cache line size */ - uint8_t u8[ENV_CACHE_LINE_SIZE]; -} eo_context_t; - -/** - * Error test shared memory - */ -typedef struct { - /* Event pool used by this application */ - em_pool_t pool; - /* EO A context from shared memory region */ - eo_context_t eo_error_a ENV_CACHE_LINE_ALIGNED; - /* EO B context from shared memory region */ - eo_context_t eo_error_b ENV_CACHE_LINE_ALIGNED; - /* EO C context from shared memory region */ - eo_context_t eo_error_c ENV_CACHE_LINE_ALIGNED; - /* Queue IDs - shared vars, test is NOT concerned with perf */ - em_queue_t queue_a ENV_CACHE_LINE_ALIGNED; - em_queue_t queue_b; - em_queue_t queue_c; - /* Pad to cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} error_shm_t; - -static ENV_LOCAL error_shm_t *error_shm; - -/* - * Local function prototypes - */ -static em_status_t -error_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); - -static em_status_t -error_stop(void *eo_context, em_eo_t eo); - -static void -error_receive(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx); - -static em_status_t -global_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, - va_list args); - -static em_status_t -eo_specific_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, - va_list args); - -static em_status_t -combined_error_handler(const char *handler_name, em_eo_t eo, em_status_t error, - em_escope_t escope, va_list args); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Init of the Error Handler test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. 
- */ -void test_init(const appl_conf_t *appl_conf) -{ - (void)appl_conf; - int core = em_core_id(); - - if (core == 0) - error_shm = env_shared_reserve("ErrorSharedMem", - sizeof(error_shm_t)); - else - error_shm = env_shared_lookup("ErrorSharedMem"); - - if (error_shm == NULL) - em_error(EM_ERROR_SET_FATAL(0xec0de), APPL_ESCOPE_INIT, - "Error init failed on EM-core: %u\n", em_core_id()); - else if (core == 0) - memset(error_shm, 0, sizeof(error_shm_t)); -} - -/** - * Startup of the Error Handler test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. - */ -void test_start(const appl_conf_t *appl_conf) -{ - em_eo_t eo; - em_event_t event; - em_queue_t queue; - em_status_t ret, start_ret = EM_ERROR; - error_event_t *error; - - /* - * Store the event pool to use, use the EM default pool if no other - * pool is provided through the appl_conf. - */ - if (appl_conf->num_pools >= 1) - error_shm->pool = appl_conf->pools[0]; - else - error_shm->pool = EM_POOL_DEFAULT; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%d\n" - " Application running on %u EM-cores (procs:%u, threads:%u)\n" - " using event pool:%" PRI_POOL "\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - appl_conf->core_count, appl_conf->num_procs, appl_conf->num_threads, - error_shm->pool); - - /* - * Register the application specifig global error handler - * This replaces the EM internal default error handler - */ - ret = em_register_error_handler(global_error_handler); - test_fatal_if(ret != EM_OK, - "Register global error handler:%" PRI_STAT "", ret); - - /* Create and start EO "A" */ - eo = em_eo_create("EO A", error_start, NULL, error_stop, NULL, - error_receive, &error_shm->eo_error_a); - - queue = em_queue_create("queue A", EM_QUEUE_TYPE_ATOMIC, - EM_QUEUE_PRIO_NORMAL, EM_QUEUE_GROUP_DEFAULT, - NULL); - - ret = em_eo_add_queue_sync(eo, queue); - test_fatal_if(ret != EM_OK, - "EO add queue:%" PRI_STAT ".\n" - "EO:%" PRI_EO ", queue:%" PRI_QUEUE "", - ret, eo, queue); - - error_shm->eo_error_a.eo = eo; - error_shm->queue_a = queue; - - /* Register an application 'EO A'-specific error handler */ - ret = em_eo_register_error_handler(eo, eo_specific_error_handler); - test_fatal_if(ret != EM_OK, - "Register EO error handler:%" PRI_STAT "", ret); - - ret = em_eo_start_sync(eo, &start_ret, NULL); - test_fatal_if(ret != EM_OK || start_ret != EM_OK, - "EO A start:%" PRI_STAT " %" PRI_STAT ""); - - /* Create and start EO "B" */ - eo = em_eo_create("EO B", error_start, NULL, error_stop, NULL, - error_receive, &error_shm->eo_error_b); - - queue = em_queue_create("queue B", EM_QUEUE_TYPE_ATOMIC, - EM_QUEUE_PRIO_NORMAL, EM_QUEUE_GROUP_DEFAULT, - NULL); - - ret = em_eo_add_queue_sync(eo, queue); - test_fatal_if(ret != EM_OK, - "EO add queue:%" PRI_STAT ".\n" - "EO:%" PRI_EO ", queue:%" PRI_QUEUE "", - ret, eo, queue); - - error_shm->eo_error_b.eo = eo; - error_shm->queue_b = queue; - - /* - * Note: No 'EO B' specific error handler. Use the application specific - * global error handler instead. 
- */ - - ret = em_eo_start_sync(eo, &start_ret, NULL); - test_fatal_if(ret != EM_OK || start_ret != EM_OK, - "EO B start:%" PRI_STAT " %" PRI_STAT ""); - - /* Create and start EO "C" */ - eo = em_eo_create("EO C", error_start, NULL, error_stop, NULL, - error_receive, &error_shm->eo_error_c); - queue = em_queue_create("queue C", EM_QUEUE_TYPE_ATOMIC, - EM_QUEUE_PRIO_HIGH, EM_QUEUE_GROUP_DEFAULT, - NULL); - - ret = em_eo_add_queue_sync(eo, queue); - test_fatal_if(ret != EM_OK, - "EO add queue:%" PRI_STAT ".\n" - "EO:%" PRI_EO ", queue:%" PRI_QUEUE "", - ret, eo, queue); - - error_shm->eo_error_c.eo = eo; - error_shm->queue_c = queue; - - /* - * Note: No 'EO C' specific error handler. Use the application specific - * global error handler instead. - */ - - ret = em_eo_start_sync(eo, &start_ret, NULL); - test_fatal_if(ret != EM_OK || start_ret != EM_OK, - "EO C start:%" PRI_STAT " %" PRI_STAT ""); - - /* - * Send an event to EO A. - * Store EO B's queue as the destination queue for EO A. - */ - event = em_alloc(sizeof(error_event_t), EM_EVENT_TYPE_SW, - error_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, "Alloc failed"); - - error = em_event_pointer(event); - error->dest = error_shm->queue_b; - error->seq = 0; - error->fatal = 0; - - ret = em_send(event, error_shm->queue_a); - test_fatal_if(ret != EM_OK, "Send:%" PRI_STAT " Queue:%" PRI_QUEUE "", - ret, error_shm->queue_a); - - /* Send event to EO C. No dest queue stored since fatal flag is set */ - event = em_alloc(sizeof(error_event_t), EM_EVENT_TYPE_SW, - error_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, "Alloc failed"); - - error = em_event_pointer(event); - error->dest = EM_QUEUE_UNDEF; /* Don't care, never resent */ - error->seq = 0; - error->fatal = 1; /* Generate a fatal error when received */ - - ret = em_send(event, error_shm->queue_c); - test_fatal_if(ret != EM_OK, "Send:%" PRI_STAT " Queue:%" PRI_QUEUE "", - ret, error_shm->queue_c); -} - -void test_stop(const appl_conf_t *appl_conf) -{ - const int core = em_core_id(); - const em_eo_t eo_a = error_shm->eo_error_a.eo; - const em_eo_t eo_b = error_shm->eo_error_b.eo; - const em_eo_t eo_c = error_shm->eo_error_c.eo; - em_status_t stat; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - stat = em_eo_stop_sync(eo_a); - if (stat != EM_OK) - APPL_EXIT_FAILURE("EO A stop failed!"); - stat = em_eo_stop_sync(eo_b); - if (stat != EM_OK) - APPL_EXIT_FAILURE("EO B stop failed!"); - stat = em_eo_stop_sync(eo_c); - if (stat != EM_OK) - APPL_EXIT_FAILURE("EO C stop failed!"); - - stat = em_eo_unregister_error_handler(eo_a); - if (stat != EM_OK) - APPL_EXIT_FAILURE("EO A unregister error handler failed!"); - - stat = em_eo_delete(eo_a); - if (stat != EM_OK) - APPL_EXIT_FAILURE("EO A delete failed!"); - stat = em_eo_delete(eo_b); - if (stat != EM_OK) - APPL_EXIT_FAILURE("EO B delete failed!"); - stat = em_eo_delete(eo_c); - if (stat != EM_OK) - APPL_EXIT_FAILURE("EO C delete failed!"); - - stat = em_unregister_error_handler(); - if (stat != EM_OK) - APPL_EXIT_FAILURE("Unregister error handler failed!"); -} - -void test_term(const appl_conf_t *appl_conf) -{ - (void)appl_conf; - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - if (core == 0) - env_shared_free(error_shm); - - fflush(NULL); -} - -/** - * @private - * - * EO specific error handler. - * - * @return The function may not return depending on implementation/error - * code/error scope. 
If it returns, the return value is the original - * (or modified) error code from the caller. - */ -static em_status_t -eo_specific_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, - va_list args) -{ - return combined_error_handler("Appl EO specific error handler", - eo, error, escope, args); -} - -/** - * @private - * - * Global error handler. - * - * @return The function may not return depending on implementation/error - * code/error scope. If it returns, the return value is the original - * (or modified) error code from the caller. - */ -static em_status_t -global_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, - va_list args) -{ - return combined_error_handler("Appl Global error handler ", - eo, error, escope, args); -} - -/** - * @private - * - * Error handler implementation for both global and EO specific handlers - * registered by the application. - * - * @return The function may not return depending on implementation/error - * code/error scope. If it returns, the return value is the original - * (or modified) error code from the caller. - */ -static em_status_t -combined_error_handler(const char *handler_name, em_eo_t eo, em_status_t error, - em_escope_t escope, va_list args) -{ - em_queue_t queue; - const char *str; - unsigned int seq; - - /* First handle EM-internal errors */ - if (EM_ESCOPE_API(escope)) { - /* EM API error: call em_error_format_string() */ - char error_str[512]; - - em_error_format_string(error_str, sizeof(error_str), eo, - error, escope, args); - - APPL_PRINT("%s: EO %" PRI_EO " error 0x%" PRIxSTAT " escope 0x%X\n" - "- EM info: %s", - handler_name, eo, error, escope, error_str); - - if (EM_ERROR_IS_FATAL(error)) - abort(); - - return error; - } - - /* - * Application Errors: - */ - - /* Application FATAL: */ - if (EM_ERROR_IS_FATAL(error)) { - if (error == TEST_ERROR_FATAL) { - /* - * Application handling of test FATAL error. - * Print it and return since it's a fake fatal error. - */ - APPL_PRINT("THIS IS A FATAL ERROR!!\n" - "%s: EO %" PRI_EO " error 0x%" PRIxSTAT " escope 0x%X\n" - "Return from fatal.\n\n", - handler_name, eo, error, escope); - return error; - } - - /* Real application FATAL error - abort! 
*/ - APPL_PRINT("%s: EO %" PRI_EO " error 0x%" PRIxSTAT " escope 0x%X\n", - handler_name, eo, error, escope); - abort(); - } - - /* Application non-fatal: */ - switch (escope) { - case APPL_ESCOPE_STR: - str = va_arg(args, const char*); - APPL_PRINT("%s: EO %" PRI_EO " error 0x%" PRIxSTAT " escope 0x%X\t" - "ARGS: %s\n", handler_name, eo, error, - escope, str); - break; - - case APPL_ESCOPE_STR_Q: - str = va_arg(args, const char*); - queue = va_arg(args, em_queue_t); - APPL_PRINT("%s: EO %" PRI_EO " error 0x%" PRIxSTAT " escope 0x%X\t" - "ARGS: %s %" PRI_QUEUE "\n", handler_name, - eo, error, escope, str, queue); - break; - - case APPL_ESCOPE_STR_Q_SEQ: - str = va_arg(args, const char*); - queue = va_arg(args, em_queue_t); - seq = va_arg(args, unsigned int); - APPL_PRINT("%s: EO %" PRI_EO " error 0x%" PRIxSTAT " escope 0x%X\t" - "ARGS: %s %" PRI_QUEUE " %u\n", handler_name, - eo, error, escope, str, queue, seq); - break; - - case APPL_ESCOPE_OTHER: - APPL_PRINT("%s: EO %" PRI_EO " error 0x%" PRIxSTAT " escope 0x%X\n", - handler_name, eo, error, escope); - break; - - default: /* Unexpected application error - abort!*/ - APPL_PRINT("%s: Unexpected Application Error:\n" - " EO %" PRI_EO " error 0x%" PRIxSTAT " escope 0x%X\n", - handler_name, eo, error, escope); - abort(); - }; - - return error; -} - -/** - * @private - * - * EO receive function. - * - * Report various kinds of errors to demonstrate the EM error handling API. - * - */ -static void -error_receive(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx) -{ - em_queue_t dest; - eo_context_t *eo_ctx = eo_context; - error_event_t *errev; - em_status_t ret; - unsigned int seq; - - (void)type; - (void)q_ctx; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - errev = em_event_pointer(event); - dest = errev->dest; - errev->dest = queue; - - if (errev->fatal) { - APPL_PRINT("\nError log from %s [%u] on core %i!\n", - eo_ctx->name, errev->seq, em_core_id()); - em_free(event); - /* Report a fatal error */ - em_error(TEST_ERROR_FATAL, APPL_ESCOPE_OTHER); - return; - } - - APPL_PRINT("Error log from %s [%u] on core %i!\n", eo_ctx->name, - errev->seq, em_core_id()); - - /* error escope args */ - em_error(TEST_ERROR_1, APPL_ESCOPE_OTHER); - em_error(TEST_ERROR_2, APPL_ESCOPE_STR, "Second error"); - em_error(TEST_ERROR_3, APPL_ESCOPE_STR_Q, "Third error", queue); - em_error(TEST_ERROR_4, APPL_ESCOPE_STR_Q_SEQ, "Fourth error", - queue, errev->seq); - - /* Example of an API call error - generates an EM API error */ - em_free(EM_EVENT_UNDEF); - - errev->seq++; - /* store 'seq' before sending event */ - seq = errev->seq; - - delay_spin(DELAY_SPIN_COUNT); - - ret = em_send(event, dest); - if (unlikely(ret != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, - "Send:%" PRI_STAT " Queue:%" PRI_QUEUE "", - ret, dest); - return; - } - - /* Request a fatal error to be generated every 8th event by 'EO C' */ - if ((seq & 0x7) == 0x7) { - /* Send a new event to EO 'C' to cause a fatal error */ - event = em_alloc(sizeof(error_event_t), EM_EVENT_TYPE_SW, - error_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, "Alloc failed"); - - errev = em_event_pointer(event); - errev->dest = EM_QUEUE_UNDEF; /* Don't care, never resent */ - errev->seq = 0; - errev->fatal = 1; - - ret = em_send(event, error_shm->queue_c); - if (unlikely(ret != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, - "Send:%" PRI_STAT " Queue:%" PRI_QUEUE "", - ret, error_shm->queue_c); - return; - } 
- } -} - -/** - * @private - * - * EO start function. - * - */ -static em_status_t -error_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - eo_context_t *eo_ctx = eo_context; - - (void)conf; - - em_eo_get_name(eo, eo_ctx->name, sizeof(eo_ctx->name)); - - APPL_PRINT("Error test start (%s, eo id %" PRI_EO ")\n", - eo_ctx->name, eo); - - return EM_OK; -} - -/** - * @private - * - * EO stop function. - * - */ -static em_status_t -error_stop(void *eo_context, em_eo_t eo) -{ - em_status_t stat; - - (void)eo_context; - - APPL_PRINT("Error test stop function (EO:%" PRI_EO ")\n", eo); - - stat = em_eo_remove_queue_all_sync(eo, EM_TRUE); - if (stat != EM_OK) - APPL_EXIT_FAILURE("EO:%" PRI_EO " rem all queues failed!", eo); - - return EM_OK; -} +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2014, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Event Machine error handler example. + * + * Demonstrate and test the Event Machine error handling functionality, + * see the API calls em_error(), em_register_error_handler(), + * em_eo_register_error_handler() etc. + * + * Three application EOs are created, each with a dedicated queue. + * An application specific global error handler is registered (thus replacing + * the EM default). Additionally EO A will register an EO specific error + * handler. + * When the EOs receive events (error_receive) they will generate errors by + * explicit calls to em_error() and by calling EM-API functions with invalid + * arguments. The registered error handlers simply print the error information + * on screen. + * + * Note: Lots of the API-call return values are left unchecked for errors + * (especially in setup) since the error handler demonstrated in this example + * is not designed to handle 'real' errors. 
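+ *
+ * A minimal sketch of the API calls exercised below; 'my_handler',
+ * MY_ERROR and MY_ESCOPE are placeholder names, not part of this
+ * program:
+ *
+ *   static em_status_t my_handler(em_eo_t eo, em_status_t error,
+ *                                 em_escope_t escope, va_list args);
+ *
+ *   em_register_error_handler(my_handler);        (global handler)
+ *   em_eo_register_error_handler(eo, my_handler); (EO-specific handler)
+ *   em_error(MY_ERROR, MY_ESCOPE);                (report an error)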
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <event_machine.h>
+#include <event_machine/helper/event_machine_helper.h>
+#include <event_machine/platform/env/environment.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+
+#define APPL_ESCOPE_INIT 1
+#define APPL_ESCOPE_OTHER 2
+#define APPL_ESCOPE_STR 3
+#define APPL_ESCOPE_STR_Q 4
+#define APPL_ESCOPE_STR_Q_SEQ 5
+
+#define DELAY_SPIN_COUNT 50000000
+
+#define TEST_ERROR_FATAL EM_ERROR_SET_FATAL(0xdead)
+#define TEST_ERROR_1 0x1111
+#define TEST_ERROR_2 0x2222
+#define TEST_ERROR_3 0x3333
+#define TEST_ERROR_4 0x4444
+
+/**
+ * Error test event
+ */
+typedef struct {
+ /* Destination queue for the reply event */
+ em_queue_t dest;
+ /* Sequence number */
+ unsigned int seq;
+ /* Indicate whether to report a fatal error or not */
+ int fatal;
+} error_event_t;
+
+/**
+ * EO context of error test application
+ */
+typedef union {
+ struct {
+ /* EO Id */
+ em_eo_t eo;
+ /* EO name */
+ char name[16];
+ };
+ /* Pad to cache line size */
+ uint8_t u8[ENV_CACHE_LINE_SIZE];
+} eo_context_t;
+
+/**
+ * Error test shared memory
+ */
+typedef struct {
+ /* Event pool used by this application */
+ em_pool_t pool;
+ /* EO A context from shared memory region */
+ eo_context_t eo_error_a ENV_CACHE_LINE_ALIGNED;
+ /* EO B context from shared memory region */
+ eo_context_t eo_error_b ENV_CACHE_LINE_ALIGNED;
+ /* EO C context from shared memory region */
+ eo_context_t eo_error_c ENV_CACHE_LINE_ALIGNED;
+ /* Queue IDs - shared vars, test is NOT concerned with perf */
+ em_queue_t queue_a ENV_CACHE_LINE_ALIGNED;
+ em_queue_t queue_b;
+ em_queue_t queue_c;
+ /* Pad to cache line size */
+ void *end[0] ENV_CACHE_LINE_ALIGNED;
+} error_shm_t;
+
+static ENV_LOCAL error_shm_t *error_shm;
+
+/*
+ * Local function prototypes
+ */
+static em_status_t
+error_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf);
+
+static em_status_t
+error_stop(void *eo_context, em_eo_t eo);
+
+static void
+error_receive(void *eo_context, em_event_t event, em_event_type_t type,
+ em_queue_t queue, void *q_ctx);
+
+static em_status_t
+global_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope,
+ va_list args);
+
+static em_status_t
+eo_specific_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope,
+ va_list args);
+
+static em_status_t
+combined_error_handler(const char *handler_name, em_eo_t eo, em_status_t error,
+ em_escope_t escope, va_list args);
+
+/**
+ * Main function
+ *
+ * Call cm_setup() to perform test & EM setup common for all the
+ * test applications.
+ *
+ * cm_setup() will call test_init() and test_start() and launch
+ * the EM dispatch loop on every EM-core.
+ */
+int main(int argc, char *argv[])
+{
+ return cm_setup(argc, argv);
+}
+
+/**
+ * Init of the Error Handler test application.
+ *
+ * @attention Run on all cores.
+ *
+ * @see cm_setup() for setup and dispatch.
+ */
+void test_init(const appl_conf_t *appl_conf)
+{
+ (void)appl_conf;
+ int core = em_core_id();
+
+ if (core == 0)
+ error_shm = env_shared_reserve("ErrorSharedMem",
+ sizeof(error_shm_t));
+ else
+ error_shm = env_shared_lookup("ErrorSharedMem");
+
+ if (error_shm == NULL)
+ em_error(EM_ERROR_SET_FATAL(0xec0de), APPL_ESCOPE_INIT,
+ "Error init failed on EM-core: %u\n", em_core_id());
+ else if (core == 0)
+ memset(error_shm, 0, sizeof(error_shm_t));
+}
+
+/**
+ * Startup of the Error Handler test application.
+ *
+ * @attention Run only on EM core 0.
+ *
+ * @param appl_conf Application configuration
+ *
+ * @see cm_setup() for setup and dispatch.
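+ *
+ * Note: test_start() below registers the global error handler before
+ * creating any EOs, so every EM API error reported during the rest of
+ * the setup is already routed to global_error_handler() instead of
+ * the EM default error handler.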
+ */
+void test_start(const appl_conf_t *appl_conf)
+{
+ em_eo_t eo;
+ em_event_t event;
+ em_queue_t queue;
+ em_status_t ret, start_ret = EM_ERROR;
+ error_event_t *error;
+
+ /*
+ * Store the event pool to use, use the EM default pool if no other
+ * pool is provided through the appl_conf.
+ */
+ if (appl_conf->num_pools >= 1)
+ error_shm->pool = appl_conf->pools[0];
+ else
+ error_shm->pool = EM_POOL_DEFAULT;
+
+ APPL_PRINT("\n"
+ "***********************************************************\n"
+ "EM APPLICATION: '%s' initializing:\n"
+ " %s: %s() - EM-core:%d\n"
+ " Application running on %u EM-cores (procs:%u, threads:%u)\n"
+ " using event pool:%" PRI_POOL "\n"
+ "***********************************************************\n"
+ "\n",
+ appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(),
+ appl_conf->core_count, appl_conf->num_procs, appl_conf->num_threads,
+ error_shm->pool);
+
+ /*
+ * Register the application specific global error handler.
+ * This replaces the EM internal default error handler.
+ */
+ ret = em_register_error_handler(global_error_handler);
+ test_fatal_if(ret != EM_OK,
+ "Register global error handler:%" PRI_STAT "", ret);
+
+ /* Create and start EO "A" */
+ eo = em_eo_create("EO A", error_start, NULL, error_stop, NULL,
+ error_receive, &error_shm->eo_error_a);
+
+ queue = em_queue_create("queue A", EM_QUEUE_TYPE_ATOMIC,
+ EM_QUEUE_PRIO_NORMAL, EM_QUEUE_GROUP_DEFAULT,
+ NULL);
+
+ ret = em_eo_add_queue_sync(eo, queue);
+ test_fatal_if(ret != EM_OK,
+ "EO add queue:%" PRI_STAT ".\n"
+ "EO:%" PRI_EO ", queue:%" PRI_QUEUE "",
+ ret, eo, queue);
+
+ error_shm->eo_error_a.eo = eo;
+ error_shm->queue_a = queue;
+
+ /* Register an application 'EO A'-specific error handler */
+ ret = em_eo_register_error_handler(eo, eo_specific_error_handler);
+ test_fatal_if(ret != EM_OK,
+ "Register EO error handler:%" PRI_STAT "", ret);
+
+ ret = em_eo_start_sync(eo, &start_ret, NULL);
+ test_fatal_if(ret != EM_OK || start_ret != EM_OK,
+ "EO A start:%" PRI_STAT " %" PRI_STAT "",
+ ret, start_ret);
+
+ /* Create and start EO "B" */
+ eo = em_eo_create("EO B", error_start, NULL, error_stop, NULL,
+ error_receive, &error_shm->eo_error_b);
+
+ queue = em_queue_create("queue B", EM_QUEUE_TYPE_ATOMIC,
+ EM_QUEUE_PRIO_NORMAL, EM_QUEUE_GROUP_DEFAULT,
+ NULL);
+
+ ret = em_eo_add_queue_sync(eo, queue);
+ test_fatal_if(ret != EM_OK,
+ "EO add queue:%" PRI_STAT ".\n"
+ "EO:%" PRI_EO ", queue:%" PRI_QUEUE "",
+ ret, eo, queue);
+
+ error_shm->eo_error_b.eo = eo;
+ error_shm->queue_b = queue;
+
+ /*
+ * Note: No 'EO B' specific error handler. Use the application specific
+ * global error handler instead.
+ */
+
+ ret = em_eo_start_sync(eo, &start_ret, NULL);
+ test_fatal_if(ret != EM_OK || start_ret != EM_OK,
+ "EO B start:%" PRI_STAT " %" PRI_STAT "",
+ ret, start_ret);
+
+ /* Create and start EO "C" */
+ eo = em_eo_create("EO C", error_start, NULL, error_stop, NULL,
+ error_receive, &error_shm->eo_error_c);
+ queue = em_queue_create("queue C", EM_QUEUE_TYPE_ATOMIC,
+ EM_QUEUE_PRIO_HIGH, EM_QUEUE_GROUP_DEFAULT,
+ NULL);
+
+ ret = em_eo_add_queue_sync(eo, queue);
+ test_fatal_if(ret != EM_OK,
+ "EO add queue:%" PRI_STAT ".\n"
+ "EO:%" PRI_EO ", queue:%" PRI_QUEUE "",
+ ret, eo, queue);
+
+ error_shm->eo_error_c.eo = eo;
+ error_shm->queue_c = queue;
+
+ /*
+ * Note: No 'EO C' specific error handler. Use the application specific
+ * global error handler instead.
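+ * (Only 'EO A' registered an EO-specific handler above, so errors
+ * reported in the context of EO B and EO C always end up in the
+ * global error handler.)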
+ */
+
+ ret = em_eo_start_sync(eo, &start_ret, NULL);
+ test_fatal_if(ret != EM_OK || start_ret != EM_OK,
+ "EO C start:%" PRI_STAT " %" PRI_STAT "",
+ ret, start_ret);
+
+ /*
+ * Send an event to EO A.
+ * Store EO B's queue as the destination queue for EO A.
+ */
+ event = em_alloc(sizeof(error_event_t), EM_EVENT_TYPE_SW,
+ error_shm->pool);
+ test_fatal_if(event == EM_EVENT_UNDEF, "Alloc failed");
+
+ error = em_event_pointer(event);
+ error->dest = error_shm->queue_b;
+ error->seq = 0;
+ error->fatal = 0;
+
+ ret = em_send(event, error_shm->queue_a);
+ test_fatal_if(ret != EM_OK, "Send:%" PRI_STAT " Queue:%" PRI_QUEUE "",
+ ret, error_shm->queue_a);
+
+ /* Send event to EO C. No dest queue stored since fatal flag is set */
+ event = em_alloc(sizeof(error_event_t), EM_EVENT_TYPE_SW,
+ error_shm->pool);
+ test_fatal_if(event == EM_EVENT_UNDEF, "Alloc failed");
+
+ error = em_event_pointer(event);
+ error->dest = EM_QUEUE_UNDEF; /* Don't care, never resent */
+ error->seq = 0;
+ error->fatal = 1; /* Generate a fatal error when received */
+
+ ret = em_send(event, error_shm->queue_c);
+ test_fatal_if(ret != EM_OK, "Send:%" PRI_STAT " Queue:%" PRI_QUEUE "",
+ ret, error_shm->queue_c);
+}
+
+void test_stop(const appl_conf_t *appl_conf)
+{
+ const int core = em_core_id();
+ const em_eo_t eo_a = error_shm->eo_error_a.eo;
+ const em_eo_t eo_b = error_shm->eo_error_b.eo;
+ const em_eo_t eo_c = error_shm->eo_error_c.eo;
+ em_status_t stat;
+
+ (void)appl_conf;
+
+ APPL_PRINT("%s() on EM-core %d\n", __func__, core);
+
+ stat = em_eo_stop_sync(eo_a);
+ if (stat != EM_OK)
+ APPL_EXIT_FAILURE("EO A stop failed!");
+ stat = em_eo_stop_sync(eo_b);
+ if (stat != EM_OK)
+ APPL_EXIT_FAILURE("EO B stop failed!");
+ stat = em_eo_stop_sync(eo_c);
+ if (stat != EM_OK)
+ APPL_EXIT_FAILURE("EO C stop failed!");
+
+ stat = em_eo_unregister_error_handler(eo_a);
+ if (stat != EM_OK)
+ APPL_EXIT_FAILURE("EO A unregister error handler failed!");
+
+ stat = em_eo_delete(eo_a);
+ if (stat != EM_OK)
+ APPL_EXIT_FAILURE("EO A delete failed!");
+ stat = em_eo_delete(eo_b);
+ if (stat != EM_OK)
+ APPL_EXIT_FAILURE("EO B delete failed!");
+ stat = em_eo_delete(eo_c);
+ if (stat != EM_OK)
+ APPL_EXIT_FAILURE("EO C delete failed!");
+
+ stat = em_unregister_error_handler();
+ if (stat != EM_OK)
+ APPL_EXIT_FAILURE("Unregister error handler failed!");
+}
+
+void test_term(const appl_conf_t *appl_conf)
+{
+ (void)appl_conf;
+ int core = em_core_id();
+
+ APPL_PRINT("%s() on EM-core %d\n", __func__, core);
+
+ if (core == 0)
+ env_shared_free(error_shm);
+
+ fflush(NULL);
+}
+
+/**
+ * @private
+ *
+ * EO specific error handler.
+ *
+ * @return The function may not return depending on implementation/error
+ * code/error scope. If it returns, the return value is the original
+ * (or modified) error code from the caller.
+ */
+static em_status_t
+eo_specific_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope,
+ va_list args)
+{
+ return combined_error_handler("Appl EO specific error handler",
+ eo, error, escope, args);
+}
+
+/**
+ * @private
+ *
+ * Global error handler.
+ *
+ * @return The function may not return depending on implementation/error
+ * code/error scope. If it returns, the return value is the original
+ * (or modified) error code from the caller.
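+ *
+ * Note: this handler is reached only for errors with no EO-specific
+ * handler in effect; errors occurring in EO A's context go to the
+ * EO-specific handler registered in test_start() instead.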
+ */ +static em_status_t +global_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, + va_list args) +{ + return combined_error_handler("Appl Global error handler ", + eo, error, escope, args); +} + +/** + * @private + * + * Error handler implementation for both global and EO specific handlers + * registered by the application. + * + * @return The function may not return depending on implementation/error + * code/error scope. If it returns, the return value is the original + * (or modified) error code from the caller. + */ +static em_status_t +combined_error_handler(const char *handler_name, em_eo_t eo, em_status_t error, + em_escope_t escope, va_list args) +{ + em_queue_t queue; + const char *str; + unsigned int seq; + + /* First handle EM-internal errors */ + if (EM_ESCOPE_API(escope)) { + /* EM API error: call em_error_format_string() */ + char error_str[512]; + + em_error_format_string(error_str, sizeof(error_str), eo, + error, escope, args); + + APPL_PRINT("%s: EO %" PRI_EO " error 0x%" PRIxSTAT " escope 0x%X\n" + "- EM info: %s", + handler_name, eo, error, escope, error_str); + + if (EM_ERROR_IS_FATAL(error)) + abort(); + + return error; + } + + /* + * Application Errors: + */ + + /* Application FATAL: */ + if (EM_ERROR_IS_FATAL(error)) { + if (error == TEST_ERROR_FATAL) { + /* + * Application handling of test FATAL error. + * Print it and return since it's a fake fatal error. + */ + APPL_PRINT("THIS IS A FATAL ERROR!!\n" + "%s: EO %" PRI_EO " error 0x%" PRIxSTAT " escope 0x%X\n" + "Return from fatal.\n\n", + handler_name, eo, error, escope); + return error; + } + + /* Real application FATAL error - abort! */ + APPL_PRINT("%s: EO %" PRI_EO " error 0x%" PRIxSTAT " escope 0x%X\n", + handler_name, eo, error, escope); + abort(); + } + + /* Application non-fatal: */ + switch (escope) { + case APPL_ESCOPE_STR: + str = va_arg(args, const char*); + APPL_PRINT("%s: EO %" PRI_EO " error 0x%" PRIxSTAT " escope 0x%X\t" + "ARGS: %s\n", handler_name, eo, error, + escope, str); + break; + + case APPL_ESCOPE_STR_Q: + str = va_arg(args, const char*); + queue = va_arg(args, em_queue_t); + APPL_PRINT("%s: EO %" PRI_EO " error 0x%" PRIxSTAT " escope 0x%X\t" + "ARGS: %s %" PRI_QUEUE "\n", handler_name, + eo, error, escope, str, queue); + break; + + case APPL_ESCOPE_STR_Q_SEQ: + str = va_arg(args, const char*); + queue = va_arg(args, em_queue_t); + seq = va_arg(args, unsigned int); + APPL_PRINT("%s: EO %" PRI_EO " error 0x%" PRIxSTAT " escope 0x%X\t" + "ARGS: %s %" PRI_QUEUE " %u\n", handler_name, + eo, error, escope, str, queue, seq); + break; + + case APPL_ESCOPE_OTHER: + APPL_PRINT("%s: EO %" PRI_EO " error 0x%" PRIxSTAT " escope 0x%X\n", + handler_name, eo, error, escope); + break; + + default: /* Unexpected application error - abort!*/ + APPL_PRINT("%s: Unexpected Application Error:\n" + " EO %" PRI_EO " error 0x%" PRIxSTAT " escope 0x%X\n", + handler_name, eo, error, escope); + abort(); + }; + + return error; +} + +/** + * @private + * + * EO receive function. + * + * Report various kinds of errors to demonstrate the EM error handling API. 
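+ *
+ * Each em_error() call below passes escope-specific arguments as
+ * varargs, e.g.:
+ *   em_error(TEST_ERROR_3, APPL_ESCOPE_STR_Q, "Third error", queue);
+ * The registered handler unpacks them in the same order, here as
+ * va_arg(args, const char *) followed by va_arg(args, em_queue_t).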
+ * + */ +static void +error_receive(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx) +{ + em_queue_t dest; + eo_context_t *eo_ctx = eo_context; + error_event_t *errev; + em_status_t ret; + unsigned int seq; + + (void)type; + (void)q_ctx; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + errev = em_event_pointer(event); + dest = errev->dest; + errev->dest = queue; + + if (errev->fatal) { + APPL_PRINT("\nError log from %s [%u] on core %i!\n", + eo_ctx->name, errev->seq, em_core_id()); + em_free(event); + /* Report a fatal error */ + em_error(TEST_ERROR_FATAL, APPL_ESCOPE_OTHER); + return; + } + + APPL_PRINT("Error log from %s [%u] on core %i!\n", eo_ctx->name, + errev->seq, em_core_id()); + + /* error escope args */ + em_error(TEST_ERROR_1, APPL_ESCOPE_OTHER); + em_error(TEST_ERROR_2, APPL_ESCOPE_STR, "Second error"); + em_error(TEST_ERROR_3, APPL_ESCOPE_STR_Q, "Third error", queue); + em_error(TEST_ERROR_4, APPL_ESCOPE_STR_Q_SEQ, "Fourth error", + queue, errev->seq); + + /* Example of an API call error - generates an EM API error */ + em_free(EM_EVENT_UNDEF); + + errev->seq++; + /* store 'seq' before sending event */ + seq = errev->seq; + + delay_spin(DELAY_SPIN_COUNT); + + ret = em_send(event, dest); + if (unlikely(ret != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, + "Send:%" PRI_STAT " Queue:%" PRI_QUEUE "", + ret, dest); + return; + } + + /* Request a fatal error to be generated every 8th event by 'EO C' */ + if ((seq & 0x7) == 0x7) { + /* Send a new event to EO 'C' to cause a fatal error */ + event = em_alloc(sizeof(error_event_t), EM_EVENT_TYPE_SW, + error_shm->pool); + test_fatal_if(event == EM_EVENT_UNDEF, "Alloc failed"); + + errev = em_event_pointer(event); + errev->dest = EM_QUEUE_UNDEF; /* Don't care, never resent */ + errev->seq = 0; + errev->fatal = 1; + + ret = em_send(event, error_shm->queue_c); + if (unlikely(ret != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, + "Send:%" PRI_STAT " Queue:%" PRI_QUEUE "", + ret, error_shm->queue_c); + return; + } + } +} + +/** + * @private + * + * EO start function. + * + */ +static em_status_t +error_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + eo_context_t *eo_ctx = eo_context; + + (void)conf; + + em_eo_get_name(eo, eo_ctx->name, sizeof(eo_ctx->name)); + + APPL_PRINT("Error test start (%s, eo id %" PRI_EO ")\n", + eo_ctx->name, eo); + + return EM_OK; +} + +/** + * @private + * + * EO stop function. + * + */ +static em_status_t +error_stop(void *eo_context, em_eo_t eo) +{ + em_status_t stat; + + (void)eo_context; + + APPL_PRINT("Error test stop function (EO:%" PRI_EO ")\n", eo); + + stat = em_eo_remove_queue_all_sync(eo, EM_TRUE); + if (stat != EM_OK) + APPL_EXIT_FAILURE("EO:%" PRI_EO " rem all queues failed!", eo); + + return EM_OK; +} diff --git a/programs/example/queue_group/queue_group.c b/programs/example/queue_group/queue_group.c index 6f545858..ef603671 100644 --- a/programs/example/queue_group/queue_group.c +++ b/programs/example/queue_group/queue_group.c @@ -1,1096 +1,1096 @@ -/* - * Copyright (c) 2012, Nokia Siemens Networks - * Copyright (c) 2014, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * @file
- *
- * Event Machine queue group feature test.
- *
- * Creates an EO with two queues: a notification queue and a data event queue.
- * The notif queue belongs to the default queue group and can be processed on
- * any core while the data queue belongs to a newly created queue group called
- * "test_qgrp". The EO-receive function receives a number of data events and
- * then modifies the test queue group (i.e. changes the cores allowed to
- * process events from the data event queue). The test is restarted when the
- * queue group has been modified enough times to include each core at least
- * once.
- */
-
-#include <string.h>
-#include <stdio.h>
-
-#include <event_machine.h>
-#include <event_machine/platform/env/environment.h>
-
-#include "cm_setup.h"
-#include "cm_error_handler.h"
-
-/*
- * Defines & macros
- */
-#define TEST_PRINT_COUNT 5
-#define TEST_QGRP_NAME_LEN EM_QUEUE_GROUP_NAME_LEN
-#define TEST_QGRP_NAME_BASE "QGrp" /* Usage: QGrp001, QGrp002 */
-
-/** The maximum number of cores this test supports */
-#define MAX_CORES 64
-
-/**
- * The number of data events to allocate, these are sent many rounds through
- * the data test_queue for each core mask in the tested queue group
- */
-#define EVENT_DATA_ALLOC_NBR (MAX_CORES * 16)
-
-/** Round 'val' to the next multiple of 'N' */
-#define ROUND_UP(val, N) ((((val) + ((N) - 1)) / (N)) * (N))
-
-/**
- * EO context used by the application
- *
- * Cache line alignment and padding taken care of in 'qgrp_shm_t'
- */
-typedef struct app_eo_ctx_t {
- em_eo_t eo;
-
- em_queue_t notif_queue;
- em_queue_group_t notif_qgrp;
-
- em_queue_t test_queue;
- em_queue_type_t test_queue_type;
- /** Has the test_queue been added to the EO? */
- bool test_queue_added;
-
- em_queue_group_t test_qgrp;
- em_event_group_t event_group;
-
- char test_qgrp_name[TEST_QGRP_NAME_LEN];
- int test_qgrp_name_nbr;
-
- em_core_mask_t core_mask_max;
-
- uint64_t qgrp_modify_count;
- uint64_t modify_threshold;
- uint64_t print_threshold;
- uint64_t tot_modify_count;
- uint64_t tot_modify_count_check;
-} app_eo_ctx_t;
-
-/**
- * Queue context for the test queue (receives data events, NOT notifications)
- *
- * Cache line alignment and padding taken care of in 'qgrp_shm_t'
- */
-typedef struct app_q_ctx_t {
- /*
- * Use atomic operations to suit any queue type.
- * An atomic queue does not need this but parallel and - * parallel-ordered do so opt to always use. - */ - env_atomic64_t event_count; -} app_q_ctx_t; - -/** - * Application event - */ -typedef union app_event_t { - /** Event id: notification */ - #define EVENT_NOTIF 1 - /** Event id: data */ - #define EVENT_DATA 2 - - /** Id is first in all events */ - uint32_t id; - - /** Event: notification */ - struct { - uint32_t id; - enum { - NOTIF_START_DONE, - NOTIF_RESTART, - NOTIF_QUEUE_GROUP_MODIFY_DONE_FIRST, - NOTIF_QUEUE_GROUP_MODIFY_DONE, - NOTIF_EVENT_GROUP_DATA_DONE - } type; - - em_queue_group_t used_group; - em_core_mask_t core_mask; - } notif; - - /** Event: data */ - struct { - uint32_t id; - em_queue_group_t used_group; - } data; -} app_event_t; - -/** - * Statistics for each core, pad to cache line size - */ -typedef union core_stat_t { - uint8_t u8[ENV_CACHE_LINE_SIZE] ENV_CACHE_LINE_ALIGNED; - struct { - uint64_t event_count; - }; -} core_stat_t; - -COMPILE_TIME_ASSERT(sizeof(core_stat_t) == ENV_CACHE_LINE_SIZE, - CORE_STAT_T__SIZE_ERROR); - -/** - * Queue Group test shared memory - */ -typedef struct qgrp_shm_t { - em_pool_t pool ENV_CACHE_LINE_ALIGNED; - - /* Number of EM cores running the application */ - unsigned int core_count; - - /** The application has seen the exit_flag and is ready for tear down */ - env_atomic32_t exit_ack; - - app_eo_ctx_t app_eo_ctx ENV_CACHE_LINE_ALIGNED; - - app_q_ctx_t app_q_ctx ENV_CACHE_LINE_ALIGNED; - - core_stat_t core_stat[MAX_CORES] ENV_CACHE_LINE_ALIGNED; -} qgrp_shm_t; - -COMPILE_TIME_ASSERT(sizeof(qgrp_shm_t) % ENV_CACHE_LINE_SIZE == 0, - QGRP_SHM_T__SIZE_ERROR); -COMPILE_TIME_ASSERT(offsetof(qgrp_shm_t, app_eo_ctx) % ENV_CACHE_LINE_SIZE - == 0, OFFSETOF_EO_CTX_ERROR); -COMPILE_TIME_ASSERT(offsetof(qgrp_shm_t, app_q_ctx) % ENV_CACHE_LINE_SIZE - == 0, OFFSETOF_Q_CTX_ERROR); -COMPILE_TIME_ASSERT(offsetof(qgrp_shm_t, core_stat) % ENV_CACHE_LINE_SIZE - == 0, OFFSETOF_CORE_STAT_ERROR); - -/** EM-core local pointer to shared memory */ -static ENV_LOCAL qgrp_shm_t *qgrp_shm; - -static void -receive(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context); - -static inline void -receive_event_notif(app_eo_ctx_t *eo_ctx, em_event_t event, - em_queue_t queue, app_q_ctx_t *q_ctx); - -static void -notif_start_done(app_eo_ctx_t *eo_ctx, em_event_t event, em_queue_t queue); -static void -notif_queue_group_modify_done(app_eo_ctx_t *eo_ctx, em_event_t event, - em_queue_t queue); -static void -notif_event_group_data_done(app_eo_ctx_t *eo_ctx, em_event_t event, - em_queue_t queue); - -static inline void -receive_event_data(app_eo_ctx_t *eo_ctx, em_event_t event, - em_queue_t queue, app_q_ctx_t *q_ctx); - -static void await_exit_ack(void); - -static em_status_t -start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); - -static em_status_t -stop(void *eo_context, em_eo_t eo); - -static em_status_t -start_local(void *eo_context, em_eo_t eo); - -static em_status_t -stop_local(void *eo_context, em_eo_t eo); - -static void -next_core_mask(em_core_mask_t *new_mask, em_core_mask_t *max_mask, int count); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Init of the Queue Group test application. - * - * @attention Run on all cores. 
- * - * @see cm_setup() for setup and dispatch. - */ -void test_init(const appl_conf_t *appl_conf) -{ - (void)appl_conf; - int core = em_core_id(); - - if (core == 0) { - qgrp_shm = env_shared_reserve("QueueGroupSharedMem", - sizeof(qgrp_shm_t)); - em_register_error_handler(test_error_handler); - } else { - qgrp_shm = env_shared_lookup("QueueGroupSharedMem"); - } - - if (qgrp_shm == NULL) { - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Queue Group test init failed on EM-core: %u\n", - em_core_id()); - } else if (core == 0) { - memset(qgrp_shm, 0, sizeof(qgrp_shm_t)); - } -} - -/** - * Startup of the Queue Group test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. - */ -void test_start(const appl_conf_t *appl_conf) -{ - app_event_t *app_event; - em_event_t event; - em_queue_group_t default_group; - em_queue_t notif_queue; - em_event_group_t event_group; - em_status_t err, start_err = EM_ERROR; - em_eo_t eo; - em_notif_t notif_tbl[1]; - - /* - * Store the event pool to use, use the EM default pool if no other - * pool is provided through the appl_conf. - */ - if (appl_conf->num_pools >= 1) - qgrp_shm->pool = appl_conf->pools[0]; - else - qgrp_shm->pool = EM_POOL_DEFAULT; - - /* Store the number of EM-cores running the application */ - qgrp_shm->core_count = appl_conf->core_count; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%d\n" - " Application running on %u EM-cores (procs:%u, threads:%u)\n" - " using event pool:%" PRI_POOL "\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - appl_conf->core_count, appl_conf->num_procs, appl_conf->num_threads, - qgrp_shm->pool); - - test_fatal_if(qgrp_shm->pool == EM_POOL_UNDEF, - "Undefined application event pool!"); - - test_fatal_if(qgrp_shm->core_count > MAX_CORES, - "Test started on too many cores(%i)!\n" - "Max supported core count for this test is: %u\n", - qgrp_shm->core_count, MAX_CORES); - - env_atomic32_init(&qgrp_shm->exit_ack); - env_atomic32_set(&qgrp_shm->exit_ack, 0); - - /* - * Create the application EO and queues - */ - eo = em_eo_create("test_appl_queue_group", - start, start_local, stop, stop_local, - receive, &qgrp_shm->app_eo_ctx); - - default_group = em_queue_group_find("default"); - /* Verify that the find-func worked correctly. 
*/ - test_fatal_if(default_group != EM_QUEUE_GROUP_DEFAULT, - "Default queue group(%" PRI_QGRP ") not found!", - default_group); - - notif_queue = em_queue_create("notif_queue", EM_QUEUE_TYPE_ATOMIC, - EM_QUEUE_PRIO_HIGH, default_group, NULL); - test_fatal_if(notif_queue == EM_QUEUE_UNDEF, - "Notification queue creation failed!"); - - err = em_eo_add_queue_sync(eo, notif_queue); - test_fatal_if(err != EM_OK, - "Notification queue add to EO failed:%" PRI_STAT "", err); - - event_group = em_event_group_create(); - test_fatal_if(event_group == EM_EVENT_GROUP_UNDEF, - "Event group creation failed!"); - - qgrp_shm->app_eo_ctx.eo = eo; - qgrp_shm->app_eo_ctx.notif_queue = notif_queue; - qgrp_shm->app_eo_ctx.notif_qgrp = default_group; - qgrp_shm->app_eo_ctx.event_group = event_group; - - APPL_PRINT("Starting EO:%" PRI_EO "\t" - "- Notification Queue=%" PRI_QUEUE "\n", eo, notif_queue); - - event = em_alloc(sizeof(app_event_t), EM_EVENT_TYPE_SW, - qgrp_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, - "Notification event allocation failed"); - app_event = em_event_pointer(event); - memset(app_event, 0, sizeof(*app_event)); - app_event->notif.id = EVENT_NOTIF; - app_event->notif.type = NOTIF_START_DONE; - /* Verify group when receiving */ - app_event->notif.used_group = default_group; - - notif_tbl[0].event = event; - notif_tbl[0].queue = notif_queue; - notif_tbl[0].egroup = EM_EVENT_GROUP_UNDEF; - - err = em_eo_start(eo, &start_err, NULL, 1, notif_tbl); - test_fatal_if(err != EM_OK, - "em_eo_start(%" PRI_EO "):%" PRI_STAT "", eo, err); - test_fatal_if(start_err != EM_OK, - "EO start function:%" PRI_STAT "", - start_err); -} - -void test_stop(const appl_conf_t *appl_conf) -{ - const int core = em_core_id(); - em_status_t err; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %02d\n", __func__, core); - - /* Await 'exit_ack' to be set by the EO */ - await_exit_ack(); - - em_eo_t eo = qgrp_shm->app_eo_ctx.eo; - em_event_group_t egrp; - em_notif_t notif_tbl[1] = { {.event = EM_EVENT_UNDEF} }; - int num_notifs; - - err = em_eo_stop_sync(eo); - test_fatal_if(err != EM_OK, - "EO stop:%" PRI_STAT " EO:%" PRI_EO "", err, eo); - - /* No more dispatching of the EO's events, egrp can be freed */ - - egrp = qgrp_shm->app_eo_ctx.event_group; - if (!em_event_group_is_ready(egrp)) { - num_notifs = em_event_group_get_notif(egrp, 1, notif_tbl); - err = em_event_group_abort(egrp); - if (err == EM_OK && num_notifs == 1) - em_free(notif_tbl[0].event); - } - err = em_event_group_delete(egrp); - test_fatal_if(err != EM_OK, - "egrp:%" PRI_EGRP " delete:%" PRI_STAT " EO:%" PRI_EO "", - egrp, err, eo); -} - -void test_term(const appl_conf_t *appl_conf) -{ - (void)appl_conf; - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %02d\n", __func__, core); - - if (core == 0) { - env_shared_free(qgrp_shm); - em_unregister_error_handler(); - } -} - -/** - * Receive function for the test EO - */ -static void -receive(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context) -{ - app_eo_ctx_t *eo_ctx = eo_context; - app_event_t *app_event = em_event_pointer(event); - /* Only set for the test_queue */ - app_q_ctx_t *q_ctx = queue_context; - - test_fatal_if(em_get_type_major(type) != EM_EVENT_TYPE_SW, - "Unexpected event type: 0x%x", type); - - if (unlikely(appl_shm->exit_flag)) { - /* Handle exit request */ - uint32_t exit_ack = env_atomic32_get(&qgrp_shm->exit_ack); - - if (exit_ack) { - em_free(event); - return; - } - - if (app_event->id == EVENT_NOTIF && - 
(app_event->notif.type == NOTIF_QUEUE_GROUP_MODIFY_DONE_FIRST || - app_event->notif.type == NOTIF_QUEUE_GROUP_MODIFY_DONE)) { - /* can be set by multiple cores */ - if (!exit_ack) - env_atomic32_set(&qgrp_shm->exit_ack, 1); - em_free(event); - return; - } - /* - * Handle events normally until a MODIFY_DONE has been - * received and exit_ack has been set. - */ - } - - switch (app_event->id) { - case EVENT_NOTIF: - receive_event_notif(eo_ctx, event, queue, q_ctx); - break; - case EVENT_DATA: - receive_event_data(eo_ctx, event, queue, q_ctx); - break; - default: - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Unknown event id(%u)!", app_event->id); - break; - } -} - -/** - * Handle the notification events received through the notif_queue - */ -static inline void -receive_event_notif(app_eo_ctx_t *eo_ctx, em_event_t event, - em_queue_t queue, app_q_ctx_t *q_ctx) -{ - app_event_t *app_event = em_event_pointer(event); - em_status_t err; - (void)q_ctx; - - switch (app_event->notif.type) { - case NOTIF_RESTART: - APPL_PRINT("\n" - "***********************************************\n" - "!!! Restarting test !!!\n" - "***********************************************\n" - "\n\n\n"); - eo_ctx->tot_modify_count_check = 0; - notif_start_done(eo_ctx, event, queue); - break; - - case NOTIF_START_DONE: - notif_start_done(eo_ctx, event, queue); - break; - - case NOTIF_QUEUE_GROUP_MODIFY_DONE_FIRST: - err = em_eo_add_queue_sync(eo_ctx->eo, eo_ctx->test_queue); - test_fatal_if(err != EM_OK, - "EO add queue:%" PRI_STAT "", err); - eo_ctx->test_queue_added = true; - notif_queue_group_modify_done(eo_ctx, event, queue); - break; - - case NOTIF_QUEUE_GROUP_MODIFY_DONE: - notif_queue_group_modify_done(eo_ctx, event, queue); - break; - - case NOTIF_EVENT_GROUP_DATA_DONE: - notif_event_group_data_done(eo_ctx, event, queue); - break; - - default: - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Unknown notification type:%i!", - app_event->notif.type); - break; - } -} - -/** Helper for receive_event_notif() */ -static void -notif_start_done(app_eo_ctx_t *eo_ctx, em_event_t event, em_queue_t queue) -{ - em_queue_group_t new_qgrp; - em_queue_type_t new_qtype; - const char *new_qtype_str; - em_core_mask_t core_mask; - em_notif_t notif_tbl; - em_status_t err; - const em_queue_group_t qgrp_curr = em_queue_get_group(queue); - app_event_t *app_event = em_event_pointer(event); - - test_fatal_if(app_event->notif.used_group != qgrp_curr, - "Qgrp mismatch: %" PRI_QGRP "!=%" PRI_QGRP "!", - app_event->notif.used_group, qgrp_curr); - - /* Create a test queue group */ - snprintf(&eo_ctx->test_qgrp_name[0], - sizeof(eo_ctx->test_qgrp_name), "%s%03i", - TEST_QGRP_NAME_BASE, eo_ctx->test_qgrp_name_nbr); - - eo_ctx->test_qgrp_name[TEST_QGRP_NAME_LEN - 1] = '\0'; - eo_ctx->test_qgrp_name_nbr = (eo_ctx->test_qgrp_name_nbr + 1) - % 1000; /* Range 0-999 */ - - /* Start with EM core-0 (it's always running) */ - em_core_mask_zero(&core_mask); - em_core_mask_set(0, &core_mask); - - /* Re-use event */ - app_event->notif.type = NOTIF_QUEUE_GROUP_MODIFY_DONE_FIRST; - app_event->notif.used_group = eo_ctx->notif_qgrp; - - notif_tbl.event = event; /* = app_event->notif */ - notif_tbl.queue = queue; - notif_tbl.egroup = EM_EVENT_GROUP_UNDEF; - - em_core_mask_copy(&app_event->notif.core_mask, &core_mask); - - /* - * Create the queue group! 
- */ - new_qgrp = em_queue_group_create(eo_ctx->test_qgrp_name, &core_mask, - 1, ¬if_tbl); - test_fatal_if(new_qgrp == EM_QUEUE_GROUP_UNDEF, - "Queue group creation failed!"); - - if (eo_ctx->test_qgrp != EM_QUEUE_GROUP_UNDEF) { - /* - * Delete group - no need for notifs since 'modify to zero - * core mask' already done & queue deleted from group. Do the - * delete after the create to force creation of another - * queue group -> avoids always running the test with the same - * queue group. - */ - err = em_queue_group_delete(eo_ctx->test_qgrp, 0, NULL); - test_fatal_if(err != EM_OK, - "Qgrp delete:%" PRI_STAT "", err); - } - /* Store the new queue group to use for this test round */ - eo_ctx->test_qgrp = new_qgrp; - - /* - * Create a test queue for data events. The queue belongs to - * the test queue group. Change the queue type for every new - * test run. - */ - switch (eo_ctx->test_queue_type) { - case EM_QUEUE_TYPE_ATOMIC: - new_qtype = EM_QUEUE_TYPE_PARALLEL; - new_qtype_str = "PARALLEL"; - break; - case EM_QUEUE_TYPE_PARALLEL: - new_qtype = EM_QUEUE_TYPE_PARALLEL_ORDERED; - new_qtype_str = "PARALLEL_ORDERED"; - break; - default: - new_qtype = EM_QUEUE_TYPE_ATOMIC; - new_qtype_str = "ATOMIC"; - break; - } - eo_ctx->test_queue_type = new_qtype; - eo_ctx->test_queue = em_queue_create("test_queue", - eo_ctx->test_queue_type, - EM_QUEUE_PRIO_NORMAL, - eo_ctx->test_qgrp, NULL); - test_fatal_if(eo_ctx->test_queue == EM_QUEUE_UNDEF, - "Test queue creation failed!"); - eo_ctx->test_queue_added = false; - - APPL_PRINT("\n" - "Created test queue:%" PRI_QUEUE " type:%s(%u)\t" - "queue group:%" PRI_QGRP " (name:\"%s\")\n", - eo_ctx->test_queue, new_qtype_str, eo_ctx->test_queue_type, - eo_ctx->test_qgrp, eo_ctx->test_qgrp_name); - - memset(&qgrp_shm->app_q_ctx, 0, sizeof(qgrp_shm->app_q_ctx)); - env_atomic64_init(&qgrp_shm->app_q_ctx.event_count); - - err = em_queue_set_context(eo_ctx->test_queue, &qgrp_shm->app_q_ctx); - test_fatal_if(err != EM_OK, "Set queue context:%" PRI_STAT "", err); - /* - * Synchronize EO context. Event is sent through notification, - * which might have happened before we write the eo_ctx. 
- */ - env_sync_mem(); -} - -/** Helper for receive_event_notif() */ -static void -notif_queue_group_modify_done(app_eo_ctx_t *eo_ctx, em_event_t event, - em_queue_t queue) -{ - em_status_t err; - const em_queue_group_t qgrp_curr = em_queue_get_group(queue); - app_event_t *app_event = em_event_pointer(event); - - test_fatal_if(app_event->notif.used_group != qgrp_curr, - "Qgrp mismatch: %" PRI_QGRP "!=%" PRI_QGRP "!", - app_event->notif.used_group, qgrp_curr); - - if (unlikely(em_core_mask_iszero(&app_event->notif.core_mask))) { - APPL_PRINT("\n" - "*************************************\n" - "All cores removed from QueueGroup!\n" - "*************************************\n"); - - test_fatal_if(eo_ctx->tot_modify_count != - eo_ctx->tot_modify_count_check, - "Modify count != actual count:\t" - "%" PRIu64 " vs %" PRIu64 "", - eo_ctx->tot_modify_count, - eo_ctx->tot_modify_count_check); - - err = em_eo_remove_queue_sync(eo_ctx->eo, - eo_ctx->test_queue); - test_fatal_if(err != EM_OK, - "Remove test queue:%" PRI_STAT "", err); - eo_ctx->test_queue_added = false; - - APPL_PRINT("Deleting test queue:%" PRI_QUEUE ",\t" - "Qgrp ID:%" PRI_QGRP " (name:\"%s\")\n", - eo_ctx->test_queue, eo_ctx->test_qgrp, - eo_ctx->test_qgrp_name); - - err = em_queue_delete(eo_ctx->test_queue); - test_fatal_if(err != EM_OK, - "Delete test queue:%" PRI_STAT "", err); - eo_ctx->test_queue = EM_QUEUE_UNDEF; - - /* - * Delete the queue group later in restart after the - * creation of a new group. This forces the creation - * and usage of at least two different queue groups. - */ - app_event->notif.id = EVENT_NOTIF; - app_event->notif.type = NOTIF_RESTART; - app_event->notif.used_group = eo_ctx->notif_qgrp; - err = em_send(event, eo_ctx->notif_queue); - if (unlikely(err != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, - "Send to notif queue:%" PRI_STAT "", err); - } - } else { - em_notif_t egroup_notif_tbl[1]; - - /* Reuse the event */ - app_event->notif.id = EVENT_NOTIF; - app_event->notif.type = NOTIF_EVENT_GROUP_DATA_DONE; - app_event->notif.used_group = eo_ctx->notif_qgrp; - - egroup_notif_tbl[0].event = event; - egroup_notif_tbl[0].queue = eo_ctx->notif_queue; - egroup_notif_tbl[0].egroup = EM_EVENT_GROUP_UNDEF; - - err = em_event_group_apply(eo_ctx->event_group, - eo_ctx->modify_threshold, 1, - egroup_notif_tbl); - test_fatal_if(err != EM_OK, - "em_event_group_apply():%" PRI_STAT "", err); - - for (int i = 0; i < EVENT_DATA_ALLOC_NBR; i++) { - em_event_t ev_data = em_alloc(sizeof(app_event_t), - EM_EVENT_TYPE_SW, - qgrp_shm->pool); - test_fatal_if(ev_data == EM_EVENT_UNDEF, - "Event alloc failed!"); - - app_event_t *data_event = em_event_pointer(ev_data); - - data_event->id = EVENT_DATA; - data_event->data.used_group = eo_ctx->test_qgrp; - - err = em_send_group(ev_data, eo_ctx->test_queue, - eo_ctx->event_group); - if (unlikely(err != EM_OK)) { - em_free(ev_data); - test_fatal_if(!appl_shm->exit_flag, - "Send to test queue:%" PRI_STAT "", - err); - } - } - } -} - -/** Helper for receive_event_notif() */ -static void -notif_event_group_data_done(app_eo_ctx_t *eo_ctx, em_event_t event, - em_queue_t queue) -{ - em_core_mask_t core_mask, used_mask; - em_notif_t notif_tbl; - em_status_t err; - int core_count; - int i; - const em_queue_group_t qgrp_curr = em_queue_get_group(queue); - app_event_t *app_event = em_event_pointer(event); - - test_fatal_if(app_event->notif.used_group != qgrp_curr, - "Qgrp mismatch: %" PRI_QGRP "!=%" PRI_QGRP "!", - app_event->notif.used_group, qgrp_curr); - - uint64_t mod_cnt = 
++eo_ctx->qgrp_modify_count; - - eo_ctx->tot_modify_count_check++; - - err = em_queue_group_get_mask(eo_ctx->test_qgrp, &used_mask); - test_fatal_if(err != EM_OK, - "Get queue group mask:%" PRI_STAT "", err); - - /* Get the next core mask for the test group */ - next_core_mask(/*New*/ &core_mask, /*Max*/ &eo_ctx->core_mask_max, - eo_ctx->tot_modify_count_check); - - if (mod_cnt >= eo_ctx->print_threshold || - em_core_mask_iszero(&core_mask)) { - char used_mask_str[EM_CORE_MASK_STRLEN]; - char core_mask_str[EM_CORE_MASK_STRLEN]; - - em_core_mask_tostr(used_mask_str, EM_CORE_MASK_STRLEN, - &used_mask); - em_core_mask_tostr(core_mask_str, EM_CORE_MASK_STRLEN, - &core_mask); - APPL_PRINT("\n" - "****************************************\n" - "Received %" PRIu64 " events on Q:%" PRI_QUEUE ":\n" - " QueueGroup:%" PRI_QGRP ", Curr Coremask:%s\n" - "Now Modifying:\n" - " QueueGroup:%" PRI_QGRP ", New Coremask:%s\n" - "****************************************\n", - env_atomic64_get(&qgrp_shm->app_q_ctx.event_count), - eo_ctx->test_queue, eo_ctx->test_qgrp, - used_mask_str, eo_ctx->test_qgrp, core_mask_str); - - eo_ctx->qgrp_modify_count = 0; - } - - /* - * Sanity check: verify that all cores that process the queue - * group actually received events and that other cores do not - * get any events. - */ - core_count = qgrp_shm->core_count; - for (i = 0; i < core_count; i++) { - const uint64_t ev_count = qgrp_shm->core_stat[i].event_count; - char mstr[EM_CORE_MASK_STRLEN]; - - if (em_core_mask_isset(i, &used_mask)) { - if (unlikely(ev_count == 0)) { - em_core_mask_tostr(mstr, EM_CORE_MASK_STRLEN, - &used_mask); - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "No events on core%i, mask:%s", - i, mstr); - } - } else if (unlikely(ev_count > 0)) { - em_core_mask_tostr(mstr, EM_CORE_MASK_STRLEN, - &used_mask); - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Events:%" PRIu64 " on inv.core%i, mask:%s", - ev_count, i, mstr); - } - } - - memset(qgrp_shm->core_stat, 0, sizeof(qgrp_shm->core_stat)); - env_atomic64_set(&qgrp_shm->app_q_ctx.event_count, 0); - - /* Reuse the event */ - app_event->id = EVENT_NOTIF; - app_event->notif.type = NOTIF_QUEUE_GROUP_MODIFY_DONE; - app_event->notif.used_group = eo_ctx->notif_qgrp; - em_core_mask_copy(&app_event->notif.core_mask, &core_mask); - - notif_tbl.event = event; - notif_tbl.queue = eo_ctx->notif_queue; - notif_tbl.egroup = EM_EVENT_GROUP_UNDEF; - - err = em_queue_group_modify(eo_ctx->test_qgrp, &core_mask, - 1, ¬if_tbl); - test_fatal_if(err != EM_OK, - "em_queue_group_modify():%" PRI_STAT "", err); -} - -/** - * Handle the test data events received through the test_queue - * - * Check that the queue group is valid and send the data back to the same - * queue for another round. - * The last event should trigger a notification event to be sent to the - * notif_queue to begin the queue group modification sequence. 
- */ -static inline void -receive_event_data(app_eo_ctx_t *eo_ctx, em_event_t event, - em_queue_t queue, app_q_ctx_t *q_ctx) -{ - int core_id = em_core_id(); - app_event_t *app_event = em_event_pointer(event); - em_queue_group_t qgrp_curr = em_queue_get_group(queue); - em_core_mask_t used_mask; - em_status_t err; - const uint64_t event_count = - env_atomic64_add_return(&q_ctx->event_count, 1); - qgrp_shm->core_stat[core_id].event_count++; - - /* Verify that the queue group is correct & expected */ - test_fatal_if(app_event->data.used_group != qgrp_curr, - "Queue grp mismatch:%" PRI_QGRP "!=%" PRI_QGRP "", - app_event->data.used_group, qgrp_curr); - - /* Verify that this core is a valid receiver of events in this group */ - err = em_queue_group_get_mask(qgrp_curr, &used_mask); - test_fatal_if(err != EM_OK, - "Get queue group mask:%" PRI_STAT "", err); - - if (unlikely(!em_core_mask_isset(core_id, &used_mask))) { - char mask_str[EM_CORE_MASK_STRLEN]; - - em_core_mask_tostr(mask_str, EM_CORE_MASK_STRLEN, &used_mask); - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Core bit not set in core mask! core:%02i mask:%s", - core_id, mask_str); - } - - /* - * Handle the test data event - */ - if (event_count <= eo_ctx->modify_threshold - EVENT_DATA_ALLOC_NBR) { - /* Send the data event for another round */ - err = em_send_group(event, eo_ctx->test_queue, - eo_ctx->event_group); - if (unlikely(err != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, - "Send to test queue:%" PRI_STAT "", err); - } - } else if (event_count <= eo_ctx->modify_threshold) { - /* - * Free the events for the last round, an event group - * notification event should be triggered when the last event - * has been processed - */ - em_free(event); - } else { - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xacdc, - "Invalid event count(%u)!", event_count); - } -} - -/** - * Await exit_ack to be set by the EO. - */ -static void await_exit_ack(void) -{ - env_time_t t_max = env_time_global_from_ns(20 * 1000000000ULL); /*20s*/ - env_time_t t_now = ENV_TIME_NULL; - env_time_t t_start = env_time_global(); - env_time_t t_end = env_time_sum(t_start, t_max); - uint64_t ns; - uint32_t exit_ack = 0; - - long double sec; - - do { - if (!exit_ack) - em_dispatch(1); - exit_ack = env_atomic32_get(&qgrp_shm->exit_ack); - t_now = env_time_global(); - } while (!exit_ack && env_time_cmp(t_now, t_end) < 0); - - ns = env_time_diff_ns(t_now, t_start); - sec = (long double)ns / 1000000000.0; - - if (unlikely(!exit_ack)) { - test_error(EM_ERR_TIMEOUT, 0xdead, - "Timeout: No exit_ack within %Lfs!\n", sec); - return; - } - - APPL_PRINT("exit_ack in %Lfs on EM-core:%02d => Tearing down\n", - sec, em_core_id()); -} - -/** - * Global start function for the test EO - */ -static em_status_t -start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - app_eo_ctx_t *eo_ctx = eo_context; - uint64_t tot_modify_count = 0; - uint64_t tmp; - int ret; - - (void)eo; - (void)conf; - - APPL_PRINT("Queue Group Test - Global EO Start\n"); - - snprintf(&eo_ctx->test_qgrp_name[0], - sizeof(eo_ctx->test_qgrp_name), - "%s%03i", TEST_QGRP_NAME_BASE, 0); - - em_core_mask_zero(&eo_ctx->core_mask_max); - em_core_mask_set_count(qgrp_shm->core_count, &eo_ctx->core_mask_max); - - /* - * The values used below in calculations are derived from the way the - * next_core_mask() function calculates the next core mask to use. 
- */ - ret = em_core_mask_get_bits(&tmp, 1, &eo_ctx->core_mask_max); - if (unlikely(ret != 1)) { - char mask_str[EM_CORE_MASK_STRLEN]; - - em_core_mask_tostr(mask_str, EM_CORE_MASK_STRLEN, - &eo_ctx->core_mask_max); - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "em_core_mask_get_bits(coremask=%s), ret=%i", - mask_str, ret); - } - - do { - tot_modify_count += (tmp & 0xFF) + 1; - tmp = (tmp >> 4); - if (tmp < 0x10) - break; - } while (tmp); - - tot_modify_count -= 1; - - eo_ctx->tot_modify_count = tot_modify_count; - eo_ctx->tot_modify_count_check = 0; - - eo_ctx->print_threshold = tot_modify_count / TEST_PRINT_COUNT; - - if (eo_ctx->print_threshold == 0) - eo_ctx->print_threshold = 1; - - /* - * 256*15 - 1 is the maximum number of core masks tested when 64 - * cores (max) are running this test. - */ - eo_ctx->modify_threshold = - ((256 * 15 * 0x1000) - 1) / tot_modify_count; - eo_ctx->modify_threshold = ROUND_UP(eo_ctx->modify_threshold, - EVENT_DATA_ALLOC_NBR); - - APPL_PRINT("\n" - "*******************************************************\n" - "Test threshold values set:\n" - " Tot group modifies: %" PRIu64 "\n" - " Events received on group before modify: %" PRIu64 "\n" - " Group modify print threshold: %" PRIu64 "\n" - "*******************************************************\n" - "\n", - tot_modify_count, eo_ctx->modify_threshold, - eo_ctx->print_threshold); - - return EM_OK; -} - -/** - * Global stop function for the test EO - */ -static em_status_t -stop(void *eo_context, em_eo_t eo) -{ - em_status_t err; - app_eo_ctx_t *eo_ctx = eo_context; - - /* remove and delete all of the EO's queues */ - err = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(err != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - err, eo); - if (eo_ctx->test_queue != EM_QUEUE_UNDEF && !eo_ctx->test_queue_added) { - err = em_queue_delete(eo_ctx->test_queue); - test_fatal_if(err != EM_OK, - "Delete test queue:%" PRI_STAT "", err); - } - - /* delete the EO at the end of the stop-function */ - err = em_eo_delete(eo); - test_fatal_if(err != EM_OK, - "EO delete:%" PRI_STAT " EO:%" PRI_EO "", - err, eo); - APPL_PRINT("Queue Group Test - Global EO Stop\n"); - - return EM_OK; -} - -/** - * Local start function for the test EO - */ -static em_status_t -start_local(void *eo_context, em_eo_t eo) -{ - (void)eo_context; - (void)eo; - - APPL_PRINT("Queue Group Test - Local EO Start: EM-core:%02d\n", - em_core_id()); - return EM_OK; -} - -/** - * Local stop function for the test EO - */ -static em_status_t -stop_local(void *eo_context, em_eo_t eo) -{ - (void)eo_context; - (void)eo; - - APPL_PRINT("Queue Group Test - Local EO Stop: EM-core:%02d\n", - em_core_id()); - return EM_OK; -} - -/** - * Update the core mask: - * E.g. if max_mask is 0xFFFF: 0x0001-0x0100 (256 masks), - * 0x0010->0x1000 (256 masks), 0x0100-0x0000 (255 masks) - */ -static void -next_core_mask(em_core_mask_t *new_mask, em_core_mask_t *max_mask, int count) -{ - uint64_t mask64 = ((uint64_t)(count % 256) + 1) << (4 * (count / 256)); - - em_core_mask_zero(new_mask); - em_core_mask_set_bits(&mask64, 1, new_mask); - em_core_mask_and(new_mask, new_mask, max_mask); -} +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2014, Nokia Solutions and Networks + * All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * Event Machine queue group feature test.
+ *
+ * Creates an EO with two queues: a notification queue and a data event queue.
+ * The notif queue belongs to the default queue group and can be processed on
+ * any core while the data queue belongs to a newly created queue group called
+ * "test_qgrp". The EO-receive function receives a number of data events and
+ * then modifies the test queue group (i.e. changes the cores allowed to
+ * process events from the data event queue). The test is restarted when the
+ * queue group has been modified enough times to include each core at least
+ * once.
+ */
+
+#include <string.h>
+#include <stdio.h>
+
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+
+/*
+ * Defines & macros
+ */
+#define TEST_PRINT_COUNT 5
+#define TEST_QGRP_NAME_LEN EM_QUEUE_GROUP_NAME_LEN
+#define TEST_QGRP_NAME_BASE "QGrp" /* Usage: QGrp001, QGrp002 */
+
+/** The maximum number of cores this test supports */
+#define MAX_CORES 64
+
+/**
+ * The number of data events to allocate, these are sent many rounds through
+ * the data test_queue for each core mask in the tested queue group
+ */
+#define EVENT_DATA_ALLOC_NBR (MAX_CORES * 16)
+
+/** Round 'val' to the next multiple of 'N' */
+#define ROUND_UP(val, N) ((((val) + ((N) - 1)) / (N)) * (N))
+
+/**
+ * EO context used by the application
+ *
+ * Cache line alignment and padding taken care of in 'qgrp_shm_t'
+ */
+typedef struct app_eo_ctx_t {
+ em_eo_t eo;
+
+ em_queue_t notif_queue;
+ em_queue_group_t notif_qgrp;
+
+ em_queue_t test_queue;
+ em_queue_type_t test_queue_type;
+ /** Has the test_queue been added to the EO?
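+ * Set when the queue has been added to the EO with
+ * em_eo_add_queue_sync() and cleared when it is removed; the EO
+ * stop function uses this to decide whether test_queue still needs
+ * a separate em_queue_delete().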
*/ + bool test_queue_added; + + em_queue_group_t test_qgrp; + em_event_group_t event_group; + + char test_qgrp_name[TEST_QGRP_NAME_LEN]; + int test_qgrp_name_nbr; + + em_core_mask_t core_mask_max; + + uint64_t qgrp_modify_count; + uint64_t modify_threshold; + uint64_t print_threshold; + uint64_t tot_modify_count; + uint64_t tot_modify_count_check; +} app_eo_ctx_t; + +/** + * Queue context for the test queue (receives data events, NOT notifications) + * + * Cache line alignment and padding taken care of in 'qgrp_shm_t' + */ +typedef struct app_q_ctx_t { + /* + * Use atomic operations to suit any queue type. + * An atomic queue does not need this but parallel and + * parallel-ordered do so opt to always use. + */ + env_atomic64_t event_count; +} app_q_ctx_t; + +/** + * Application event + */ +typedef union app_event_t { + /** Event id: notification */ + #define EVENT_NOTIF 1 + /** Event id: data */ + #define EVENT_DATA 2 + + /** Id is first in all events */ + uint32_t id; + + /** Event: notification */ + struct { + uint32_t id; + enum { + NOTIF_START_DONE, + NOTIF_RESTART, + NOTIF_QUEUE_GROUP_MODIFY_DONE_FIRST, + NOTIF_QUEUE_GROUP_MODIFY_DONE, + NOTIF_EVENT_GROUP_DATA_DONE + } type; + + em_queue_group_t used_group; + em_core_mask_t core_mask; + } notif; + + /** Event: data */ + struct { + uint32_t id; + em_queue_group_t used_group; + } data; +} app_event_t; + +/** + * Statistics for each core, pad to cache line size + */ +typedef union core_stat_t { + uint8_t u8[ENV_CACHE_LINE_SIZE] ENV_CACHE_LINE_ALIGNED; + struct { + uint64_t event_count; + }; +} core_stat_t; + +COMPILE_TIME_ASSERT(sizeof(core_stat_t) == ENV_CACHE_LINE_SIZE, + CORE_STAT_T__SIZE_ERROR); + +/** + * Queue Group test shared memory + */ +typedef struct qgrp_shm_t { + em_pool_t pool ENV_CACHE_LINE_ALIGNED; + + /* Number of EM cores running the application */ + unsigned int core_count; + + /** The application has seen the exit_flag and is ready for tear down */ + env_atomic32_t exit_ack; + + app_eo_ctx_t app_eo_ctx ENV_CACHE_LINE_ALIGNED; + + app_q_ctx_t app_q_ctx ENV_CACHE_LINE_ALIGNED; + + core_stat_t core_stat[MAX_CORES] ENV_CACHE_LINE_ALIGNED; +} qgrp_shm_t; + +COMPILE_TIME_ASSERT(sizeof(qgrp_shm_t) % ENV_CACHE_LINE_SIZE == 0, + QGRP_SHM_T__SIZE_ERROR); +COMPILE_TIME_ASSERT(offsetof(qgrp_shm_t, app_eo_ctx) % ENV_CACHE_LINE_SIZE + == 0, OFFSETOF_EO_CTX_ERROR); +COMPILE_TIME_ASSERT(offsetof(qgrp_shm_t, app_q_ctx) % ENV_CACHE_LINE_SIZE + == 0, OFFSETOF_Q_CTX_ERROR); +COMPILE_TIME_ASSERT(offsetof(qgrp_shm_t, core_stat) % ENV_CACHE_LINE_SIZE + == 0, OFFSETOF_CORE_STAT_ERROR); + +/** EM-core local pointer to shared memory */ +static ENV_LOCAL qgrp_shm_t *qgrp_shm; + +static void +receive(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context); + +static inline void +receive_event_notif(app_eo_ctx_t *eo_ctx, em_event_t event, + em_queue_t queue, app_q_ctx_t *q_ctx); + +static void +notif_start_done(app_eo_ctx_t *eo_ctx, em_event_t event, em_queue_t queue); +static void +notif_queue_group_modify_done(app_eo_ctx_t *eo_ctx, em_event_t event, + em_queue_t queue); +static void +notif_event_group_data_done(app_eo_ctx_t *eo_ctx, em_event_t event, + em_queue_t queue); + +static inline void +receive_event_data(app_eo_ctx_t *eo_ctx, em_event_t event, + em_queue_t queue, app_q_ctx_t *q_ctx); + +static void await_exit_ack(void); + +static em_status_t +start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); + +static em_status_t +stop(void *eo_context, em_eo_t eo); + +static em_status_t 
+start_local(void *eo_context, em_eo_t eo); + +static em_status_t +stop_local(void *eo_context, em_eo_t eo); + +static void +next_core_mask(em_core_mask_t *new_mask, em_core_mask_t *max_mask, int count); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Init of the Queue Group test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void test_init(const appl_conf_t *appl_conf) +{ + (void)appl_conf; + int core = em_core_id(); + + if (core == 0) { + qgrp_shm = env_shared_reserve("QueueGroupSharedMem", + sizeof(qgrp_shm_t)); + em_register_error_handler(test_error_handler); + } else { + qgrp_shm = env_shared_lookup("QueueGroupSharedMem"); + } + + if (qgrp_shm == NULL) { + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Queue Group test init failed on EM-core: %u\n", + em_core_id()); + } else if (core == 0) { + memset(qgrp_shm, 0, sizeof(qgrp_shm_t)); + } +} + +/** + * Startup of the Queue Group test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. + */ +void test_start(const appl_conf_t *appl_conf) +{ + app_event_t *app_event; + em_event_t event; + em_queue_group_t default_group; + em_queue_t notif_queue; + em_event_group_t event_group; + em_status_t err, start_err = EM_ERROR; + em_eo_t eo; + em_notif_t notif_tbl[1]; + + /* + * Store the event pool to use, use the EM default pool if no other + * pool is provided through the appl_conf. + */ + if (appl_conf->num_pools >= 1) + qgrp_shm->pool = appl_conf->pools[0]; + else + qgrp_shm->pool = EM_POOL_DEFAULT; + + /* Store the number of EM-cores running the application */ + qgrp_shm->core_count = appl_conf->core_count; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%d\n" + " Application running on %u EM-cores (procs:%u, threads:%u)\n" + " using event pool:%" PRI_POOL "\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + appl_conf->core_count, appl_conf->num_procs, appl_conf->num_threads, + qgrp_shm->pool); + + test_fatal_if(qgrp_shm->pool == EM_POOL_UNDEF, + "Undefined application event pool!"); + + test_fatal_if(qgrp_shm->core_count > MAX_CORES, + "Test started on too many cores(%i)!\n" + "Max supported core count for this test is: %u\n", + qgrp_shm->core_count, MAX_CORES); + + env_atomic32_init(&qgrp_shm->exit_ack); + env_atomic32_set(&qgrp_shm->exit_ack, 0); + + /* + * Create the application EO and queues + */ + eo = em_eo_create("test_appl_queue_group", + start, start_local, stop, stop_local, + receive, &qgrp_shm->app_eo_ctx); + + default_group = em_queue_group_find("default"); + /* Verify that the find-func worked correctly. 
*/ + test_fatal_if(default_group != EM_QUEUE_GROUP_DEFAULT, + "Default queue group(%" PRI_QGRP ") not found!", + default_group); + + notif_queue = em_queue_create("notif_queue", EM_QUEUE_TYPE_ATOMIC, + EM_QUEUE_PRIO_HIGH, default_group, NULL); + test_fatal_if(notif_queue == EM_QUEUE_UNDEF, + "Notification queue creation failed!"); + + err = em_eo_add_queue_sync(eo, notif_queue); + test_fatal_if(err != EM_OK, + "Notification queue add to EO failed:%" PRI_STAT "", err); + + event_group = em_event_group_create(); + test_fatal_if(event_group == EM_EVENT_GROUP_UNDEF, + "Event group creation failed!"); + + qgrp_shm->app_eo_ctx.eo = eo; + qgrp_shm->app_eo_ctx.notif_queue = notif_queue; + qgrp_shm->app_eo_ctx.notif_qgrp = default_group; + qgrp_shm->app_eo_ctx.event_group = event_group; + + APPL_PRINT("Starting EO:%" PRI_EO "\t" + "- Notification Queue=%" PRI_QUEUE "\n", eo, notif_queue); + + event = em_alloc(sizeof(app_event_t), EM_EVENT_TYPE_SW, + qgrp_shm->pool); + test_fatal_if(event == EM_EVENT_UNDEF, + "Notification event allocation failed"); + app_event = em_event_pointer(event); + memset(app_event, 0, sizeof(*app_event)); + app_event->notif.id = EVENT_NOTIF; + app_event->notif.type = NOTIF_START_DONE; + /* Verify group when receiving */ + app_event->notif.used_group = default_group; + + notif_tbl[0].event = event; + notif_tbl[0].queue = notif_queue; + notif_tbl[0].egroup = EM_EVENT_GROUP_UNDEF; + + err = em_eo_start(eo, &start_err, NULL, 1, notif_tbl); + test_fatal_if(err != EM_OK, + "em_eo_start(%" PRI_EO "):%" PRI_STAT "", eo, err); + test_fatal_if(start_err != EM_OK, + "EO start function:%" PRI_STAT "", + start_err); +} + +void test_stop(const appl_conf_t *appl_conf) +{ + const int core = em_core_id(); + em_status_t err; + + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %02d\n", __func__, core); + + /* Await 'exit_ack' to be set by the EO */ + await_exit_ack(); + + em_eo_t eo = qgrp_shm->app_eo_ctx.eo; + em_event_group_t egrp; + em_notif_t notif_tbl[1] = { {.event = EM_EVENT_UNDEF} }; + int num_notifs; + + err = em_eo_stop_sync(eo); + test_fatal_if(err != EM_OK, + "EO stop:%" PRI_STAT " EO:%" PRI_EO "", err, eo); + + /* No more dispatching of the EO's events, egrp can be freed */ + + egrp = qgrp_shm->app_eo_ctx.event_group; + if (!em_event_group_is_ready(egrp)) { + num_notifs = em_event_group_get_notif(egrp, 1, notif_tbl); + err = em_event_group_abort(egrp); + if (err == EM_OK && num_notifs == 1) + em_free(notif_tbl[0].event); + } + err = em_event_group_delete(egrp); + test_fatal_if(err != EM_OK, + "egrp:%" PRI_EGRP " delete:%" PRI_STAT " EO:%" PRI_EO "", + egrp, err, eo); +} + +void test_term(const appl_conf_t *appl_conf) +{ + (void)appl_conf; + int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %02d\n", __func__, core); + + if (core == 0) { + env_shared_free(qgrp_shm); + em_unregister_error_handler(); + } +} + +/** + * Receive function for the test EO + */ +static void +receive(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + app_eo_ctx_t *eo_ctx = eo_context; + app_event_t *app_event = em_event_pointer(event); + /* Only set for the test_queue */ + app_q_ctx_t *q_ctx = queue_context; + + test_fatal_if(em_get_type_major(type) != EM_EVENT_TYPE_SW, + "Unexpected event type: 0x%x", type); + + if (unlikely(appl_shm->exit_flag)) { + /* Handle exit request */ + uint32_t exit_ack = env_atomic32_get(&qgrp_shm->exit_ack); + + if (exit_ack) { + em_free(event); + return; + } + + if (app_event->id == EVENT_NOTIF && + 
(app_event->notif.type == NOTIF_QUEUE_GROUP_MODIFY_DONE_FIRST || + app_event->notif.type == NOTIF_QUEUE_GROUP_MODIFY_DONE)) { + /* can be set by multiple cores */ + if (!exit_ack) + env_atomic32_set(&qgrp_shm->exit_ack, 1); + em_free(event); + return; + } + /* + * Handle events normally until a MODIFY_DONE has been + * received and exit_ack has been set. + */ + } + + switch (app_event->id) { + case EVENT_NOTIF: + receive_event_notif(eo_ctx, event, queue, q_ctx); + break; + case EVENT_DATA: + receive_event_data(eo_ctx, event, queue, q_ctx); + break; + default: + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Unknown event id(%u)!", app_event->id); + break; + } +} + +/** + * Handle the notification events received through the notif_queue + */ +static inline void +receive_event_notif(app_eo_ctx_t *eo_ctx, em_event_t event, + em_queue_t queue, app_q_ctx_t *q_ctx) +{ + app_event_t *app_event = em_event_pointer(event); + em_status_t err; + (void)q_ctx; + + switch (app_event->notif.type) { + case NOTIF_RESTART: + APPL_PRINT("\n" + "***********************************************\n" + "!!! Restarting test !!!\n" + "***********************************************\n" + "\n\n\n"); + eo_ctx->tot_modify_count_check = 0; + notif_start_done(eo_ctx, event, queue); + break; + + case NOTIF_START_DONE: + notif_start_done(eo_ctx, event, queue); + break; + + case NOTIF_QUEUE_GROUP_MODIFY_DONE_FIRST: + err = em_eo_add_queue_sync(eo_ctx->eo, eo_ctx->test_queue); + test_fatal_if(err != EM_OK, + "EO add queue:%" PRI_STAT "", err); + eo_ctx->test_queue_added = true; + notif_queue_group_modify_done(eo_ctx, event, queue); + break; + + case NOTIF_QUEUE_GROUP_MODIFY_DONE: + notif_queue_group_modify_done(eo_ctx, event, queue); + break; + + case NOTIF_EVENT_GROUP_DATA_DONE: + notif_event_group_data_done(eo_ctx, event, queue); + break; + + default: + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Unknown notification type:%i!", + app_event->notif.type); + break; + } +} + +/** Helper for receive_event_notif() */ +static void +notif_start_done(app_eo_ctx_t *eo_ctx, em_event_t event, em_queue_t queue) +{ + em_queue_group_t new_qgrp; + em_queue_type_t new_qtype; + const char *new_qtype_str; + em_core_mask_t core_mask; + em_notif_t notif_tbl; + em_status_t err; + const em_queue_group_t qgrp_curr = em_queue_get_group(queue); + app_event_t *app_event = em_event_pointer(event); + + test_fatal_if(app_event->notif.used_group != qgrp_curr, + "Qgrp mismatch: %" PRI_QGRP "!=%" PRI_QGRP "!", + app_event->notif.used_group, qgrp_curr); + + /* Create a test queue group */ + snprintf(&eo_ctx->test_qgrp_name[0], + sizeof(eo_ctx->test_qgrp_name), "%s%03i", + TEST_QGRP_NAME_BASE, eo_ctx->test_qgrp_name_nbr); + + eo_ctx->test_qgrp_name[TEST_QGRP_NAME_LEN - 1] = '\0'; + eo_ctx->test_qgrp_name_nbr = (eo_ctx->test_qgrp_name_nbr + 1) + % 1000; /* Range 0-999 */ + + /* Start with EM core-0 (it's always running) */ + em_core_mask_zero(&core_mask); + em_core_mask_set(0, &core_mask); + + /* Reuse event */ + app_event->notif.type = NOTIF_QUEUE_GROUP_MODIFY_DONE_FIRST; + app_event->notif.used_group = eo_ctx->notif_qgrp; + + notif_tbl.event = event; /* = app_event->notif */ + notif_tbl.queue = queue; + notif_tbl.egroup = EM_EVENT_GROUP_UNDEF; + + em_core_mask_copy(&app_event->notif.core_mask, &core_mask); + + /* + * Create the queue group! 
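+	 * Creation completes asynchronously: the notif_tbl event (type
+	 * NOTIF_QUEUE_GROUP_MODIFY_DONE_FIRST) is delivered to the notif
+	 * queue once the new group is operational.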
+ */ + new_qgrp = em_queue_group_create(eo_ctx->test_qgrp_name, &core_mask, + 1, ¬if_tbl); + test_fatal_if(new_qgrp == EM_QUEUE_GROUP_UNDEF, + "Queue group creation failed!"); + + if (eo_ctx->test_qgrp != EM_QUEUE_GROUP_UNDEF) { + /* + * Delete group - no need for notifs since 'modify to zero + * core mask' already done & queue deleted from group. Do the + * delete after the create to force creation of another + * queue group -> avoids always running the test with the same + * queue group. + */ + err = em_queue_group_delete(eo_ctx->test_qgrp, 0, NULL); + test_fatal_if(err != EM_OK, + "Qgrp delete:%" PRI_STAT "", err); + } + /* Store the new queue group to use for this test round */ + eo_ctx->test_qgrp = new_qgrp; + + /* + * Create a test queue for data events. The queue belongs to + * the test queue group. Change the queue type for every new + * test run. + */ + switch (eo_ctx->test_queue_type) { + case EM_QUEUE_TYPE_ATOMIC: + new_qtype = EM_QUEUE_TYPE_PARALLEL; + new_qtype_str = "PARALLEL"; + break; + case EM_QUEUE_TYPE_PARALLEL: + new_qtype = EM_QUEUE_TYPE_PARALLEL_ORDERED; + new_qtype_str = "PARALLEL_ORDERED"; + break; + default: + new_qtype = EM_QUEUE_TYPE_ATOMIC; + new_qtype_str = "ATOMIC"; + break; + } + eo_ctx->test_queue_type = new_qtype; + eo_ctx->test_queue = em_queue_create("test_queue", + eo_ctx->test_queue_type, + EM_QUEUE_PRIO_NORMAL, + eo_ctx->test_qgrp, NULL); + test_fatal_if(eo_ctx->test_queue == EM_QUEUE_UNDEF, + "Test queue creation failed!"); + eo_ctx->test_queue_added = false; + + APPL_PRINT("\n" + "Created test queue:%" PRI_QUEUE " type:%s(%u)\t" + "queue group:%" PRI_QGRP " (name:\"%s\")\n", + eo_ctx->test_queue, new_qtype_str, eo_ctx->test_queue_type, + eo_ctx->test_qgrp, eo_ctx->test_qgrp_name); + + memset(&qgrp_shm->app_q_ctx, 0, sizeof(qgrp_shm->app_q_ctx)); + env_atomic64_init(&qgrp_shm->app_q_ctx.event_count); + + err = em_queue_set_context(eo_ctx->test_queue, &qgrp_shm->app_q_ctx); + test_fatal_if(err != EM_OK, "Set queue context:%" PRI_STAT "", err); + /* + * Synchronize EO context. Event is sent through notification, + * which might have happened before we write the eo_ctx. 
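+	 * env_sync_mem() makes the EO-context writes above globally visible
+	 * before another core can handle that notification.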
+ */ + env_sync_mem(); +} + +/** Helper for receive_event_notif() */ +static void +notif_queue_group_modify_done(app_eo_ctx_t *eo_ctx, em_event_t event, + em_queue_t queue) +{ + em_status_t err; + const em_queue_group_t qgrp_curr = em_queue_get_group(queue); + app_event_t *app_event = em_event_pointer(event); + + test_fatal_if(app_event->notif.used_group != qgrp_curr, + "Qgrp mismatch: %" PRI_QGRP "!=%" PRI_QGRP "!", + app_event->notif.used_group, qgrp_curr); + + if (unlikely(em_core_mask_iszero(&app_event->notif.core_mask))) { + APPL_PRINT("\n" + "*************************************\n" + "All cores removed from QueueGroup!\n" + "*************************************\n"); + + test_fatal_if(eo_ctx->tot_modify_count != + eo_ctx->tot_modify_count_check, + "Modify count != actual count:\t" + "%" PRIu64 " vs %" PRIu64 "", + eo_ctx->tot_modify_count, + eo_ctx->tot_modify_count_check); + + err = em_eo_remove_queue_sync(eo_ctx->eo, + eo_ctx->test_queue); + test_fatal_if(err != EM_OK, + "Remove test queue:%" PRI_STAT "", err); + eo_ctx->test_queue_added = false; + + APPL_PRINT("Deleting test queue:%" PRI_QUEUE ",\t" + "Qgrp ID:%" PRI_QGRP " (name:\"%s\")\n", + eo_ctx->test_queue, eo_ctx->test_qgrp, + eo_ctx->test_qgrp_name); + + err = em_queue_delete(eo_ctx->test_queue); + test_fatal_if(err != EM_OK, + "Delete test queue:%" PRI_STAT "", err); + eo_ctx->test_queue = EM_QUEUE_UNDEF; + + /* + * Delete the queue group later in restart after the + * creation of a new group. This forces the creation + * and usage of at least two different queue groups. + */ + app_event->notif.id = EVENT_NOTIF; + app_event->notif.type = NOTIF_RESTART; + app_event->notif.used_group = eo_ctx->notif_qgrp; + err = em_send(event, eo_ctx->notif_queue); + if (unlikely(err != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, + "Send to notif queue:%" PRI_STAT "", err); + } + } else { + em_notif_t egroup_notif_tbl[1]; + + /* Reuse the event */ + app_event->notif.id = EVENT_NOTIF; + app_event->notif.type = NOTIF_EVENT_GROUP_DATA_DONE; + app_event->notif.used_group = eo_ctx->notif_qgrp; + + egroup_notif_tbl[0].event = event; + egroup_notif_tbl[0].queue = eo_ctx->notif_queue; + egroup_notif_tbl[0].egroup = EM_EVENT_GROUP_UNDEF; + + err = em_event_group_apply(eo_ctx->event_group, + eo_ctx->modify_threshold, 1, + egroup_notif_tbl); + test_fatal_if(err != EM_OK, + "em_event_group_apply():%" PRI_STAT "", err); + + for (int i = 0; i < EVENT_DATA_ALLOC_NBR; i++) { + em_event_t ev_data = em_alloc(sizeof(app_event_t), + EM_EVENT_TYPE_SW, + qgrp_shm->pool); + test_fatal_if(ev_data == EM_EVENT_UNDEF, + "Event alloc failed!"); + + app_event_t *data_event = em_event_pointer(ev_data); + + data_event->id = EVENT_DATA; + data_event->data.used_group = eo_ctx->test_qgrp; + + err = em_send_group(ev_data, eo_ctx->test_queue, + eo_ctx->event_group); + if (unlikely(err != EM_OK)) { + em_free(ev_data); + test_fatal_if(!appl_shm->exit_flag, + "Send to test queue:%" PRI_STAT "", + err); + } + } + } +} + +/** Helper for receive_event_notif() */ +static void +notif_event_group_data_done(app_eo_ctx_t *eo_ctx, em_event_t event, + em_queue_t queue) +{ + em_core_mask_t core_mask, used_mask; + em_notif_t notif_tbl; + em_status_t err; + int core_count; + int i; + const em_queue_group_t qgrp_curr = em_queue_get_group(queue); + app_event_t *app_event = em_event_pointer(event); + + test_fatal_if(app_event->notif.used_group != qgrp_curr, + "Qgrp mismatch: %" PRI_QGRP "!=%" PRI_QGRP "!", + app_event->notif.used_group, qgrp_curr); + + uint64_t mod_cnt = 
++eo_ctx->qgrp_modify_count; + + eo_ctx->tot_modify_count_check++; + + err = em_queue_group_get_mask(eo_ctx->test_qgrp, &used_mask); + test_fatal_if(err != EM_OK, + "Get queue group mask:%" PRI_STAT "", err); + + /* Get the next core mask for the test group */ + next_core_mask(/*New*/ &core_mask, /*Max*/ &eo_ctx->core_mask_max, + eo_ctx->tot_modify_count_check); + + if (mod_cnt >= eo_ctx->print_threshold || + em_core_mask_iszero(&core_mask)) { + char used_mask_str[EM_CORE_MASK_STRLEN]; + char core_mask_str[EM_CORE_MASK_STRLEN]; + + em_core_mask_tostr(used_mask_str, EM_CORE_MASK_STRLEN, + &used_mask); + em_core_mask_tostr(core_mask_str, EM_CORE_MASK_STRLEN, + &core_mask); + APPL_PRINT("\n" + "****************************************\n" + "Received %" PRIu64 " events on Q:%" PRI_QUEUE ":\n" + " QueueGroup:%" PRI_QGRP ", Curr Coremask:%s\n" + "Now Modifying:\n" + " QueueGroup:%" PRI_QGRP ", New Coremask:%s\n" + "****************************************\n", + env_atomic64_get(&qgrp_shm->app_q_ctx.event_count), + eo_ctx->test_queue, eo_ctx->test_qgrp, + used_mask_str, eo_ctx->test_qgrp, core_mask_str); + + eo_ctx->qgrp_modify_count = 0; + } + + /* + * Sanity check: verify that all cores that process the queue + * group actually received events and that other cores do not + * get any events. + */ + core_count = qgrp_shm->core_count; + for (i = 0; i < core_count; i++) { + const uint64_t ev_count = qgrp_shm->core_stat[i].event_count; + char mstr[EM_CORE_MASK_STRLEN]; + + if (em_core_mask_isset(i, &used_mask)) { + if (unlikely(ev_count == 0)) { + em_core_mask_tostr(mstr, EM_CORE_MASK_STRLEN, + &used_mask); + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "No events on core%i, mask:%s", + i, mstr); + } + } else if (unlikely(ev_count > 0)) { + em_core_mask_tostr(mstr, EM_CORE_MASK_STRLEN, + &used_mask); + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Events:%" PRIu64 " on inv.core%i, mask:%s", + ev_count, i, mstr); + } + } + + memset(qgrp_shm->core_stat, 0, sizeof(qgrp_shm->core_stat)); + env_atomic64_set(&qgrp_shm->app_q_ctx.event_count, 0); + + /* Reuse the event */ + app_event->id = EVENT_NOTIF; + app_event->notif.type = NOTIF_QUEUE_GROUP_MODIFY_DONE; + app_event->notif.used_group = eo_ctx->notif_qgrp; + em_core_mask_copy(&app_event->notif.core_mask, &core_mask); + + notif_tbl.event = event; + notif_tbl.queue = eo_ctx->notif_queue; + notif_tbl.egroup = EM_EVENT_GROUP_UNDEF; + + err = em_queue_group_modify(eo_ctx->test_qgrp, &core_mask, + 1, ¬if_tbl); + test_fatal_if(err != EM_OK, + "em_queue_group_modify():%" PRI_STAT "", err); +} + +/** + * Handle the test data events received through the test_queue + * + * Check that the queue group is valid and send the data back to the same + * queue for another round. + * The last event should trigger a notification event to be sent to the + * notif_queue to begin the queue group modification sequence. 
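+ * The per-queue event_count is updated with atomic operations so that the
+ * same logic works for atomic, parallel and parallel-ordered test queues.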
+ */
+static inline void
+receive_event_data(app_eo_ctx_t *eo_ctx, em_event_t event,
+		   em_queue_t queue, app_q_ctx_t *q_ctx)
+{
+	int core_id = em_core_id();
+	app_event_t *app_event = em_event_pointer(event);
+	em_queue_group_t qgrp_curr = em_queue_get_group(queue);
+	em_core_mask_t used_mask;
+	em_status_t err;
+	const uint64_t event_count =
+		env_atomic64_add_return(&q_ctx->event_count, 1);
+	qgrp_shm->core_stat[core_id].event_count++;
+
+	/* Verify that the queue group is correct & expected */
+	test_fatal_if(app_event->data.used_group != qgrp_curr,
+		      "Queue grp mismatch:%" PRI_QGRP "!=%" PRI_QGRP "",
+		      app_event->data.used_group, qgrp_curr);
+
+	/* Verify that this core is a valid receiver of events in this group */
+	err = em_queue_group_get_mask(qgrp_curr, &used_mask);
+	test_fatal_if(err != EM_OK,
+		      "Get queue group mask:%" PRI_STAT "", err);
+
+	if (unlikely(!em_core_mask_isset(core_id, &used_mask))) {
+		char mask_str[EM_CORE_MASK_STRLEN];
+
+		em_core_mask_tostr(mask_str, EM_CORE_MASK_STRLEN, &used_mask);
+		test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead,
+			   "Core bit not set in core mask! core:%02i mask:%s",
+			   core_id, mask_str);
+	}
+
+	/*
+	 * Handle the test data event
+	 */
+	if (event_count <= eo_ctx->modify_threshold - EVENT_DATA_ALLOC_NBR) {
+		/* Send the data event for another round */
+		err = em_send_group(event, eo_ctx->test_queue,
+				    eo_ctx->event_group);
+		if (unlikely(err != EM_OK)) {
+			em_free(event);
+			test_fatal_if(!appl_shm->exit_flag,
+				      "Send to test queue:%" PRI_STAT "", err);
+		}
+	} else if (event_count <= eo_ctx->modify_threshold) {
+		/*
+		 * Free the events for the last round; an event group
+		 * notification event should be triggered when the last
+		 * event has been processed.
+		 */
+		em_free(event);
+	} else {
+		test_error(EM_ERROR_SET_FATAL(0xec0de), 0xacdc,
+			   "Invalid event count(%" PRIu64 ")!", event_count);
+	}
+}
+
+/**
+ * Await exit_ack to be set by the EO.
+ */
+static void await_exit_ack(void)
+{
+	env_time_t t_max = env_time_global_from_ns(20 * 1000000000ULL); /*20s*/
+	env_time_t t_now = ENV_TIME_NULL;
+	env_time_t t_start = env_time_global();
+	env_time_t t_end = env_time_sum(t_start, t_max);
+	uint64_t ns;
+	uint32_t exit_ack = 0;
+
+	long double sec;
+
+	do {
+		if (!exit_ack)
+			em_dispatch(1);
+		exit_ack = env_atomic32_get(&qgrp_shm->exit_ack);
+		t_now = env_time_global();
+	} while (!exit_ack && env_time_cmp(t_now, t_end) < 0);
+
+	ns = env_time_diff_ns(t_now, t_start);
+	sec = (long double)ns / 1000000000.0;
+
+	if (unlikely(!exit_ack)) {
+		test_error(EM_ERR_TIMEOUT, 0xdead,
+			   "Timeout: No exit_ack within %Lfs!\n", sec);
+		return;
+	}
+
+	APPL_PRINT("exit_ack in %Lfs on EM-core:%02d => Tearing down\n",
+		   sec, em_core_id());
+}
+
+/**
+ * Global start function for the test EO
+ */
+static em_status_t
+start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf)
+{
+	app_eo_ctx_t *eo_ctx = eo_context;
+	uint64_t tot_modify_count = 0;
+	uint64_t tmp;
+	int ret;
+
+	(void)eo;
+	(void)conf;
+
+	APPL_PRINT("Queue Group Test - Global EO Start\n");
+
+	snprintf(&eo_ctx->test_qgrp_name[0],
+		 sizeof(eo_ctx->test_qgrp_name),
+		 "%s%03i", TEST_QGRP_NAME_BASE, 0);
+
+	em_core_mask_zero(&eo_ctx->core_mask_max);
+	em_core_mask_set_count(qgrp_shm->core_count, &eo_ctx->core_mask_max);
+
+	/*
+	 * The values used below in calculations are derived from the way the
+	 * next_core_mask() function calculates the next core mask to use.
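+	 * For example, with 16 cores core_mask_max is 0xFFFF: the nibble-walk
+	 * below sums (0xFF + 1) three times and subtracts one, giving
+	 * tot_modify_count = 256 + 256 + 256 - 1 = 767, which matches the
+	 * 256 + 256 + 255 masks that next_core_mask() generates for 0xFFFF.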
+ */ + ret = em_core_mask_get_bits(&tmp, 1, &eo_ctx->core_mask_max); + if (unlikely(ret != 1)) { + char mask_str[EM_CORE_MASK_STRLEN]; + + em_core_mask_tostr(mask_str, EM_CORE_MASK_STRLEN, + &eo_ctx->core_mask_max); + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "em_core_mask_get_bits(coremask=%s), ret=%i", + mask_str, ret); + } + + do { + tot_modify_count += (tmp & 0xFF) + 1; + tmp = (tmp >> 4); + if (tmp < 0x10) + break; + } while (tmp); + + tot_modify_count -= 1; + + eo_ctx->tot_modify_count = tot_modify_count; + eo_ctx->tot_modify_count_check = 0; + + eo_ctx->print_threshold = tot_modify_count / TEST_PRINT_COUNT; + + if (eo_ctx->print_threshold == 0) + eo_ctx->print_threshold = 1; + + /* + * 256*15 - 1 is the maximum number of core masks tested when 64 + * cores (max) are running this test. + */ + eo_ctx->modify_threshold = + ((256 * 15 * 0x1000) - 1) / tot_modify_count; + eo_ctx->modify_threshold = ROUND_UP(eo_ctx->modify_threshold, + EVENT_DATA_ALLOC_NBR); + + APPL_PRINT("\n" + "*******************************************************\n" + "Test threshold values set:\n" + " Tot group modifies: %" PRIu64 "\n" + " Events received on group before modify: %" PRIu64 "\n" + " Group modify print threshold: %" PRIu64 "\n" + "*******************************************************\n" + "\n", + tot_modify_count, eo_ctx->modify_threshold, + eo_ctx->print_threshold); + + return EM_OK; +} + +/** + * Global stop function for the test EO + */ +static em_status_t +stop(void *eo_context, em_eo_t eo) +{ + em_status_t err; + app_eo_ctx_t *eo_ctx = eo_context; + + /* remove and delete all of the EO's queues */ + err = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(err != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + err, eo); + if (eo_ctx->test_queue != EM_QUEUE_UNDEF && !eo_ctx->test_queue_added) { + err = em_queue_delete(eo_ctx->test_queue); + test_fatal_if(err != EM_OK, + "Delete test queue:%" PRI_STAT "", err); + } + + /* delete the EO at the end of the stop-function */ + err = em_eo_delete(eo); + test_fatal_if(err != EM_OK, + "EO delete:%" PRI_STAT " EO:%" PRI_EO "", + err, eo); + APPL_PRINT("Queue Group Test - Global EO Stop\n"); + + return EM_OK; +} + +/** + * Local start function for the test EO + */ +static em_status_t +start_local(void *eo_context, em_eo_t eo) +{ + (void)eo_context; + (void)eo; + + APPL_PRINT("Queue Group Test - Local EO Start: EM-core:%02d\n", + em_core_id()); + return EM_OK; +} + +/** + * Local stop function for the test EO + */ +static em_status_t +stop_local(void *eo_context, em_eo_t eo) +{ + (void)eo_context; + (void)eo; + + APPL_PRINT("Queue Group Test - Local EO Stop: EM-core:%02d\n", + em_core_id()); + return EM_OK; +} + +/** + * Update the core mask: + * E.g. if max_mask is 0xFFFF: 0x0001-0x0100 (256 masks), + * 0x0010->0x1000 (256 masks), 0x0100-0x0000 (255 masks) + */ +static void +next_core_mask(em_core_mask_t *new_mask, em_core_mask_t *max_mask, int count) +{ + uint64_t mask64 = ((uint64_t)(count % 256) + 1) << (4 * (count / 256)); + + em_core_mask_zero(new_mask); + em_core_mask_set_bits(&mask64, 1, new_mask); + em_core_mask_and(new_mask, new_mask, max_mask); +} diff --git a/programs/example/timer/timer_hello.c b/programs/example/timer/timer_hello.c index 3f7ac7c1..207a0510 100644 --- a/programs/example/timer/timer_hello.c +++ b/programs/example/timer/timer_hello.c @@ -1,523 +1,528 @@ -/* - * Copyright (c) 2016, Nokia Solutions and Networks - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine Timer hello world example. - * - * Timer hello world example to show basic event timer usage. Creates a - * single EO that starts a periodic and a random one-shot timeout. - * - * Exception/error management is simplified to focus on basic timer usage. 
- */ -#ifndef _GNU_SOURCE -#define _GNU_SOURCE -#endif -#include -#include -#include -#include - -#include -#include - -#include "cm_setup.h" -#include "cm_error_handler.h" - -/* test app defines */ -#define APP_MAX_TEXT_LEN 128 /* string length limit */ -#define APP_TIMEOUT_MODULO_MS 30000 /* max random timeout */ -#define APP_TIMEOUT_MIN_MS 100 /* minimum random timeout */ -#define APP_PERIOD_MS 1000 /* heartbeat tick period */ - -#define APP_EO_NAME "Control EO" - -/** - * Example application message event - */ -typedef enum app_cmd_t { - APP_CMD_TMO, /* periodic timeout */ - APP_CMD_HELLO /* random timeout */ -} app_cmd_t; - -typedef struct app_msg_t { - app_cmd_t command; - uint64_t count; - char text[APP_MAX_TEXT_LEN]; - /* for managing periodic timeouts */ - em_tmo_t tmo; -} app_msg_t; - -/** - * EO context - */ -typedef struct app_eo_ctx_t { - em_tmo_t periodic_tmo; - em_tmo_t random_tmo; - em_queue_t my_q; - uint64_t hz; -} app_eo_ctx_t; - -/** - * Timer hello world shared memory data - */ -typedef struct timer_app_shm_t { - /* Event pool used by this application */ - em_pool_t pool; - /* EO context data */ - app_eo_ctx_t eo_context; - em_queue_t eo_q; - em_timer_t tmr; - /* Pad size to a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} timer_app_shm_t; - -/* EM-core locals */ -static ENV_LOCAL timer_app_shm_t *m_shm; -static ENV_LOCAL unsigned int m_randseed; - -/* Local function prototypes */ -static em_status_t app_eo_start(app_eo_ctx_t *eo_ctx, em_eo_t eo, - const em_eo_conf_t *conf); -static em_status_t app_eo_start_local(app_eo_ctx_t *eo_ctx, em_eo_t eo); -static em_status_t app_eo_stop(app_eo_ctx_t *eo_ctx, em_eo_t eo); -static void app_eo_receive(app_eo_ctx_t *eo_ctx, em_event_t event, - em_event_type_t type, em_queue_t queue, - void *q_ctx); -static void new_rand_timeout(app_eo_ctx_t *eo_ctx); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Before EM - Init of the test application. - * - * The shared memory is needed if EM instance runs on multiple processes. - * Doing it like this makes it possible to run the app both as threads (-t) - * as well as processes (-p). - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void test_init(const appl_conf_t *appl_conf) -{ - (void)appl_conf; - int core = em_core_id(); - - if (core == 0) { - /* first core creates the ShMem */ - m_shm = env_shared_reserve("TimerAppShMem", - sizeof(timer_app_shm_t)); - em_register_error_handler(test_error_handler); - } else { - m_shm = env_shared_lookup("TimerAppShMem"); - } - - if (m_shm == NULL) { - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "init failed on EM-core: %u", - em_core_id()); - } else if (core == 0) { - /* initialize shared memory for EM app init */ - memset(m_shm, 0, sizeof(timer_app_shm_t)); - } -} - -/** - * Startup of the timer hello EM application. - * - * At this point EM is up, but no EOs exist. EM API can be used to create - * queues, EOs etc. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. 
- */ -void test_start(const appl_conf_t *appl_conf) -{ - em_eo_t eo; - em_timer_attr_t attr; - em_queue_t queue; - em_status_t stat; - em_event_t event; - app_msg_t *msg; - app_eo_ctx_t *eo_ctx; - uint64_t period; - - /* - * Store the event pool to use, use the EM default pool if no other - * pool is provided through the appl_conf. - */ - if (appl_conf->num_pools >= 1) - m_shm->pool = appl_conf->pools[0]; - else - m_shm->pool = EM_POOL_DEFAULT; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%d\n" - " Application running on %u EM-cores (procs:%u, threads:%u)\n" - " using event pool:%" PRI_POOL "\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - appl_conf->core_count, appl_conf->num_procs, appl_conf->num_threads, - m_shm->pool); - - test_fatal_if(m_shm->pool == EM_POOL_UNDEF, - "Undefined application event pool!"); - - /* Create EO */ - eo = em_eo_create(APP_EO_NAME, - (em_start_func_t)app_eo_start, - (em_start_local_func_t)app_eo_start_local, - (em_stop_func_t)app_eo_stop, NULL, - (em_receive_func_t)app_eo_receive, - &m_shm->eo_context); - test_fatal_if(eo == EM_EO_UNDEF, "Failed to create EO!"); - eo_ctx = &m_shm->eo_context; - - /* one basic queue */ - queue = em_queue_create("Timer hello Q", - EM_QUEUE_TYPE_PARALLEL, - EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_DEFAULT, NULL); - stat = em_eo_add_queue_sync(eo, queue); - test_fatal_if(stat != EM_OK, "Failed to create queue!"); - m_shm->eo_q = queue; - - /* - * Create shared timer and store handle in shared memory. - * Accept all defaults. - */ - em_timer_attr_init(&attr); - strncpy(attr.name, "ExampleTimer", EM_TIMER_NAME_LEN); - m_shm->tmr = em_timer_create(&attr); - test_fatal_if(m_shm->tmr == EM_TIMER_UNDEF, "Failed to create timer!"); - - /* Start EO */ - stat = em_eo_start_sync(eo, NULL, NULL); - test_fatal_if(stat != EM_OK, "Failed to start EO!"); - - /* create periodic timer */ - eo_ctx->periodic_tmo = em_tmo_create(m_shm->tmr, EM_TMO_FLAG_PERIODIC, - eo_ctx->my_q); - test_fatal_if(eo_ctx->periodic_tmo == EM_TMO_UNDEF, "Can't allocate tmo!\n"); - - /* allocate timeout event */ - event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, m_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, "Can't allocate event!\n"); - - msg = em_event_pointer(event); - msg->command = APP_CMD_TMO; - msg->tmo = eo_ctx->periodic_tmo; - msg->count = 0; - eo_ctx->hz = em_timer_get_freq(m_shm->tmr); /* save for later */ - if (eo_ctx->hz < 1000) { /* sanity check */ - APPL_ERROR("WARNING - timer hz very low!\n"); - } - - /* pre-allocate random timeout */ - eo_ctx->random_tmo = em_tmo_create(m_shm->tmr, EM_TMO_FLAG_ONESHOT, - eo_ctx->my_q); - test_fatal_if(eo_ctx->random_tmo == EM_TMO_UNDEF, "Can't allocate tmo!\n"); - - /* setup periodic timeout (the tick) */ - period = eo_ctx->hz / 1000; /* ticks for 1 ms */ - period *= APP_PERIOD_MS; - stat = em_tmo_set_periodic(eo_ctx->periodic_tmo, 0, period, event); - test_fatal_if(stat != EM_OK, "Can't activate tmo!\n"); -} - -void test_stop(const appl_conf_t *appl_conf) -{ - const int core = em_core_id(); - em_status_t ret; - em_eo_t eo; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - eo = em_eo_find(APP_EO_NAME); - test_fatal_if(eo == EM_EO_UNDEF, "Could not find EO:%s", APP_EO_NAME); - - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); - ret 
= em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); - - ret = em_timer_delete(m_shm->tmr); - test_fatal_if(ret != EM_OK, - "Timer:%" PRI_TMR " delete:%" PRI_STAT "", - m_shm->tmr, ret); -} - -void test_term(const appl_conf_t *appl_conf) -{ - (void)appl_conf; - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - if (m_shm != NULL) { - env_shared_free(m_shm); - m_shm = NULL; - em_unregister_error_handler(); - } -} - -/** - * @private - * - * EO start function. - */ -static em_status_t app_eo_start(app_eo_ctx_t *eo_ctx, em_eo_t eo, - const em_eo_conf_t *conf) -{ - em_timer_attr_t attr; - em_timer_t tmr; - int num_timers; - - (void)eo; - (void)conf; - - APPL_PRINT("EO start\n"); - - /* print timer info */ - num_timers = em_timer_get_all(&tmr, 1); - APPL_PRINT("System has %d timer(s)\n", num_timers); - - if (em_timer_get_attr(m_shm->tmr, &attr) != EM_OK) { - APPL_ERROR("Can't get timer info!\n"); - return EM_ERR_BAD_ID; - } - - APPL_PRINT("Timer \"%s\" info:\n", attr.name); - APPL_PRINT(" -resolution: %" PRIu64 " ns\n", attr.resparam.res_ns); - APPL_PRINT(" -max_tmo: %" PRIu64 " us\n", attr.resparam.max_tmo / 1000); - APPL_PRINT(" -min_tmo: %" PRIu64 " us\n", attr.resparam.min_tmo / 1000); - APPL_PRINT(" -num_tmo: %d\n", attr.num_tmo); - APPL_PRINT(" -clk_src: %d\n", attr.resparam.clk_src); - APPL_PRINT(" -tick Hz: %" PRIu64 " hz\n", - em_timer_get_freq(m_shm->tmr)); - - /* init local EO context */ - eo_ctx->my_q = m_shm->eo_q; - - return EM_OK; -} - -/** - * @private - * - * EO per thread start function. - * - */ -static em_status_t app_eo_start_local(app_eo_ctx_t *eo_ctx, em_eo_t eo) -{ - (void)eo_ctx; - (void)eo; - - APPL_PRINT("EO local start\n"); - - /* per-thread random seed */ - m_randseed = (unsigned int)em_timer_current_tick(m_shm->tmr); - - /* with a low frequency timer we actually get the same seed! */ - - return EM_OK; -} - -/** - * @private - * - * EO stop function. - * - */ -static em_status_t app_eo_stop(app_eo_ctx_t *eo_ctx, em_eo_t eo) -{ - em_event_t event = EM_EVENT_UNDEF; - em_status_t ret; - - APPL_PRINT("EO stop\n"); - - /* cancel and delete ongoing timeouts */ - if (eo_ctx->periodic_tmo != EM_TMO_UNDEF) { - em_tmo_delete(eo_ctx->periodic_tmo, &event); - if (event != EM_EVENT_UNDEF) - em_free(event); - } - if (eo_ctx->random_tmo != EM_TMO_UNDEF) { - event = EM_EVENT_UNDEF; - em_tmo_delete(eo_ctx->random_tmo, &event); - if (event != EM_EVENT_UNDEF) - em_free(event); - } - - /* remove and delete all of the EO's queues */ - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - return EM_OK; -} - -/** - * @private - * - * EO receive function. This runs the example app after initialization. - * - * Prints tick-tock at every periodic timeout and in parallel runs random - * timeouts that trigger printing of a random quote. - * - */ -static void app_eo_receive(app_eo_ctx_t *eo_ctx, em_event_t event, - em_event_type_t type, em_queue_t queue, - void *q_ctx) -{ - int reuse = 0; - em_status_t ret; - - (void)queue; - (void)q_ctx; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - if (type == EM_EVENT_TYPE_SW) { - app_msg_t *msgin = (app_msg_t *)em_event_pointer(event); - - switch (msgin->command) { - case APP_CMD_TMO: - /* print tick-tock */ - msgin->count++; - if (msgin->count & 1) - APPL_PRINT("%" PRIu64 ". ", - (msgin->count / 2) + 1); - APPL_PRINT((msgin->count & 1) ? 
"tick\n" : "tock\n"); - - /* ack periodic timeout, re-use the same event */ - ret = em_tmo_ack(msgin->tmo, event); - test_fatal_if(ret != EM_OK, - "em_tmo_ack():%" PRI_STAT, ret); - - reuse = 1; /* do not free this event */ - - /* get random timeouts going after 10th message */ - if (msgin->count == 10) - new_rand_timeout(eo_ctx); - break; - - case APP_CMD_HELLO: - APPL_PRINT("%s\n\n", msgin->text); - /* set next timeout */ - new_rand_timeout(eo_ctx); - break; - default: - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "Invalid event!\n"); - } - } else { - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "Invalid event type!\n"); - } - - /* normally free the received event */ - if (!reuse) - em_free(event); -} - -/* sets a new random timeout */ -void new_rand_timeout(app_eo_ctx_t *eo_ctx) -{ - int rnd; - app_msg_t *msg; - uint64_t period; - em_status_t stat; - - /* random timeouts allocate new event every time (could re-use) */ - em_event_t event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, - m_shm->pool); - if (!event) { - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "Can't allocate event!"); - } - - msg = em_event_pointer(event); - msg->command = APP_CMD_HELLO; - - /* new timeout period APP_TIMEOUT_MIN_MS ... APP_TIMEOUT_MODULO */ - do { - rnd = rand_r(&m_randseed); - rnd %= APP_TIMEOUT_MODULO_MS; - - } while (rnd < APP_TIMEOUT_MIN_MS); - - snprintf(msg->text, APP_MAX_TEXT_LEN, "%d ms gone!\n", rnd); - msg->text[APP_MAX_TEXT_LEN - 1] = 0; - - APPL_PRINT("Meditation time: what can you do in %d ms?\n", rnd); - - period = eo_ctx->hz / 1000; - period *= rnd; /* rnd x ms */ - - /* Alternate between set_rel() and set_abs(), roughly half of each */ - if (rnd > (APP_TIMEOUT_MODULO_MS + APP_TIMEOUT_MIN_MS) / 2) - stat = em_tmo_set_rel(eo_ctx->random_tmo, period, event); - else - stat = em_tmo_set_abs(eo_ctx->random_tmo, - em_timer_current_tick(m_shm->tmr) + - period, event); - if (stat != EM_OK) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "Can't activate tmo!\n"); -} +/* + * Copyright (c) 2016, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Event Machine Timer hello world example. + * + * Timer hello world example to show basic event timer usage. Creates a + * single EO that starts a periodic and a random one-shot timeout. + * + * Exception/error management is simplified to focus on basic timer usage. + */ +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include +#include +#include +#include + +#include +#include + +#include "cm_setup.h" +#include "cm_error_handler.h" + +/* test app defines */ +#define APP_MAX_TEXT_LEN 128 /* string length limit */ +#define APP_TIMEOUT_MODULO_MS 30000 /* max random timeout */ +#define APP_TIMEOUT_MIN_MS 100 /* minimum random timeout */ +#define APP_PERIOD_MS 1000 /* heartbeat tick period */ + +#define APP_EO_NAME "Control EO" + +/** + * Example application message event + */ +typedef enum app_cmd_t { + APP_CMD_TMO, /* periodic timeout */ + APP_CMD_HELLO /* random timeout */ +} app_cmd_t; + +typedef struct app_msg_t { + app_cmd_t command; + uint64_t count; + char text[APP_MAX_TEXT_LEN]; + /* for managing periodic timeouts */ + em_tmo_t tmo; +} app_msg_t; + +/** + * EO context + */ +typedef struct app_eo_ctx_t { + em_tmo_t periodic_tmo; + em_tmo_t random_tmo; + em_queue_t my_q; + uint64_t hz; +} app_eo_ctx_t; + +/** + * Timer hello world shared memory data + */ +typedef struct timer_app_shm_t { + /* Event pool used by this application */ + em_pool_t pool; + /* EO context data */ + app_eo_ctx_t eo_context; + em_queue_t eo_q; + em_timer_t tmr; + /* Pad size to a multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} timer_app_shm_t; + +/* EM-core locals */ +static ENV_LOCAL timer_app_shm_t *m_shm; +static ENV_LOCAL unsigned int m_randseed; + +/* Local function prototypes */ +static em_status_t app_eo_start(app_eo_ctx_t *eo_ctx, em_eo_t eo, + const em_eo_conf_t *conf); +static em_status_t app_eo_start_local(app_eo_ctx_t *eo_ctx, em_eo_t eo); +static em_status_t app_eo_stop(app_eo_ctx_t *eo_ctx, em_eo_t eo); +static void app_eo_receive(app_eo_ctx_t *eo_ctx, em_event_t event, + em_event_type_t type, em_queue_t queue, + void *q_ctx); +static void new_rand_timeout(app_eo_ctx_t *eo_ctx); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Before EM - Init of the test application. + * + * The shared memory is needed if EM instance runs on multiple processes. + * Doing it like this makes it possible to run the app both as threads (-t) + * as well as processes (-p). + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. 
+ */ +void test_init(const appl_conf_t *appl_conf) +{ + (void)appl_conf; + int core = em_core_id(); + + if (core == 0) { + /* first core creates the ShMem */ + m_shm = env_shared_reserve("TimerAppShMem", + sizeof(timer_app_shm_t)); + em_register_error_handler(test_error_handler); + } else { + m_shm = env_shared_lookup("TimerAppShMem"); + } + + if (m_shm == NULL) { + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "init failed on EM-core: %u", + em_core_id()); + } else if (core == 0) { + /* initialize shared memory for EM app init */ + memset(m_shm, 0, sizeof(timer_app_shm_t)); + } +} + +/** + * Startup of the timer hello EM application. + * + * At this point EM is up, but no EOs exist. EM API can be used to create + * queues, EOs etc. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. + */ +void test_start(const appl_conf_t *appl_conf) +{ + em_eo_t eo; + em_timer_attr_t attr; + em_queue_t queue; + em_status_t stat; + em_event_t event; + app_msg_t *msg; + app_eo_ctx_t *eo_ctx; + uint64_t period; + + /* + * Store the event pool to use, use the EM default pool if no other + * pool is provided through the appl_conf. + */ + if (appl_conf->num_pools >= 1) + m_shm->pool = appl_conf->pools[0]; + else + m_shm->pool = EM_POOL_DEFAULT; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%d\n" + " Application running on %u EM-cores (procs:%u, threads:%u)\n" + " using event pool:%" PRI_POOL "\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + appl_conf->core_count, appl_conf->num_procs, appl_conf->num_threads, + m_shm->pool); + + test_fatal_if(m_shm->pool == EM_POOL_UNDEF, + "Undefined application event pool!"); + + /* Create EO */ + eo = em_eo_create(APP_EO_NAME, + (em_start_func_t)app_eo_start, + (em_start_local_func_t)app_eo_start_local, + (em_stop_func_t)app_eo_stop, NULL, + (em_receive_func_t)app_eo_receive, + &m_shm->eo_context); + test_fatal_if(eo == EM_EO_UNDEF, "Failed to create EO!"); + eo_ctx = &m_shm->eo_context; + + /* one basic queue */ + queue = em_queue_create("Timer hello Q", + EM_QUEUE_TYPE_PARALLEL, + EM_QUEUE_PRIO_NORMAL, + EM_QUEUE_GROUP_DEFAULT, NULL); + stat = em_eo_add_queue_sync(eo, queue); + test_fatal_if(stat != EM_OK, "Failed to create queue!"); + m_shm->eo_q = queue; + + /* + * Create shared timer and store handle in shared memory. + * Accept all defaults. 
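+	 * em_timer_attr_init() fills the attributes with implementation
+	 * defaults; only the timer name is overridden below.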
+ */ + em_timer_attr_init(&attr); + strncpy(attr.name, "ExampleTimer", EM_TIMER_NAME_LEN); + m_shm->tmr = em_timer_create(&attr); + test_fatal_if(m_shm->tmr == EM_TIMER_UNDEF, "Failed to create timer!"); + + /* Start EO */ + stat = em_eo_start_sync(eo, NULL, NULL); + test_fatal_if(stat != EM_OK, "Failed to start EO!"); + + /* create periodic timer */ + eo_ctx->periodic_tmo = em_tmo_create(m_shm->tmr, EM_TMO_FLAG_PERIODIC, + eo_ctx->my_q); + test_fatal_if(eo_ctx->periodic_tmo == EM_TMO_UNDEF, "Can't allocate tmo!\n"); + + /* allocate timeout event */ + event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, m_shm->pool); + test_fatal_if(event == EM_EVENT_UNDEF, "Can't allocate event!\n"); + + msg = em_event_pointer(event); + msg->command = APP_CMD_TMO; + msg->tmo = eo_ctx->periodic_tmo; + msg->count = 0; + eo_ctx->hz = em_timer_get_freq(m_shm->tmr); /* save for later */ + if (eo_ctx->hz < 1000) { /* sanity check */ + APPL_ERROR("WARNING - timer hz very low!\n"); + } + + /* pre-allocate random timeout */ + eo_ctx->random_tmo = em_tmo_create(m_shm->tmr, EM_TMO_FLAG_ONESHOT, + eo_ctx->my_q); + test_fatal_if(eo_ctx->random_tmo == EM_TMO_UNDEF, "Can't allocate tmo!\n"); + + /* setup periodic timeout (the tick) */ + period = eo_ctx->hz / 1000; /* ticks for 1 ms */ + period *= APP_PERIOD_MS; + stat = em_tmo_set_periodic(eo_ctx->periodic_tmo, 0, period, event); + test_fatal_if(stat != EM_OK, "Can't activate tmo!\n"); +} + +void test_stop(const appl_conf_t *appl_conf) +{ + const int core = em_core_id(); + em_status_t ret; + em_eo_t eo; + + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + eo = em_eo_find(APP_EO_NAME); + test_fatal_if(eo == EM_EO_UNDEF, "Could not find EO:%s", APP_EO_NAME); + + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); + + ret = em_timer_delete(m_shm->tmr); + test_fatal_if(ret != EM_OK, + "Timer:%" PRI_TMR " delete:%" PRI_STAT "", + m_shm->tmr, ret); +} + +void test_term(const appl_conf_t *appl_conf) +{ + (void)appl_conf; + int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + if (m_shm != NULL) { + env_shared_free(m_shm); + m_shm = NULL; + em_unregister_error_handler(); + } +} + +/** + * @private + * + * EO start function. + */ +static em_status_t app_eo_start(app_eo_ctx_t *eo_ctx, em_eo_t eo, + const em_eo_conf_t *conf) +{ + em_timer_attr_t attr; + em_timer_t tmr; + int num_timers; + + (void)eo; + (void)conf; + + APPL_PRINT("EO start\n"); + + /* print timer info */ + num_timers = em_timer_get_all(&tmr, 1); + APPL_PRINT("System has %d timer(s)\n", num_timers); + + if (em_timer_get_attr(m_shm->tmr, &attr) != EM_OK) { + APPL_ERROR("Can't get timer info!\n"); + return EM_ERR_BAD_ID; + } + + APPL_PRINT("Timer \"%s\" info:\n", attr.name); + APPL_PRINT(" -resolution: %" PRIu64 " ns\n", attr.resparam.res_ns); + APPL_PRINT(" -max_tmo: %" PRIu64 " us\n", attr.resparam.max_tmo / 1000); + APPL_PRINT(" -min_tmo: %" PRIu64 " us\n", attr.resparam.min_tmo / 1000); + APPL_PRINT(" -num_tmo: %d\n", attr.num_tmo); + APPL_PRINT(" -clk_src: %d\n", attr.resparam.clk_src); + APPL_PRINT(" -tick Hz: %" PRIu64 " hz\n", + em_timer_get_freq(m_shm->tmr)); + + /* init local EO context */ + eo_ctx->my_q = m_shm->eo_q; + + return EM_OK; +} + +/** + * @private + * + * EO per thread start function. 
+ * + */ +static em_status_t app_eo_start_local(app_eo_ctx_t *eo_ctx, em_eo_t eo) +{ + (void)eo_ctx; + (void)eo; + + APPL_PRINT("EO local start\n"); + + /* per-thread random seed */ + m_randseed = (unsigned int)em_timer_current_tick(m_shm->tmr); + + /* with a low frequency timer we actually get the same seed! */ + + return EM_OK; +} + +/** + * @private + * + * EO stop function. + * + */ +static em_status_t app_eo_stop(app_eo_ctx_t *eo_ctx, em_eo_t eo) +{ + em_event_t event = EM_EVENT_UNDEF; + em_status_t ret; + + APPL_PRINT("EO stop\n"); + + /* cancel and delete ongoing timeouts */ + if (eo_ctx->periodic_tmo != EM_TMO_UNDEF) { + if (em_tmo_get_state(eo_ctx->periodic_tmo) == EM_TMO_STATE_ACTIVE) + em_tmo_cancel(eo_ctx->periodic_tmo, &event); + + em_tmo_delete(eo_ctx->periodic_tmo); + if (event != EM_EVENT_UNDEF) + em_free(event); + } + if (eo_ctx->random_tmo != EM_TMO_UNDEF) { + event = EM_EVENT_UNDEF; + if (em_tmo_get_state(eo_ctx->random_tmo) == EM_TMO_STATE_ACTIVE) + em_tmo_cancel(eo_ctx->random_tmo, &event); + em_tmo_delete(eo_ctx->random_tmo); + if (event != EM_EVENT_UNDEF) + em_free(event); + } + + /* remove and delete all of the EO's queues */ + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + + return EM_OK; +} + +/** + * @private + * + * EO receive function. This runs the example app after initialization. + * + * Prints tick-tock at every periodic timeout and in parallel runs random + * timeouts that trigger printing of a random quote. + * + */ +static void app_eo_receive(app_eo_ctx_t *eo_ctx, em_event_t event, + em_event_type_t type, em_queue_t queue, + void *q_ctx) +{ + int reuse = 0; + em_status_t ret; + + (void)queue; + (void)q_ctx; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + if (type == EM_EVENT_TYPE_SW) { + app_msg_t *msgin = (app_msg_t *)em_event_pointer(event); + + switch (msgin->command) { + case APP_CMD_TMO: + /* print tick-tock */ + msgin->count++; + if (msgin->count & 1) + APPL_PRINT("%" PRIu64 ". ", + (msgin->count / 2) + 1); + APPL_PRINT((msgin->count & 1) ? "tick\n" : "tock\n"); + + /* ack periodic timeout, reuse the same event */ + ret = em_tmo_ack(msgin->tmo, event); + test_fatal_if(ret != EM_OK, + "em_tmo_ack():%" PRI_STAT, ret); + + reuse = 1; /* do not free this event */ + + /* get random timeouts going after 10th message */ + if (msgin->count == 10) + new_rand_timeout(eo_ctx); + break; + + case APP_CMD_HELLO: + APPL_PRINT("%s\n\n", msgin->text); + /* set next timeout */ + new_rand_timeout(eo_ctx); + break; + default: + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "Invalid event!\n"); + } + } else { + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "Invalid event type!\n"); + } + + /* normally free the received event */ + if (!reuse) + em_free(event); +} + +/* sets a new random timeout */ +void new_rand_timeout(app_eo_ctx_t *eo_ctx) +{ + int rnd; + app_msg_t *msg; + uint64_t period; + em_status_t stat; + + /* random timeouts allocate new event every time (could reuse) */ + em_event_t event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, + m_shm->pool); + if (!event) { + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "Can't allocate event!"); + } + + msg = em_event_pointer(event); + msg->command = APP_CMD_HELLO; + + /* new timeout period APP_TIMEOUT_MIN_MS ... 
APP_TIMEOUT_MODULO */ + do { + rnd = rand_r(&m_randseed); + rnd %= APP_TIMEOUT_MODULO_MS; + + } while (rnd < APP_TIMEOUT_MIN_MS); + + snprintf(msg->text, APP_MAX_TEXT_LEN, "%d ms gone!\n", rnd); + msg->text[APP_MAX_TEXT_LEN - 1] = 0; + + APPL_PRINT("Meditation time: what can you do in %d ms?\n", rnd); + + period = eo_ctx->hz / 1000; + period *= rnd; /* rnd x ms */ + + /* Alternate between set_rel() and set_abs(), roughly half of each */ + if (rnd > (APP_TIMEOUT_MODULO_MS + APP_TIMEOUT_MIN_MS) / 2) + stat = em_tmo_set_rel(eo_ctx->random_tmo, period, event); + else + stat = em_tmo_set_abs(eo_ctx->random_tmo, + em_timer_current_tick(m_shm->tmr) + + period, event); + if (stat != EM_OK) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "Can't activate tmo!\n"); +} diff --git a/programs/performance/Makefile.am b/programs/performance/Makefile.am index f275e73a..36fe6200 100644 --- a/programs/performance/Makefile.am +++ b/programs/performance/Makefile.am @@ -6,6 +6,7 @@ noinst_PROGRAMS = atomic_processing_end \ loop_multircv \ loop_refs \ loop_vectors \ + loop_united \ queue_groups \ queues \ queues_unscheduled \ @@ -36,6 +37,9 @@ loop_refs_CFLAGS = $(AM_CFLAGS) loop_vectors_LDFLAGS = $(AM_LDFLAGS) loop_vectors_CFLAGS = $(AM_CFLAGS) +loop_united_LDFLAGS = $(AM_LDFLAGS) +loop_united_CFLAGS = $(AM_CFLAGS) + queue_groups_LDFLAGS = $(AM_LDFLAGS) queue_groups_CFLAGS = $(AM_CFLAGS) @@ -75,6 +79,7 @@ dist_loop_SOURCES = loop.c dist_loop_multircv_SOURCES = loop_multircv.c dist_loop_refs_SOURCES = loop_refs.c dist_loop_vectors_SOURCES = loop_vectors.c +dist_loop_united_SOURCES = loop_united.c dist_queue_groups_SOURCES = queue_groups.c dist_queues_SOURCES = queues.c dist_queues_unscheduled_SOURCES = queues_unscheduled.c diff --git a/programs/performance/loop_united.c b/programs/performance/loop_united.c new file mode 100644 index 00000000..05367d67 --- /dev/null +++ b/programs/performance/loop_united.c @@ -0,0 +1,1406 @@ +/* + * Copyright (c) 2024, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+/**
+ * @file
+ *
+ * Event Machine performance test example that combines the loop, loop_vectors,
+ * loop_multircv, loop_refs and pairs test applications into a single program
+ * controlled by command line parameters.
+ *
+ * loops (loop, loop_vectors, loop_multircv):
+ *
+ * Measures the average cycles consumed during an event send-sched-receive loop
+ * for a certain number of EOs in the system. The test has a number of EOs, each
+ * with one queue. Each EO receives events through its dedicated queue and
+ * sends them right back into the same queue, thus looping the events.
+ *
+ * The vectors loop-type loops event vectors instead of separate events.
+ *
+ * The multircv loop-type uses a multi-event EO-receive function.
+ *
+ * loop_refs:
+ *
+ * Measures the average cycles consumed during an event send-sched-receive loop
+ * for a certain number of EOs in the system. The test has a number of EOs, each
+ * with one queue. Each EO receives events (references) through its dedicated
+ * queue and sends them right back into the same queue, thus looping the events.
+ * In this variant, every looped event is an event reference.
+ *
+ * pairs:
+ *
+ * Measures the average cycles consumed during an event send-sched-receive loop
+ * for a certain number of EO pairs in the system. The test has a number of EOs
+ * arranged in pairs, ping-ponging the events between the two EOs of each pair.
+ * Depending on test dynamics (e.g. a single burst in an atomic queue), only one
+ * EO of a pair might be active at a time.
+ *
+ * The option to assign a portion of the queues to different priorities is
+ * inherited from the pairs test application. It uses three queue priority
+ * levels that affect scheduling (low priority queues might starve under a
+ * strict priority scheduler).
+ *
+ * The priority option (-u, --use-prio) can now be used with the other loop
+ * types as well.
+ */
+
+#include <inttypes.h>
+#include <string.h>
+#include <stdio.h>
+
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+
+#define USAGE_FMT \
+"\n" \
+"Usage: %s EM-ODP options -- APP SPECIFIC OPTIONS\n" \
+"       E.g. %s -c 0xfe -t -- -l l -e 10\n" \
+"\n" \
+"Open Event Machine example application.\n" \
+"\n" \
+"Note: to list the EM-ODP options, give -h, --help before/without the -- option.\n" \
+"\n" \
+"APP specific options\n" \
+"  -l, --loop            Loop type. (default: loop)\n" \
+"                          l: loop\n" \
+"                          v: vectors\n" \
+"                          m: multircv\n" \
+"                          r: refs\n" \
+"                          p: pairs\n" \
+"  -q, --queue-type      Queue type. (default: atomic, but parallel for refs)\n" \
+"                          a: atomic\n" \
+"                          p: parallel\n" \
+"  -c, --count-to-print  The number of events to be received before printing\n" \
+"                        a result.\n" \
+"                        (default: 0xff0000, for refs 0x3f0000)\n" \
+"                        (maximum: 0x7fffffff)\n" \
+"  -m, --multi-send      The maximum number of events to be sent per multi-event\n" \
+"                        send operation. Valid only for multircv.\n" \
+"                        (default: 32) (maximum: 32)\n" \
+"  -v, --vector-size     The size of the vectors sent in the vectors loop.\n" \
+"                        Valid only for vectors. (default: 8) (maximum: 8)\n" \
+"  -h, --help            Display help and exit.\n" \
+"\n"
+
+/*
+ * Test configuration
+ */
+
+/** Maximum number of events sent per em_send_multi() call */
+#define SEND_MULTI_MAX 32
+
+/** Maximum number of events in a vector */
+#define MAX_VECTOR_SIZE 8
+
+/** Maximum event data size. Note: the maximum is always reserved from memory */
+#define MAX_DATA_SIZE 256
+
+/** Maximum number of EOs and queues */
+#define MAX_NUM_EO 128
+
+/** Maximum number of events */
+#define MAX_NUM_EVENTS 128
+
+/** Maximum number of events to receive before printing a result */
+#define MAX_PRINT_EVENT_COUNT 0x7fffffff
+
+typedef enum loop_type_t {
+	LOOP_TYPE_UNDEF = 0,
+	LOOP_TYPE_LOOP = 1,
+	LOOP_TYPE_VECTOR = 2,
+	LOOP_TYPE_MULTIRCV = 3,
+	LOOP_TYPE_REFS = 4,
+	LOOP_TYPE_PAIRS = 5
+} loop_type_t;
+
+/* Result APPL_PRINT() format string */
+#define RESULT_PRINTF_FMT \
+"cycles/event:% -8.2f Mevents/s/core: %-6.2f %5.0f MHz core%02d %" PRIu64 "\n"
+
+/**
+ * Performance test statistics (per core)
+ */
+typedef struct {
+	int64_t events;
+	uint64_t begin_cycles;
+	uint64_t end_cycles;
+	uint64_t print_count;
+} perf_stat_t;
+
+/**
+ * Performance test EO context
+ */
+typedef struct {
+	/* Next destination queue */
+	em_queue_t dest;
+} eo_context_t;
+
+/**
+ * Perf test shared memory, read-only after start-up, allows cache-line sharing
+ */
+typedef struct {
+	/* EO context table */
+	eo_context_t eo_ctx_tbl[MAX_NUM_EO];
+	/* EO table */
+	em_eo_t eo_tbl[MAX_NUM_EO];
+	/* Event pool used by this application */
+	em_pool_t pool;
+	/* Vector pool used by this application */
+	em_pool_t vec_pool;
+} perf_shm_t;
+
+/** EM-core local pointer to shared memory */
+static ENV_LOCAL perf_shm_t *perf_shm;
+
+/* Command line arguments specific to this application */
+static struct {
+	/* The queue type */
+	uint8_t queue_type;
+	/* The loop type */
+	uint32_t loop_type;
+	/* The print event count */
+	int64_t print_event_count;
+	/* The number of events per queue */
+	int number_event_per_queue;
+	/* Data size for events */
+	int data_size;
+	/* Number of EOs and queues */
+	int number_eo;
+	/* Number of events sent with send_multi */
+	int num_multi_events;
+	/* Number of events per vector */
+	int vector_size;
+	/* Free and re-allocate the event(s) on every round */
+	bool free_event;
+	/* Use different queue priorities (inherited from pairs) */
+	bool use_prio;
+} perf_args;
+
+/**
+ * Core-specific test statistics.
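+ * Each core counts its own events and cycle stamps (ENV_LOCAL) and prints
+ * its own result line whenever print_event_count events have been looped.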
+ * + */ +static ENV_LOCAL perf_stat_t core_stat; + +/* + * Local function prototypes + */ + +static em_status_t +perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); + +static em_status_t +perf_stop(void *eo_context, em_eo_t eo); + +static void +perf_receive(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); + +static void +perf_receive_free(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); + +static void +perf_receive_pairs(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); + +static void +perf_receive_pairs_free(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); + +static void +perf_receive_multi(void *eo_context, em_event_t event_tbl[], int num, + em_queue_t queue, void *queue_context); + +static void +perf_receive_multi_free(void *eo_context, em_event_t event_tbl[], int num, + em_queue_t queue, void *queue_context); + +static void +print_result(perf_stat_t *const perf_stat); + +static em_queue_prio_t +get_queue_priority(const int index); + +static void +set_default_opts(void); + +static void +update_default_opts(bool queue_type_set, bool count_to_print_set, bool number_event_per_queue_set); + +static void +print_loop_options(void); + +static const char +*get_loop_type_str(uint32_t loop_type); + +static void +create_vector_pool(void); + +static void +send_burst(em_queue_t queue, em_event_t *events); + +static void +alloc_send_events_ref(em_queue_t *queues); + +static void +alloc_send_events_multircv(em_queue_t *queues); + +static void +alloc_send_events(em_queue_t *queues, uint32_t num_eos); + +static void +alloc_send_events_vector(em_queue_t *queues); + +static void +create_queues_eos(uint32_t num_of_eos, em_queue_t *queues, + const char *eo_name, const char *queue_name, + uint8_t shift_eo_ctx_tbl, em_eo_t *eos); + +static void +sync_to_queues_and_start_eos(em_queue_t *queues, em_queue_t *target_queues, + uint32_t offset, uint32_t num_of_eos, em_eo_t *eos); + +static inline void send_event(em_event_t event, em_queue_t queue); + +static inline em_event_t alloc_event(int data_size, em_event_type_t type); + +static inline void start_measurement(void); + +static inline void end_measurement(void); + +static inline void restart_measurement(int64_t *events); + +static void +usage(char *progname) +{ + APPL_PRINT(USAGE_FMT, NO_PATH(progname), NO_PATH(progname)); +} + +static void parse_app_specific_args(int argc, char *argv[]) +{ + int pca_argv_idx = 0; + + /* Find the index of -- where getopt_long at cm_setup.c would stop */ + for (int i = 1; i < argc; i++) { + if (!strcmp(argv[i], "--")) { + pca_argv_idx = i; + break; + } + } + + set_default_opts(); + bool queue_type_set = false; + bool count_to_print_set = false; + bool number_event_per_queue_set = false; + + /* No app specific argument is given, skip parsing */ + if (!pca_argv_idx) + return; + + optind = pca_argv_idx + 1; /* start from '--' + 1 */ + + static const struct option longopts[] = { + {"loop", required_argument, NULL, 'l'}, + {"free", no_argument, NULL, 'f'}, + {"use-prio", no_argument, NULL, 'u'}, + {"data-size", required_argument, NULL, 'd'}, + {"eo-count", required_argument, NULL, 'e'}, + {"event-amount", required_argument, NULL, 'n'}, + {"queue-type", required_argument, NULL, 'q'}, + {"count-to-print", required_argument, NULL, 'c'}, + {"multi-send", required_argument, NULL, 'm'}, + {"vector-size", required_argument, NULL, 'v'}, + {"help", 
no_argument, NULL, 'h'}, + {NULL, 0, NULL, 0} + }; + static const char *shortopts = "l:fud:e:n:q:c:m:v:h"; + + while (1) { + int opt; + int long_idx; + + opt = getopt_long(argc, argv, shortopts, longopts, &long_idx); + + if (opt == -1) + break; /* No more options */ + + switch (opt) { + case 'l': { + /* Sanity check */ + if (*optarg == 'l') { + perf_args.loop_type = LOOP_TYPE_LOOP; + } else if (*optarg == 'v') { + perf_args.loop_type = LOOP_TYPE_VECTOR; + } else if (*optarg == 'm') { + perf_args.loop_type = LOOP_TYPE_MULTIRCV; + } else if (*optarg == 'r') { + perf_args.loop_type = LOOP_TYPE_REFS; + } else if (*optarg == 'p') { + perf_args.loop_type = LOOP_TYPE_PAIRS; + } else { + APPL_PRINT("Loop type must be l, v, m, r or p\n"); + APPL_EXIT_FAILURE("Invalid type of loop: %s", optarg); + } + } + break; + + case 'f': { + perf_args.free_event = true; + } + break; + + case 'u': { + perf_args.use_prio = true; + } + break; + + case 'd': { + perf_args.data_size = atoi(optarg); + if (perf_args.data_size <= 0 || + perf_args.data_size > MAX_DATA_SIZE) + APPL_EXIT_FAILURE("Invalid data size: %s", + optarg); + } + break; + + case 'e': { + perf_args.number_eo = atoi(optarg); + if (perf_args.number_eo <= 0 || + perf_args.number_eo > MAX_NUM_EO || + (perf_args.number_eo % 2)) + APPL_EXIT_FAILURE("Invalid EO amount: %s", + optarg); + } + break; + + case 'n': { + perf_args.number_event_per_queue = atoi(optarg); + number_event_per_queue_set = true; + if (perf_args.number_event_per_queue <= 0 || + perf_args.number_event_per_queue > MAX_NUM_EVENTS) + APPL_EXIT_FAILURE("Invalid events amount: %s", + optarg); + } + break; + + case 'q': { + /* Sanity check */ + if (*optarg == 'a') { + perf_args.queue_type = EM_QUEUE_TYPE_ATOMIC; + } else if (*optarg == 'p') { + perf_args.queue_type = EM_QUEUE_TYPE_PARALLEL; + } else { + APPL_PRINT("Queue type must be a (atomic) or p (parallel)\n"); + APPL_EXIT_FAILURE("Invalid type of queue: %s", optarg); + } + queue_type_set = true; + } + break; + + case 'c': { + char *end_ptr; + + perf_args.print_event_count = strtol(optarg, &end_ptr, 16); + count_to_print_set = true; + if (perf_args.print_event_count <= 0 || + perf_args.print_event_count > MAX_PRINT_EVENT_COUNT) + APPL_EXIT_FAILURE("Invalid print event count: %s", + optarg); + } + break; + + case 'm': { + perf_args.num_multi_events = atoi(optarg); + if (perf_args.num_multi_events <= 0 || + perf_args.num_multi_events > SEND_MULTI_MAX) + APPL_EXIT_FAILURE("Invalid multi-events count: %s", + optarg); + } + break; + + case 'v': { + perf_args.vector_size = atoi(optarg); + if (perf_args.vector_size <= 0 || + perf_args.vector_size > MAX_VECTOR_SIZE) + APPL_EXIT_FAILURE("Invalid vector size: %s", + optarg); + } + break; + + /* Note: must specify -h after -- to print usage info + * specific to this app. Otherwise, general EM-ODP usage + * will be displayed. 
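+			 * (Options given before the '--' separator are parsed
+			 * by cm_setup(), not by this function.)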
+			 */
+		case 'h':
+			usage(argv[0]);
+			exit(EXIT_SUCCESS);
+			break;
+
+		case ':':
+			usage(argv[0]);
+			APPL_EXIT_FAILURE("Missing arguments!\n");
+			break;
+
+		default:
+			usage(argv[0]);
+			APPL_EXIT_FAILURE("Unknown option!\n");
+			break;
+		}
+	}
+
+	update_default_opts(queue_type_set, count_to_print_set, number_event_per_queue_set);
+
+	print_loop_options();
+
+	/* Reset 'extern optind' to restart scanning in cm_setup() */
+	optind = 1;
+}
+
+static void
+set_default_opts(void)
+{
+	/* Set default values */
+	perf_args.loop_type = LOOP_TYPE_LOOP;
+	perf_args.free_event = false;
+	perf_args.use_prio = false;
+	perf_args.data_size = 250;
+	perf_args.number_eo = 128;
+	perf_args.number_event_per_queue = 32;
+	perf_args.queue_type = EM_QUEUE_TYPE_ATOMIC;
+	perf_args.print_event_count = 0xff0000;
+	perf_args.num_multi_events = 32;
+	perf_args.vector_size = 8;
+}
+
+static void
+update_default_opts(bool queue_type_set, bool count_to_print_set, bool number_event_per_queue_set)
+{
+	/* Update the values that depend on the loop type when the parameters
+	 * were not explicitly set
+	 */
+	if (!number_event_per_queue_set) {
+		if (perf_args.loop_type == LOOP_TYPE_REFS ||
+		    perf_args.loop_type == LOOP_TYPE_MULTIRCV) {
+			perf_args.number_event_per_queue = 128;
+		}
+	}
+	if (!queue_type_set) {
+		if (perf_args.loop_type == LOOP_TYPE_REFS)
+			perf_args.queue_type = EM_QUEUE_TYPE_PARALLEL;
+	}
+	if (!count_to_print_set) {
+		if (perf_args.loop_type == LOOP_TYPE_REFS)
+			perf_args.print_event_count = 0x3f0000;
+	}
+	/* The vector loop does not support the free-event functionality */
+	if (perf_args.loop_type == LOOP_TYPE_VECTOR) {
+		perf_args.free_event = false;
+		/* With the vector loop, the max number of events per queue is 32 */
+		if (perf_args.number_event_per_queue > 32)
+			APPL_EXIT_FAILURE("With vector loop max nbr of events per queue is 32\n");
+	}
+}
+
+static void
+print_loop_options(void)
+{
+	APPL_PRINT("EM loop options:\n");
+	APPL_PRINT("  Loop type:    %s\n", get_loop_type_str(perf_args.loop_type));
+	APPL_PRINT("  Free:         %s\n", perf_args.free_event ? "on" : "off");
+	APPL_PRINT("  Priorities:   %s\n", perf_args.use_prio ? "on" : "off");
+	APPL_PRINT("  Data size:    %d\n", perf_args.data_size);
+	APPL_PRINT("  EO count:     %d\n", perf_args.number_eo);
+	APPL_PRINT("  Event amount: %d\n", perf_args.number_event_per_queue);
+	APPL_PRINT("  Queue type:   %s\n", (perf_args.queue_type == EM_QUEUE_TYPE_ATOMIC)
+		   ? "ATOMIC" : "PARALLEL");
+	APPL_PRINT("  Print count:  0x%" PRIx64 " (%" PRId64 ")\n",
+		   perf_args.print_event_count, perf_args.print_event_count);
+	if (perf_args.loop_type == LOOP_TYPE_MULTIRCV)
+		APPL_PRINT("  multi send:   %d\n", perf_args.num_multi_events);
+	if (perf_args.loop_type == LOOP_TYPE_VECTOR)
+		APPL_PRINT("  vector size:  %d\n", perf_args.vector_size);
+}
+
+static const char
+*get_loop_type_str(uint32_t loop_type)
+{
+	const char *type_str;
+
+	switch (loop_type) {
+	case LOOP_TYPE_UNDEF:
+		type_str = "UNDEF";
+		break;
+	case LOOP_TYPE_LOOP:
+		type_str = "LOOP";
+		break;
+	case LOOP_TYPE_VECTOR:
+		type_str = "VECTOR";
+		break;
+	case LOOP_TYPE_REFS:
+		type_str = "REFS";
+		break;
+	case LOOP_TYPE_MULTIRCV:
+		type_str = "MULTIRCV";
+		break;
+	case LOOP_TYPE_PAIRS:
+		type_str = "PAIRS";
+		break;
+	default:
+		type_str = "UNKNOWN";
+		break;
+	}
+
+	return type_str;
+}
+
+/**
+ * Main function
+ *
+ * Call cm_setup() to perform test & EM setup common for all the
+ * test applications.
+ *
+ * cm_setup() will call test_init() and test_start() and launch
+ * the EM dispatch loop on every EM-core.
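+ *
+ * An example invocation (hypothetical, following the USAGE_FMT text above)
+ * that runs the pairs loop with queue priorities on 8 EOs:
+ *   ./loop_united -c 0xfe -t -- -l p -e 8 -u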
+ */ +int main(int argc, char *argv[]) +{ + /* Parse arguments specific to this app */ + parse_app_specific_args(argc, argv); + + /* This shall be updated before threads are created */ + if (perf_args.loop_type == LOOP_TYPE_MULTIRCV) + core_stat.events = 0; + else + core_stat.events = -perf_args.print_event_count; + + return cm_setup(argc, argv); +} + +/** + * Init of the Loop performance test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void test_init(const appl_conf_t *appl_conf) +{ + (void)appl_conf; + int core = em_core_id(); + + if (core == 0) { + perf_shm = env_shared_reserve("PerfSharedMem", + sizeof(perf_shm_t)); + em_register_error_handler(test_error_handler); + } else { + perf_shm = env_shared_lookup("PerfSharedMem"); + } + + if (perf_shm == NULL) + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Perf init failed on EM-core:%u", em_core_id()); + else if (core == 0) + memset(perf_shm, 0, sizeof(perf_shm_t)); +} + +/** + * Startup of the Loop performance test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. + */ +void test_start(const appl_conf_t *appl_conf) +{ + /* + * Store the event pool to use, use the EM default pool if no other + * pool is provided through the appl_conf. + */ + if (appl_conf->num_pools >= 1) + perf_shm->pool = appl_conf->pools[0]; + else + perf_shm->pool = EM_POOL_DEFAULT; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%d\n" + " Application running on %u EM-cores (procs:%u, threads:%u)\n" + " using event pool:%" PRI_POOL "\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + appl_conf->core_count, appl_conf->num_procs, appl_conf->num_threads, + perf_shm->pool); + + test_fatal_if(perf_shm->pool == EM_POOL_UNDEF, + "Undefined application event pool!"); + + /* + * Create and start application EOs + * Send initial test events to the EOs' queues + */ + + switch (perf_args.loop_type) { + case LOOP_TYPE_LOOP: { + em_queue_t queues[perf_args.number_eo]; + em_eo_t eos[perf_args.number_eo]; + + create_queues_eos(perf_args.number_eo, queues, "loop-eo", + "queue A", 0, eos); + sync_to_queues_and_start_eos(queues, queues, 0, perf_args.number_eo, eos); + alloc_send_events(queues, perf_args.number_eo); + break; + } + + case LOOP_TYPE_VECTOR: { + em_queue_t queues[perf_args.number_eo]; + em_eo_t eos[perf_args.number_eo]; + + create_vector_pool(); + create_queues_eos(perf_args.number_eo, queues, "loop-eo", + "queue A", 0, eos); + sync_to_queues_and_start_eos(queues, queues, 0, perf_args.number_eo, eos); + alloc_send_events_vector(queues); + break; + } + + case LOOP_TYPE_MULTIRCV: { + em_queue_t queues[perf_args.number_eo]; + em_eo_t eos[perf_args.number_eo]; + + create_queues_eos(perf_args.number_eo, queues, "loop-eo", + "queue A", 0, eos); /*multircv create EOs!!!!!*/ + sync_to_queues_and_start_eos(queues, queues, 0, perf_args.number_eo, eos); + alloc_send_events_multircv(queues); + break; + } + + case LOOP_TYPE_REFS: { + em_queue_t queues[perf_args.number_eo]; + em_eo_t eos[perf_args.number_eo]; + + create_queues_eos(perf_args.number_eo, queues, "loop-eo", + "queue A", 0, eos); + sync_to_queues_and_start_eos(queues, queues, 0, perf_args.number_eo, eos); + alloc_send_events_ref(queues); + break; + } + + case 
LOOP_TYPE_PAIRS: {
+		int half_num_eo = perf_args.number_eo / 2;
+		em_queue_t queues_a[half_num_eo];
+		em_queue_t queues_b[half_num_eo];
+		em_eo_t eos_a[half_num_eo];
+		em_eo_t eos_b[half_num_eo];
+
+		create_queues_eos(half_num_eo, queues_a, "pairs-eo-a",
+				  "queue-A", 0, eos_a);
+		create_queues_eos(half_num_eo, queues_b, "pairs-eo-b",
+				  "queue-B", half_num_eo, eos_b);
+		sync_to_queues_and_start_eos(queues_a, queues_b, 0, half_num_eo, eos_a);
+		sync_to_queues_and_start_eos(queues_b, queues_a, half_num_eo,
+					     half_num_eo, eos_b);
+		alloc_send_events(queues_a, half_num_eo);
+		alloc_send_events(queues_b, half_num_eo);
+		break;
+	}
+
+	default:
+		break;
+	}
+}
+
+static void
+send_burst(em_queue_t queue, em_event_t *events)
+{
+	/* Send in bursts of events/vectors */
+	const int send_rounds = perf_args.number_event_per_queue
+				/ perf_args.num_multi_events;
+	const int left_over = perf_args.number_event_per_queue
+			      % perf_args.num_multi_events;
+	int num_sent = 0;
+	int m, n;
+
+	for (m = 0, n = 0; m < send_rounds; m++, n += perf_args.num_multi_events) {
+		num_sent += em_send_multi(&events[n], perf_args.num_multi_events,
+					  queue);
+	}
+	if (left_over) {
+		num_sent += em_send_multi(&events[n], left_over,
+					  queue);
+	}
+	test_fatal_if(num_sent != perf_args.number_event_per_queue,
+		      "Event send multi failed:%d (%d)\n"
+		      "Q:%" PRI_QUEUE "",
+		      num_sent, perf_args.number_event_per_queue, queue);
+}
+
+static void
+alloc_send_events_ref(em_queue_t *queues)
+{
+	em_event_t ev = em_alloc(perf_args.data_size,
+				 EM_EVENT_TYPE_SW, perf_shm->pool);
+	test_fatal_if(ev == EM_EVENT_UNDEF,
+		      "Event allocation failed");
+	for (int i = 0; i < perf_args.number_eo; i++) {
+		em_queue_t queue = queues[i];
+		em_event_t events[perf_args.number_event_per_queue];
+
+		/* LOOP_TYPE_REFS: every sent event is a reference to 'ev' */
+		for (int j = 0; j < perf_args.number_event_per_queue; j++) {
+			em_event_t ref = em_event_ref(ev);
+
+			test_fatal_if(ref == EM_EVENT_UNDEF,
+				      "Event ref creation failed (%d, %d)", i, j);
+			events[j] = ref;
+		}
+		send_burst(queue, events);
+	}
+	/* Free the original event */
+	em_free(ev);
+	env_sync_mem();
+}
+
+static void
+alloc_send_events_multircv(em_queue_t *queues)
+{
+	for (int i = 0; i < perf_args.number_eo; i++) {
+		em_queue_t queue = queues[i];
+		em_event_t events[perf_args.number_event_per_queue];
+
+		int num, tot = 0;
+
+		/* Alloc and send test events, retry until all allocated */
+		do {
+			num = em_alloc_multi(&events[tot],
+					     perf_args.number_event_per_queue - tot,
+					     perf_args.data_size,
+					     EM_EVENT_TYPE_SW, perf_shm->pool);
+			tot += num;
+		} while (tot < perf_args.number_event_per_queue && num > 0);
+		test_fatal_if(tot != perf_args.number_event_per_queue,
+			      "Allocated:%d of requested:%d events",
+			      tot, perf_args.number_event_per_queue);
+		send_burst(queue, events);
+	}
+	env_sync_mem();
+}
+
+static void
+alloc_send_events(em_queue_t *queues, uint32_t num_eos)
+{
+	for (uint32_t i = 0; i < num_eos; i++) {
+		em_event_t ev;
+		em_queue_t queue = queues[i];
+		em_event_t events[perf_args.number_event_per_queue];
+
+		/* Alloc and send test events */
+		for (int j = 0; j < perf_args.number_event_per_queue; j++) {
+			ev = em_alloc(perf_args.data_size,
+				      EM_EVENT_TYPE_SW, perf_shm->pool);
+			test_fatal_if(ev == EM_EVENT_UNDEF,
+				      "Event allocation failed (%u, %d)", i, j);
+			events[j] = ev;
+		}
+		send_burst(queue, events);
+	}
+	env_sync_mem();
+}
+
+static void
+alloc_send_events_vector(em_queue_t *queues)
+{
+	for (int i = 0; i < perf_args.number_eo; i++) {
+		em_event_t ev;
+		em_queue_t queue = queues[i];
+		em_event_t events[perf_args.number_event_per_queue];
+
+		/* Alloc and send test vectors */
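+		/* Each vector carries 1..vector_size payload events: the
+		 * vector table is obtained with em_event_vector_tbl() and the
+		 * number of events actually stored is set with
+		 * em_event_vector_size_set() (see the loop below).
+		 */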
for (int j = 0; j < perf_args.number_event_per_queue; j++) { + uint32_t vec_sz = (j % perf_args.vector_size) + 1; + em_event_t vec = em_alloc(vec_sz, EM_EVENT_TYPE_VECTOR, + perf_shm->vec_pool); + test_fatal_if(vec == EM_EVENT_UNDEF, + "Vector allocation failed (%d, %d)", i, j); + em_event_t *vectbl = NULL; + uint32_t curr_sz = em_event_vector_tbl(vec, &vectbl); + + test_fatal_if(curr_sz || !vectbl, + "Vector table invalid: sz=%d vectbl=%p)", + curr_sz, vectbl); + + for (uint32_t k = 0; k < vec_sz; k++) { + ev = em_alloc(perf_args.data_size, + EM_EVENT_TYPE_SW, perf_shm->pool); + test_fatal_if(ev == EM_EVENT_UNDEF, + "Event allocation failed (%d, %d)", i, j); + vectbl[k] = ev; + } + em_event_vector_size_set(vec, vec_sz); + + events[j] = vec; + } + send_burst(queue, events); + } + env_sync_mem(); +} + +static void +create_vector_pool(void) +{ + em_pool_cfg_t vec_pool_cfg; + em_pool_t vec_pool = EM_POOL_UNDEF; + + /* For vectors will be initiated vector pool, not needed for other loops but doesn't harm */ + em_pool_cfg_init(&vec_pool_cfg); + vec_pool_cfg.event_type = EM_EVENT_TYPE_VECTOR; + vec_pool_cfg.num_subpools = 1; + vec_pool_cfg.subpool[0].cache_size = 0; /* all allocated in startup */ + vec_pool_cfg.subpool[0].num = perf_args.number_eo * perf_args.number_event_per_queue; + vec_pool_cfg.subpool[0].size = perf_args.vector_size; + + vec_pool = em_pool_create("vector-pool", EM_POOL_UNDEF, &vec_pool_cfg); + test_fatal_if(vec_pool == EM_POOL_UNDEF, "vector pool create failed!"); + + perf_shm->vec_pool = vec_pool; +} + +static void +create_queues_eos(uint32_t num_of_eos, em_queue_t *queues, + const char *eo_name, const char *queue_name, + uint8_t shift_eo_ctx_tbl, em_eo_t *eos) +{ + em_queue_t queue; + eo_context_t *eo_ctx; + em_eo_t eo; + em_eo_multircv_param_t eo_param; + em_receive_func_t recv_fn; + + for (uint32_t i = 0; i < num_of_eos; i++) { + /* Create the EO's loop queue */ + queue = em_queue_create(queue_name, perf_args.queue_type, + get_queue_priority(i), + EM_QUEUE_GROUP_DEFAULT, NULL); + + test_fatal_if(queue == EM_QUEUE_UNDEF, + "Queue creation failed, round:%d", i); + queues[i] = queue; + + /* Create the EO */ + switch (perf_args.loop_type) { + case LOOP_TYPE_MULTIRCV: + /* Init & create the EO */ + em_eo_multircv_param_init(&eo_param); + /* Set EO params needed by this application */ + eo_param.start = perf_start; + eo_param.stop = perf_stop; + if (perf_args.free_event) + eo_param.receive_multi = perf_receive_multi_free; + else + eo_param.receive_multi = perf_receive_multi; + /* eo_param.max_events = use default; */ + eo = em_eo_create_multircv("loop-eo", &eo_param); + break; + case LOOP_TYPE_PAIRS: + if (perf_args.free_event) + recv_fn = perf_receive_pairs_free; + else + recv_fn = perf_receive_pairs; + eo_ctx = &perf_shm->eo_ctx_tbl[i + shift_eo_ctx_tbl]; + eo = em_eo_create(eo_name, perf_start, NULL, perf_stop, NULL, + recv_fn, eo_ctx); + break; + case LOOP_TYPE_LOOP: + case LOOP_TYPE_VECTOR: + case LOOP_TYPE_REFS: + if (perf_args.free_event) + recv_fn = perf_receive_free; + else + recv_fn = perf_receive; + eo_ctx = &perf_shm->eo_ctx_tbl[i + shift_eo_ctx_tbl]; + eo = em_eo_create(eo_name, perf_start, NULL, perf_stop, NULL, + recv_fn, eo_ctx); + break; + default: + eo = EM_EO_UNDEF; + } + test_fatal_if(eo == EM_EO_UNDEF, + "EO(%d) creation failed!", i); + eos[i] = eo; + } +} + +static void +sync_to_queues_and_start_eos(em_queue_t *queues, em_queue_t *target_queues, + uint32_t offset, uint32_t num_of_eos, em_eo_t *eos) +{ + em_status_t ret, start_ret = EM_ERROR; + + for 
(uint32_t i = 0; i < num_of_eos; i++) { + eo_context_t *eo_ctx = &perf_shm->eo_ctx_tbl[i + offset]; + + eo_ctx->dest = target_queues[i]; + perf_shm->eo_tbl[i + offset] = eos[i]; + ret = em_eo_add_queue_sync(eos[i], queues[i]); + test_fatal_if(ret != EM_OK, + "EO add queue:%" PRI_STAT "\n" + "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", + ret, eos[i], queues[i]); + + ret = em_eo_start_sync(eos[i], &start_ret, NULL); + test_fatal_if(ret != EM_OK || start_ret != EM_OK, + "EO start:%" PRI_STAT " %" PRI_STAT "", + ret, start_ret); + } +} + +void test_stop(const appl_conf_t *appl_conf) +{ + const int core = em_core_id(); + em_eo_t eo; + em_status_t ret; + int i; + + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + for (i = 0; i < perf_args.number_eo; i++) { + /* Stop & delete EO */ + eo = perf_shm->eo_tbl[i]; + + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); + + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); + } + + if (perf_args.loop_type == LOOP_TYPE_VECTOR) + em_pool_delete(perf_shm->vec_pool); +} + +void test_term(const appl_conf_t *appl_conf) +{ + (void)appl_conf; + const int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + if (core == 0) { + env_shared_free(perf_shm); + em_unregister_error_handler(); + } +} + +/** + * @private + * + * EO start function. + */ +static em_status_t +perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + (void)eo_context; + (void)eo; + (void)conf; + + return EM_OK; +} + +/** + * @private + * + * EO stop function. + */ +static em_status_t +perf_stop(void *eo_context, em_eo_t eo) +{ + em_status_t ret; + + (void)eo_context; + + /* Remove and delete all of the EO's queues */ + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + return ret; +} + +static inline void send_event(em_event_t event, em_queue_t queue) +{ + em_status_t ret = em_send(event, queue); + + if (unlikely(ret != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, + "Send:%" PRI_STAT " Queue:%" PRI_QUEUE "", + ret, queue); + } +} + +static inline em_event_t alloc_event(int data_size, em_event_type_t type) +{ + em_event_t event = em_alloc(data_size, type, perf_shm->pool); + + test_fatal_if(event == EM_EVENT_UNDEF, "Event alloc fails"); + return event; +} + +static inline void start_measurement(void) +{ + core_stat.begin_cycles = env_get_cycle(); +} + +static inline void end_measurement(void) +{ + core_stat.end_cycles = env_get_cycle(); + core_stat.print_count += 1; + print_result(&core_stat); +} + +static inline void restart_measurement(int64_t *events) +{ + *events = -1; /* +1 below => 0 */ +} + +/** + * @private + * + * EO receive function. + * + * Loops back events and calculates the event rate. + */ +static void +perf_receive(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + int64_t events = core_stat.events; + + (void)eo_context; + (void)type; + (void)queue_context; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + if (unlikely(events == 0)) { + start_measurement(); + } else if (unlikely(events == perf_args.print_event_count)) { + end_measurement(); + restart_measurement(&events); + } + + send_event(event, queue); + + events++; + core_stat.events = events; +} + +/** + * @private + * + * EO receive function. 
Freeing and recreating event every round + * + * Loops back events and calculates the event rate. + */ +static void +perf_receive_free(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + int64_t events = core_stat.events; + + (void)eo_context; + (void)type; + (void)queue_context; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + if (unlikely(events == 0)) { + start_measurement(); + } else if (unlikely(events == perf_args.print_event_count)) { + end_measurement(); + restart_measurement(&events); + } + + em_free(event); + event = alloc_event(perf_args.data_size, type); + + send_event(event, queue); + + events++; + core_stat.events = events; +} + +/** + * @private + * + * EO pairs receive function. + * + * Loops back events and calculates the event rate. + */ +static void +perf_receive_pairs(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + int64_t events = core_stat.events; + eo_context_t *const eo_ctx = eo_context; + const em_queue_t dst_queue = eo_ctx->dest; + + (void)type; + (void)queue; + (void)queue_context; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + if (unlikely(events == 0)) { + start_measurement(); + } else if (unlikely(events == perf_args.print_event_count)) { + end_measurement(); + restart_measurement(&events); + } + + send_event(event, dst_queue); + + events++; + core_stat.events = events; +} + +/** + * @private + * + * EO pairs receive function. + * + * Loops back events and calculates the event rate. + */ +static void +perf_receive_pairs_free(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + int64_t events = core_stat.events; + eo_context_t *const eo_ctx = eo_context; + const em_queue_t dst_queue = eo_ctx->dest; + + (void)type; + (void)queue; + (void)queue_context; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + if (unlikely(events == 0)) { + start_measurement(); + } else if (unlikely(events == perf_args.print_event_count)) { + end_measurement(); + restart_measurement(&events); + } + + em_free(event); + event = alloc_event(perf_args.data_size, type); + + send_event(event, dst_queue); + + events++; + core_stat.events = events; +} + +/** + * @private + * + * EO receive function for Multircv loops + * + * Loops back events and calculates the event rate. + */ +static void +perf_receive_multi(void *eo_context, em_event_t event_tbl[], int num, + em_queue_t queue, void *queue_context) +{ + int64_t event_count = core_stat.events; + int ret; + + (void)eo_context; + (void)queue_context; + + if (unlikely(appl_shm->exit_flag)) { + em_free_multi(event_tbl, num); + return; + } + + if (unlikely(event_count == 0)) { + start_measurement(); + } else if (unlikely(event_count >= perf_args.print_event_count)) { + end_measurement(); + event_count = -num; /* +num below => 0 */ + } + + ret = em_send_multi(event_tbl, num, queue); + if (unlikely(ret != num)) { + em_free_multi(&event_tbl[ret], num - ret); + test_fatal_if(!appl_shm->exit_flag, + "Send-multi:%d Num:%d Queue:%" PRI_QUEUE "", + ret, num, queue); + } + + event_count += num; + core_stat.events = event_count; +} + +/** + * @private + * + * EO receive function for Multircv loops freeing and recreating event every round + * + * Loops back events and calculates the event rate. 
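+ * Compared to perf_receive_multi(), every round additionally frees the
+ * received events and allocates fresh ones, so the measured cycle count
+ * includes the em_free_multi()/em_alloc_multi() overhead.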
+ */ +static void +perf_receive_multi_free(void *eo_context, em_event_t event_tbl[], int num, + em_queue_t queue, void *queue_context) +{ + int64_t event_count = core_stat.events; + int ret; + + (void)eo_context; + (void)queue_context; + + if (unlikely(appl_shm->exit_flag)) { + em_free_multi(event_tbl, num); + return; + } + + if (unlikely(event_count == 0)) { + start_measurement(); + } else if (unlikely(event_count >= perf_args.print_event_count)) { + end_measurement(); + event_count = -num; /* +num below => 0 */ + } + + em_free_multi(event_tbl, num); + ret = em_alloc_multi(event_tbl, num, perf_args.data_size, + EM_EVENT_TYPE_SW, perf_shm->pool); + test_fatal_if(ret != num, "Allocated %d of num:%d events", + ret, num); + + ret = em_send_multi(event_tbl, num, queue); + if (unlikely(ret != num)) { + em_free_multi(&event_tbl[ret], num - ret); + test_fatal_if(!appl_shm->exit_flag, + "Send-multi:%d Num:%d Queue:%" PRI_QUEUE "", + ret, num, queue); + } + + event_count += num; + core_stat.events = event_count; +} + +/** + * Get queue priority value based on the index number. + * + * @param Queue index + * + * @return Queue priority value + * + * @note Priority distribution: 40% LOW, 40% NORMAL, 20% HIGH + */ +static em_queue_prio_t +get_queue_priority(const int queue_index) +{ + em_queue_prio_t prio; + + if (perf_args.use_prio) { + int remainder = queue_index % 5; + + if (remainder <= 1) + prio = EM_QUEUE_PRIO_LOW; + else if (remainder <= 3) + prio = EM_QUEUE_PRIO_NORMAL; + else + prio = EM_QUEUE_PRIO_HIGH; + } else { + prio = EM_QUEUE_PRIO_NORMAL; + } + + return prio; +} + +/** + * Prints test measurement result + */ +static void +print_result(perf_stat_t *const perf_stat) +{ + uint64_t diff; + uint32_t hz; + double mhz; + double cycles_per_event, events_per_sec; + uint64_t print_count; + + hz = env_core_hz(); + mhz = ((double)hz) / 1000000.0; + + diff = env_cycles_diff(perf_stat->end_cycles, perf_stat->begin_cycles); + + print_count = perf_stat->print_count; + cycles_per_event = ((double)diff) / ((double)perf_stat->events); + events_per_sec = mhz / cycles_per_event; /* Million events/s */ + + APPL_PRINT(RESULT_PRINTF_FMT, cycles_per_event, events_per_sec, + mhz, em_core_id(), print_count); +} diff --git a/programs/performance/timer_test.c b/programs/performance/timer_test.c index 3f285d89..0dd50d64 100644 --- a/programs/performance/timer_test.c +++ b/programs/performance/timer_test.c @@ -1,1355 +1,1366 @@ -/* - * Copyright (c) 2017-2020, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine timer basic test. - * - * Simple test for timer (does not test everything). Creates and deletes random - * timers and checks how accurate the timeout indications are against timer - * itself and also linux time (clock_gettime). Single EO, but receiving queue - * is parallel so multiple threads can process timeouts concurrently. - * - * Exception/error management is simplified and aborts on most errors. - * - */ -#ifndef _GNU_SOURCE -#define _GNU_SOURCE -#endif -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "cm_setup.h" -#include "cm_error_handler.h" - -#define TEST_VERSION "v1.4" - -/* - * Test app defines. - * Be careful, conflicting values may not be checked! - */ -#define APP_TIMER_RESOLUTION_US 1000 /* requested em-timer resolution */ -#define APP_TIMEOUT_MAX_US (10000ULL * 1000ULL) /* max random timeout */ -#define APP_TIMEOUT_MIN_US 5000 /* minimum random timeout */ -#define APP_MAX_TMOS 1000 /* simultaneous oneshots */ -#define APP_MAX_PERIODIC 300 /* simultaneous periodic */ -#define APP_PRINT_EACH_TMO 0 /* 0 to only print summary */ -#define APP_PRINT_DOTS 1 /* visual progress dots */ -#define APP_VISUAL_DEBUG 0 /* 0|1, for testing only. 
Slow, but visual */ -#define APP_EXTRA_PRINTS 0 /* debugging helper */ -#define APP_PER_CANCEL_CHK 0 /* do not use yet, WIP */ - -#define APP_SHMEM_NAME "TimerTestShMem" -#define APP_HEARTBEAT_MS 2000 /* heartbeat tick period */ -#define APP_CHECK_COUNT (APP_TIMEOUT_MAX_US / 1000 / APP_HEARTBEAT_MS) -#define APP_CHECK_LIMIT (3 * (APP_CHECK_COUNT + 1)) /* num HB */ -#define APP_CHECK_GUARD 6 /* num HB */ - -#define APP_CANCEL_MODULO_P (APP_MAX_PERIODIC * 50) /* cancel propability*/ -#define APP_CANCEL_MODULO (APP_MAX_TMOS * 5) /* cancel propability */ -#define APP_CANCEL_MARGIN_NS 100000 /* check limit for cancel fail ok */ -#define APP_LINUX_CLOCK_SRC CLOCK_MONOTONIC /* for clock_gettime */ -#define APP_INCREASING_DLY 7 /* if not 0, add this to increasing - * delay before calling periodic timer ack - */ -#define APP_INC_DLY_MODULO 15 /* apply increasing delay to every Nth tmo*/ - -#if APP_VISUAL_DEBUG -#define VISUAL_DBG(x) APPL_PRINT(x) -#else -#define VISUAL_DBG(x) do {} while (0) -#endif - -#define APP_EO_NAME "Test EO" - -/** - * Test application message event - */ -typedef enum app_cmd_t { - APP_CMD_HEARTBEAT, - APP_CMD_TMO_SINGLE, - APP_CMD_TMO_PERIODIC -} app_cmd_t; - -typedef struct app_msg_t { - app_cmd_t command; - int index; - int dummy_delay; -} app_msg_t; - -typedef struct app_tmo_data_t { - em_tmo_t tmo ENV_CACHE_LINE_ALIGNED; - em_event_t event; - em_timer_tick_t when; - em_timer_tick_t howmuch; - em_timer_tick_t appeared; - struct timespec linux_when; - struct timespec linux_appeared; - em_timer_tick_t canceled; /* acts as flag, but also stores tick when done */ - em_timer_tick_t waitevt; /* acts as flag, but also stores tick when done */ - atomic_flag lock; /* used when adding timestamps or cancelling */ - unsigned int max_dummy; -} app_tmo_data_t; - -typedef enum app_test_state_t { - APP_STATE_IDLE = 0, - APP_STATE_RUNNING, - APP_STATE_STOPPING, - APP_STATE_CHECKING -} app_test_state_t; - -const char *dot_marks = " .-#"; /* per state above */ - -/** - * EO context - * - * Shared data. 
Concurrently manipulated fields use C atomics - */ -typedef struct app_eo_ctx_t { - em_tmo_t heartbeat_tmo; - uint64_t heartbeat_count; - uint64_t heartbeat_target; - em_queue_t my_q; - em_queue_t my_prio_q; - uint64_t hz; - uint64_t linux_hz; - uint64_t rounds; - int nocancel; - - atomic_int state; - atomic_uint errors; - atomic_uint ack_errors; - - int64_t min_diff; - int64_t max_diff; - int64_t min_diff_l; - int64_t max_diff_l; - unsigned int max_dummy; - uint64_t min_tmo; - uint64_t res_ns; - - struct { - app_tmo_data_t tmo[APP_MAX_TMOS]; - - atomic_uint_fast64_t received ENV_CACHE_LINE_ALIGNED; - atomic_uint_fast64_t cancelled; - atomic_uint_fast64_t cancel_fail; - } oneshot; - - struct { - app_tmo_data_t tmo[APP_MAX_PERIODIC]; - - atomic_uint_fast64_t received ENV_CACHE_LINE_ALIGNED; - atomic_uint_fast64_t cancelled; - atomic_uint_fast64_t cancel_fail; - } periodic; -} app_eo_ctx_t; - -/** - * Timer test shared memory data - */ -typedef struct timer_app_shm_t { - /* Event pool used by this application */ - em_pool_t pool; - /* EO context data */ - app_eo_ctx_t eo_context; - /* Event timer handle */ - em_timer_t tmr; - /* Pad size to a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} timer_app_shm_t; - -/* EM-thread locals */ -static ENV_LOCAL timer_app_shm_t *m_shm; -static ENV_LOCAL unsigned int m_randseed; - -/* Local function prototypes */ -static em_status_t app_eo_start(void *eo_context, em_eo_t eo, - const em_eo_conf_t *conf); -static em_status_t app_eo_start_local(void *eo_context, em_eo_t eo); -static em_status_t app_eo_stop(void *eo_context, em_eo_t eo); -static void app_eo_receive(void *eo_context, em_event_t event, - em_event_type_t type, em_queue_t queue, - void *q_context); - -static em_timer_tick_t rand_timeout(unsigned int *seed, app_eo_ctx_t *eo_ctx, - unsigned int fixed); -static void set_timeouts(app_eo_ctx_t *eo_ctx); -static void start_test(app_eo_ctx_t *eo_ctx); -static void check_test(app_eo_ctx_t *eo_ctx); -static void stop_test(app_eo_ctx_t *eo_ctx); -static void cleanup_test(app_eo_ctx_t *eo_ctx); -static int64_t ts_diff_ns(struct timespec *ts1, struct timespec *ts2); -static int64_t tick_diff_ns(em_timer_tick_t t1, em_timer_tick_t t2, - uint64_t hz); -static void random_cancel(app_eo_ctx_t *eo_ctx); -static em_event_t random_cancel_periodic(app_eo_ctx_t *eo_ctx); -static unsigned int check_single(app_eo_ctx_t *eo_ctx); -static unsigned int check_periodic(app_eo_ctx_t *eo_ctx); -static void dummy_processing(unsigned int us); -static int handle_periodic_event(app_eo_ctx_t *eo_ctx, em_event_t event, - app_msg_t *msgin); -static void handle_single_event(app_eo_ctx_t *eo_ctx, em_event_t event, - app_msg_t *msgin); -static void handle_heartbeat(app_eo_ctx_t *eo_ctx, em_queue_t queue); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Local EO error handler. Prevents error when ack() is done after cancel() - * since it's normal here. - * - * @param eo Execution object id - * @param error The error code - * @param escope Error scope - * @param args List of arguments (__FILE__, __func__, __LINE__, - * (format), ## __VA_ARGS__) - * - * @return The original error code. 
- */ -static em_status_t eo_error_handler(em_eo_t eo, em_status_t error, - em_escope_t escope, va_list args) -{ - VISUAL_DBG("E"); - atomic_fetch_add_explicit(&m_shm->eo_context.errors, 1, memory_order_relaxed); - return test_error_handler(eo, error, escope, args); -} - -/** - * Before EM - Init of the test application. - * - * The shared memory is needed if EM instance runs on multiple processes. - * Doing it like this makes it possible to run the app both as threads (-t) - * as well as processes (-p). - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void test_init(const appl_conf_t *appl_conf) -{ - (void)appl_conf; - int core = em_core_id(); - - /* first core creates ShMem */ - if (core == 0) { - m_shm = env_shared_reserve(APP_SHMEM_NAME, - sizeof(timer_app_shm_t)); - /* initialize it */ - if (m_shm) - memset(m_shm, 0, sizeof(timer_app_shm_t)); - - em_register_error_handler(test_error_handler); - if (APP_EXTRA_PRINTS) - APPL_PRINT("%ldk shared memory for app context\n", - sizeof(timer_app_shm_t) / 1000); - - } else { - m_shm = env_shared_lookup(APP_SHMEM_NAME); - } - - if (m_shm == NULL) { - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "ShMem init failed on EM-core: %u", - em_core_id()); - } - - APPL_PRINT("core %d: %s done\n", core, __func__); -} - -/** - * Startup of the timer test EM application. - * - * At this point EM is up, but no EOs exist. EM API can be used to create - * queues, EOs etc. - * - * @attention Run only on one EM core. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. - */ -void test_start(const appl_conf_t *appl_conf) -{ - em_eo_t eo; - em_timer_attr_t attr; - em_timer_res_param_t resparam; - em_queue_t queue; - em_status_t stat; - app_eo_ctx_t *eo_ctx; - em_event_t event; - app_msg_t *msg; - struct timespec ts; - uint64_t period; - - /* - * Store the event pool to use, use the EM default pool if no other - * pool is provided through the appl_conf. 
- */ - if (appl_conf->num_pools >= 1) - m_shm->pool = appl_conf->pools[0]; - else - m_shm->pool = EM_POOL_DEFAULT; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%d\n" - " Application running on %u EM-cores (procs:%u, threads:%u)\n" - " using event pool:%" PRI_POOL "\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - appl_conf->core_count, appl_conf->num_procs, appl_conf->num_threads, - m_shm->pool); - - test_fatal_if(m_shm->pool == EM_POOL_UNDEF, - "Undefined application event pool!"); - - /* Create EO */ - eo_ctx = &m_shm->eo_context; - eo = em_eo_create(APP_EO_NAME, app_eo_start, app_eo_start_local, - app_eo_stop, NULL, app_eo_receive, eo_ctx); - test_fatal_if(eo == EM_EO_UNDEF, "Failed to create EO!"); - - /* atomic queue for control */ - queue = em_queue_create("Control Q", - EM_QUEUE_TYPE_ATOMIC, - EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_DEFAULT, NULL); - stat = em_eo_add_queue_sync(eo, queue); - test_fatal_if(stat != EM_OK, "Failed to create queue!"); - - eo_ctx->my_q = queue; - /* another parallel high priority for timeout handling*/ - queue = em_queue_create("Tmo Q", - EM_QUEUE_TYPE_PARALLEL, - EM_QUEUE_PRIO_HIGHEST, - EM_QUEUE_GROUP_DEFAULT, NULL); - stat = em_eo_add_queue_sync(eo, queue); - test_fatal_if(stat != EM_OK, "Failed to create queue!"); - - eo_ctx->my_prio_q = queue; - - stat = em_eo_register_error_handler(eo, eo_error_handler); - test_fatal_if(stat != EM_OK, "Failed to register EO error handler"); - - /* create shared timer and store handle in - * shared memory. Require the configured app values - */ - em_timer_attr_init(&attr); - - /* going to change resolution, so need to check limits */ - memset(&resparam, 0, sizeof(em_timer_res_param_t)); - resparam.res_ns = APP_TIMER_RESOLUTION_US * 1000ULL; - stat = em_timer_res_capability(&resparam, EM_TIMER_CLKSRC_DEFAULT); - test_fatal_if(stat != EM_OK, "Timer does not support the resolution"); - - strncpy(attr.name, "TestTimer", EM_TIMER_NAME_LEN); - attr.num_tmo = APP_MAX_TMOS + APP_MAX_PERIODIC + 1; - attr.resparam = resparam; - attr.resparam.res_hz = 0; - m_shm->tmr = em_timer_create(&attr); - test_fatal_if(m_shm->tmr == EM_TIMER_UNDEF, "Failed to create timer!"); - - eo_ctx->min_tmo = resparam.min_tmo; - - /* Start EO */ - stat = em_eo_start_sync(eo, NULL, NULL); - test_fatal_if(stat != EM_OK, "Failed to start EO!"); - - /* create periodic timeout for heartbeat */ - eo_ctx->heartbeat_tmo = em_tmo_create(m_shm->tmr, EM_TMO_FLAG_PERIODIC, - eo_ctx->my_q); - test_fatal_if(eo_ctx->heartbeat_tmo == EM_TMO_UNDEF, - "Can't allocate heartbeat_tmo!\n"); - - event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, m_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, "Can't allocate event (%ldB)!\n", - sizeof(app_msg_t)); - - msg = em_event_pointer(event); - msg->command = APP_CMD_HEARTBEAT; - eo_ctx->hz = em_timer_get_freq(m_shm->tmr); - if (eo_ctx->hz < 100) - APPL_ERROR("WARNING - timer hz very low!\n"); - - /* linux time check */ - test_fatal_if(clock_getres(APP_LINUX_CLOCK_SRC, &ts) != 0, - "clock_getres() failed!\n"); - - period = ts.tv_nsec + (ts.tv_sec * 1000000000ULL); - eo_ctx->linux_hz = 1000000000ULL / period; - APPL_PRINT("Linux reports clock running at %" PRIu64 " hz\n", eo_ctx->linux_hz); - - /* start heartbeat, will later start the test */ - period = (APP_HEARTBEAT_MS * eo_ctx->hz) / 1000; - test_fatal_if(period < 1, "timer 
resolution is too low!\n"); - - stat = em_tmo_set_periodic(eo_ctx->heartbeat_tmo, 0, period, event); - test_fatal_if(stat != EM_OK, "Can't activate heartbeat tmo!\n"); - - APPL_PRINT("%s done, test repetition interval %ds\n\n", __func__, - (int)((APP_HEARTBEAT_MS * APP_CHECK_LIMIT) / 1000)); -} - -void test_stop(const appl_conf_t *appl_conf) -{ - const int core = em_core_id(); - em_status_t ret; - em_eo_t eo; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - eo = em_eo_find(APP_EO_NAME); - test_fatal_if(eo == EM_EO_UNDEF, - "Could not find EO:%s", APP_EO_NAME); - - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); - - ret = em_timer_delete(m_shm->tmr); - test_fatal_if(ret != EM_OK, - "Timer:%" PRI_TMR " delete:%" PRI_STAT "", - m_shm->tmr, ret); - - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); -} - -void test_term(const appl_conf_t *appl_conf) -{ - (void)appl_conf; - APPL_PRINT("%s() on EM-core %d\n", __func__, em_core_id()); - if (m_shm != NULL) { - env_shared_free(m_shm); - m_shm = NULL; - em_unregister_error_handler(); - } -} - -/** - * @private - * - * EO start function. - */ -static em_status_t app_eo_start(void *eo_context, em_eo_t eo, - const em_eo_conf_t *conf) -{ - app_eo_ctx_t *const eo_ctx = eo_context; - em_timer_attr_t attr; - em_timer_t tmr; - int num_timers; - - (void)eo; - (void)conf; - - APPL_PRINT("timer_test %s\n", TEST_VERSION); - APPL_PRINT("EO start\n"); - - num_timers = em_timer_get_all(&tmr, 1); - APPL_PRINT("System has %d timer(s)\n", num_timers); - - if (APP_EXTRA_PRINTS) { - if (__atomic_always_lock_free(sizeof(uint64_t), NULL)) - APPL_PRINT("64b atomics are lock-free\n"); - else - APPL_PRINT("64b atomics may use locks\n"); - } - - if (em_timer_get_attr(m_shm->tmr, &attr) != EM_OK) { - APPL_ERROR("Can't get timer info\n"); - return EM_ERR_BAD_ID; - } - APPL_PRINT("Timer \"%s\" info:\n", attr.name); - APPL_PRINT(" -resolution: %" PRIu64 " ns\n", attr.resparam.res_ns); - APPL_PRINT(" -max_tmo: %" PRIu64 " ms\n", attr.resparam.max_tmo / 1000); - APPL_PRINT(" -num_tmo: %d\n", attr.num_tmo); - APPL_PRINT(" -clk_src: %d\n", attr.resparam.clk_src); - APPL_PRINT(" -tick Hz: %" PRIu64 " hz\n", - em_timer_get_freq(m_shm->tmr)); - - eo_ctx->res_ns = attr.resparam.res_ns; - - if (APP_INCREASING_DLY) { - APPL_PRINT("Using increasing processing delay (%d, 1/%d)\n", - APP_INCREASING_DLY, APP_INC_DLY_MODULO); - } - - /* init other local EO context */ - eo_ctx->min_diff = INT64_MAX; - eo_ctx->max_diff = 0; - eo_ctx->min_diff_l = INT64_MAX; - eo_ctx->max_diff_l = 0; - - return EM_OK; -} - -/** - * @private - * - * EO per thread start function. - */ -static em_status_t app_eo_start_local(void *eo_context, em_eo_t eo) -{ - app_eo_ctx_t *const eo_ctx = eo_context; - - (void)eo_ctx; - (void)eo; - - /* per-thread random seed */ - m_randseed = time(NULL); - - return EM_OK; -} - -/** - * @private - * - * EO stop function. 
- */ -static em_status_t app_eo_stop(void *eo_context, em_eo_t eo) -{ - app_eo_ctx_t *const eo_ctx = eo_context; - em_event_t event = EM_EVENT_UNDEF; - em_status_t ret; - - APPL_PRINT("EO stop\n"); - - if (eo_ctx->heartbeat_tmo != EM_TMO_UNDEF) { - em_tmo_delete(eo_ctx->heartbeat_tmo, &event); - eo_ctx->heartbeat_tmo = EM_TMO_UNDEF; - if (event != EM_EVENT_UNDEF) - em_free(event); - } - - cleanup_test(eo_ctx); - - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - return EM_OK; -} - -/** - * @private - * - * EO receive function. Runs the example test app after initialization. - */ -static void app_eo_receive(void *eo_context, em_event_t event, - em_event_type_t type, em_queue_t queue, - void *q_context) -{ - app_eo_ctx_t *const eo_ctx = eo_context; - int reuse = 0; - - (void)q_context; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - VISUAL_DBG("e"); - - if (type == EM_EVENT_TYPE_SW) { - app_msg_t *msgin = (app_msg_t *)em_event_pointer(event); - - switch (msgin->command) { - case APP_CMD_HEARTBEAT: /* uses atomic queue */ - VISUAL_DBG("H"); - handle_heartbeat(eo_ctx, queue); - if (em_tmo_ack(eo_ctx->heartbeat_tmo, event) != EM_OK) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "Heartbeat ack() failed!\n"); - reuse = 1; - break; - - case APP_CMD_TMO_SINGLE: /* parallel queue */ - VISUAL_DBG("s"); - if (queue != eo_ctx->my_prio_q) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "tmo from wrong queue!\n"); - handle_single_event(eo_ctx, event, msgin); - break; - - case APP_CMD_TMO_PERIODIC: /* parallel queue */ - VISUAL_DBG("p"); - if (queue != eo_ctx->my_prio_q) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "tmo from wrong queue!\n"); - reuse = handle_periodic_event(eo_ctx, event, msgin); - break; - - default: - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "Invalid event received!\n"); - } - } else { - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "Invalid event type received!\n"); - } - - if (!reuse) - em_free(event); -} - -void handle_single_event(app_eo_ctx_t *eo_ctx, em_event_t event, - app_msg_t *msgin) -{ - (void)event; - - /* not expecting oneshot after run state */ - if (atomic_load_explicit(&eo_ctx->state, memory_order_acquire) != APP_STATE_RUNNING) { - APPL_PRINT("ERR: Tmo received after test finish\n"); - eo_ctx->errors++; - return; - } - if (msgin->index < 0 || msgin->index >= APP_MAX_TMOS) { - APPL_PRINT("ERR: tmo index out of range. 
Corrupted event?\n"); - eo_ctx->errors++; - return; - } - if (eo_ctx->oneshot.tmo[msgin->index].appeared) { - APPL_PRINT("ERR: Single Tmo received twice\n"); - eo_ctx->errors++; - return; - } - - /* lock tmo to avoid race with random cancel by another core */ - while (atomic_flag_test_and_set_explicit(&eo_ctx->oneshot.tmo[msgin->index].lock, - memory_order_acquire)) - ; - - eo_ctx->oneshot.tmo[msgin->index].appeared = em_timer_current_tick(m_shm->tmr); - clock_gettime(APP_LINUX_CLOCK_SRC, &eo_ctx->oneshot.tmo[msgin->index].linux_appeared); - atomic_flag_clear_explicit(&eo_ctx->oneshot.tmo[msgin->index].lock, memory_order_release); - atomic_fetch_add_explicit(&eo_ctx->oneshot.received, 1, memory_order_relaxed); - - if (!eo_ctx->nocancel) - random_cancel(eo_ctx); -} - -int handle_periodic_event(app_eo_ctx_t *eo_ctx, em_event_t event, - app_msg_t *msgin) -{ - int reuse = 0; - - if (msgin->index < 0 || msgin->index >= APP_MAX_PERIODIC) { - APPL_PRINT("ERR: Periodic tmo index out of range\n"); - eo_ctx->errors++; - return reuse; - } - int state = atomic_load_explicit(&eo_ctx->state, memory_order_acquire); - - if (state != APP_STATE_RUNNING && state != APP_STATE_STOPPING) { - APPL_PRINT("ERR: Periodic tmo received after test finish\n"); - eo_ctx->errors++; - return reuse; - } - - while (atomic_flag_test_and_set_explicit(&eo_ctx->periodic.tmo[msgin->index].lock, - memory_order_acquire)) - ; - - eo_ctx->periodic.tmo[msgin->index].appeared = em_timer_current_tick(m_shm->tmr); - atomic_flag_clear_explicit(&eo_ctx->periodic.tmo[msgin->index].lock, memory_order_release); - atomic_fetch_add_explicit(&eo_ctx->periodic.received, 1, memory_order_relaxed); - - /* periodic tmo may keep coming a while after end of round */ - if (atomic_load_explicit(&eo_ctx->state, memory_order_acquire) == APP_STATE_STOPPING) - return 0; - - reuse = 1; - if (APP_INCREASING_DLY && msgin->dummy_delay) { - /* add delay before ack() to test late ack */ - dummy_processing(msgin->dummy_delay); - msgin->dummy_delay += APP_INCREASING_DLY; - eo_ctx->periodic.tmo[msgin->index].max_dummy = msgin->dummy_delay; - } - em_status_t ret = em_tmo_ack(eo_ctx->periodic.tmo[msgin->index].tmo, event); - - if (ret == EM_ERR_CANCELED) { - if (!eo_ctx->periodic.tmo[msgin->index].canceled && - !eo_ctx->periodic.tmo[msgin->index].waitevt) { - eo_ctx->ack_errors++; - reuse = 0; - } - } else { - if (ret != EM_OK) { - APPL_PRINT("em_tmo_ack error:%" PRI_STAT "\n", ret); - eo_ctx->ack_errors++; - reuse = 0; - } - } - if (!eo_ctx->nocancel) { - if (random_cancel_periodic(eo_ctx) == event) - reuse = 1; - } - return reuse; -} - -/* handle beartbeat, i.e. 
run state machine */ -void handle_heartbeat(app_eo_ctx_t *eo_ctx, em_queue_t queue) -{ - if (queue != eo_ctx->my_q) { - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "heartbeat from wrong queue!\n"); - } - - eo_ctx->heartbeat_count++; - - if (APP_PRINT_DOTS) { - char ch = dot_marks[eo_ctx->state]; - - if (ch != ' ') - APPL_PRINT("%c", ch); - } - - /* reached next state change */ - if (eo_ctx->heartbeat_count >= eo_ctx->heartbeat_target) { - switch (atomic_load_explicit(&eo_ctx->state, memory_order_acquire)) { - case APP_STATE_IDLE: - start_test(eo_ctx); - eo_ctx->heartbeat_target = eo_ctx->heartbeat_count + APP_CHECK_LIMIT; - break; - case APP_STATE_RUNNING: - stop_test(eo_ctx); - eo_ctx->heartbeat_target = eo_ctx->heartbeat_count + APP_CHECK_GUARD; - break; - case APP_STATE_STOPPING: - check_test(eo_ctx); - eo_ctx->heartbeat_target = eo_ctx->heartbeat_count + APP_CHECK_GUARD; - break; - case APP_STATE_CHECKING: - cleanup_test(eo_ctx); - eo_ctx->heartbeat_target = eo_ctx->heartbeat_count + APP_CHECK_GUARD; - break; - default: - break; - } - } -} - -/* new random timeout APP_TIMEOUT_MIN_US ... APP_TIMEOUT_MAX_US in ticks */ -em_timer_tick_t rand_timeout(unsigned int *seed, app_eo_ctx_t *eo_ctx, - unsigned int fixed) -{ - uint64_t us; - double tick_ns = 1000000000.0 / (double)eo_ctx->hz; - - if (fixed) { - us = fixed; - } else { - us = (uint64_t)rand_r(seed) % (APP_TIMEOUT_MAX_US - APP_TIMEOUT_MIN_US + 1); - us += APP_TIMEOUT_MIN_US; - } - - return (em_timer_tick_t)((double)us * 1000.0 / tick_ns); -} - -/* start new batch of random timeouts */ -void set_timeouts(app_eo_ctx_t *eo_ctx) -{ - app_msg_t *msg; - int i; - uint64_t t1, t2; - struct timespec ts1, ts2; - - /* timeouts allocate new events every time (could re-use old ones). - * Do this first so we can time just the tmo creation - */ - for (i = 0; i < APP_MAX_TMOS; i++) { - em_event_t event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, - m_shm->pool); - if (event == EM_EVENT_UNDEF) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "Can't allocate event nr %d!", i + 1); - - /* prepare as timeout event */ - msg = em_event_pointer(event); - msg->command = APP_CMD_TMO_SINGLE; - msg->index = i; - msg->dummy_delay = 0; - memset(&eo_ctx->oneshot.tmo[i], 0, sizeof(app_tmo_data_t)); - eo_ctx->oneshot.tmo[i].event = event; - } - - t1 = em_timer_current_tick(m_shm->tmr); - clock_gettime(APP_LINUX_CLOCK_SRC, &ts1); - /* allocate new tmos every time (could re-use) */ - for (i = 0; i < APP_MAX_TMOS; i++) { - em_tmo_t tmo = em_tmo_create(m_shm->tmr, EM_TMO_FLAG_ONESHOT, - eo_ctx->my_prio_q); - - if (unlikely(tmo == EM_TMO_UNDEF)) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "Can't allocate tmo nr %d!", i + 1); - - eo_ctx->oneshot.tmo[i].tmo = tmo; - } - - t2 = em_timer_current_tick(m_shm->tmr); - clock_gettime(APP_LINUX_CLOCK_SRC, &ts2); - APPL_PRINT("Timer: Creating %d timeouts took %" PRIu64 " ns (%" PRIu64 - " ns each)\n", i, - tick_diff_ns(t1, t2, eo_ctx->hz), - tick_diff_ns(t1, t2, eo_ctx->hz) / APP_MAX_TMOS); - APPL_PRINT("Linux: Creating %d timeouts took %" PRIu64 " ns (%" PRIu64 - " ns each)\n", i, ts_diff_ns(&ts1, &ts2), - ts_diff_ns(&ts1, &ts2) / APP_MAX_TMOS); - - /* start them all. Some might be served before this loop ends! 
*/ - for (i = 0; i < APP_MAX_TMOS; i++) { - unsigned int fixed = 0; - - /* always test min and max tmo */ - if (i == 0) - fixed = APP_TIMEOUT_MAX_US; - else if (i == 1) - fixed = APP_TIMEOUT_MIN_US; - - eo_ctx->oneshot.tmo[i].howmuch = rand_timeout(&m_randseed, - eo_ctx, fixed); - eo_ctx->oneshot.tmo[i].when = em_timer_current_tick(m_shm->tmr); - clock_gettime(APP_LINUX_CLOCK_SRC, - &eo_ctx->oneshot.tmo[i].linux_when); - if (em_tmo_set_rel(eo_ctx->oneshot.tmo[i].tmo, - eo_ctx->oneshot.tmo[i].howmuch, - eo_ctx->oneshot.tmo[i].event) != EM_OK) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "Can't activate tmo!\n"); - } - if (APP_MAX_TMOS) - APPL_PRINT("Started single shots\n"); - - /* then periodic */ - for (i = 0; i < APP_MAX_PERIODIC; i++) { - unsigned int fixed = 0; - - em_event_t event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, - m_shm->pool); - if (event == EM_EVENT_UNDEF) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "Can't allocate event!"); - - msg = em_event_pointer(event); - msg->command = APP_CMD_TMO_PERIODIC; - msg->index = i; - msg->dummy_delay = (i % APP_INC_DLY_MODULO) ? - 0 : APP_INCREASING_DLY; - memset(&eo_ctx->periodic.tmo[i], 0, sizeof(app_tmo_data_t)); - eo_ctx->periodic.tmo[i].event = event; - - em_tmo_t tmo = em_tmo_create(m_shm->tmr, EM_TMO_FLAG_PERIODIC, - eo_ctx->my_prio_q); - if (unlikely(tmo == EM_TMO_UNDEF)) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "Can't allocate periodic tmo nr %d!", i + 1); - eo_ctx->periodic.tmo[i].tmo = tmo; - - /* always test min and max tmo */ - if (i == 0) - fixed = APP_TIMEOUT_MAX_US; - else if (i == 1) - fixed = APP_TIMEOUT_MIN_US; - eo_ctx->periodic.tmo[i].howmuch = rand_timeout(&m_randseed, - eo_ctx, fixed); - eo_ctx->periodic.tmo[i].when = em_timer_current_tick(m_shm->tmr); - if (em_tmo_set_periodic(eo_ctx->periodic.tmo[i].tmo, - 0, - eo_ctx->periodic.tmo[i].howmuch, - eo_ctx->periodic.tmo[i].event) != EM_OK) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "Can't activate periodic tmo nr %d!\n", i + 1); - } - - if (APP_MAX_PERIODIC) - APPL_PRINT("Started periodic\n"); -} - -void start_test(app_eo_ctx_t *eo_ctx) -{ - eo_ctx->oneshot.received = 0; - eo_ctx->oneshot.cancelled = 0; - eo_ctx->oneshot.cancel_fail = 0; - - eo_ctx->periodic.received = 0; - eo_ctx->periodic.cancelled = 0; - eo_ctx->periodic.cancel_fail = 0; - - time_t t = time(NULL); - struct tm *tm = localtime(&t); - char s[40]; - - strftime(s, sizeof(s), "%b-%d %H:%M:%S", tm); - eo_ctx->rounds++; - APPL_PRINT("\n\n%s ROUND %" PRIu64 " ************\n", - s, eo_ctx->rounds); - - eo_ctx->nocancel = 1; - /* do this before starting tmo as some could be received while still here */ - atomic_store_explicit(&eo_ctx->state, APP_STATE_RUNNING, memory_order_release); - - set_timeouts(eo_ctx); /* timeouts start coming */ - APPL_PRINT("Running\n"); - eo_ctx->nocancel = 0; /* after all timeouts are completely created */ -} - -void stop_test(app_eo_ctx_t *eo_ctx) -{ - em_event_t event; - - /* test assumes all oneshots are received, - * but this will stop possible periodic timeout processing - */ - atomic_store_explicit(&eo_ctx->state, APP_STATE_STOPPING, memory_order_release); - - /* cancel ongoing periodic */ - for (int i = 0; i < APP_MAX_PERIODIC; i++) { - event = EM_EVENT_UNDEF; - - /* lock tmo to avoid race with possible unfinished random cancel */ - while (atomic_flag_test_and_set_explicit(&eo_ctx->periodic.tmo[i].lock, - memory_order_acquire)) - ; - - /* double cancel is an error */ - if (!eo_ctx->periodic.tmo[i].canceled && 
!eo_ctx->periodic.tmo[i].waitevt) { - em_status_t ret = em_tmo_cancel(eo_ctx->periodic.tmo[i].tmo, &event); - - if (ret != EM_OK && ret != EM_ERR_TOONEAR) { - APPL_PRINT("%s: cancel returned %u!\n", __func__, ret); - eo_ctx->errors++; - } - } - atomic_flag_clear_explicit(&eo_ctx->periodic.tmo[i].lock, memory_order_release); - if (event != EM_EVENT_UNDEF) - em_free(event); - } -} - -void cleanup_test(app_eo_ctx_t *eo_ctx) -{ - int i; - uint64_t t1, t2; - struct timespec ts1, ts2; - - APPL_PRINT("\nCleaning up\n"); - - t1 = em_timer_current_tick(m_shm->tmr); - clock_gettime(APP_LINUX_CLOCK_SRC, &ts1); - for (i = 0; i < APP_MAX_TMOS; i++) { - em_event_t evt = EM_EVENT_UNDEF; - - if (eo_ctx->oneshot.tmo[i].tmo == EM_TMO_UNDEF) - continue; - - if (em_tmo_delete(eo_ctx->oneshot.tmo[i].tmo, &evt) != EM_OK) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "Can't delete tmo!\n"); - eo_ctx->oneshot.tmo[i].tmo = EM_TMO_UNDEF; - if (evt != EM_EVENT_UNDEF && !appl_shm->exit_flag) { - APPL_PRINT("WARN - tmo_delete returned event,\n" - " should be received or canceled!\n"); - em_free(evt); - } - } - t2 = em_timer_current_tick(m_shm->tmr); - clock_gettime(APP_LINUX_CLOCK_SRC, &ts2); - APPL_PRINT("Timer: Deleting %d timeouts took %" PRIu64 - " ns (%" PRIu64 " ns each)\n", i, - tick_diff_ns(t1, t2, eo_ctx->hz), - tick_diff_ns(t1, t2, eo_ctx->hz) / APP_MAX_TMOS); - APPL_PRINT("Linux: Deleting %d timeouts took %" PRIu64 " ns (%" PRIu64 - " ns each)\n", i, ts_diff_ns(&ts1, &ts2), - ts_diff_ns(&ts1, &ts2) / APP_MAX_TMOS); - - for (i = 0; i < APP_MAX_PERIODIC; i++) { - em_event_t evt = EM_EVENT_UNDEF; - - if (eo_ctx->periodic.tmo[i].tmo == EM_TMO_UNDEF) - continue; - - if (em_tmo_delete(eo_ctx->periodic.tmo[i].tmo, &evt) != EM_OK) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "Can't delete periodic tmo!\n"); - eo_ctx->periodic.tmo[i].tmo = EM_TMO_UNDEF; - if (evt != EM_EVENT_UNDEF) - em_free(evt); - } - atomic_store_explicit(&eo_ctx->state, APP_STATE_IDLE, memory_order_release); -} - -void check_test(app_eo_ctx_t *eo_ctx) -{ - unsigned int errors; - - atomic_store_explicit(&eo_ctx->state, APP_STATE_CHECKING, memory_order_release); - eo_ctx->nocancel = 1; - - APPL_PRINT("\nHeartbeat count %" PRIu64 "\n", eo_ctx->heartbeat_count); - - errors = check_single(eo_ctx); - errors += check_periodic(eo_ctx); - eo_ctx->errors += errors; - APPL_PRINT("Errors: %u\n\n", errors); - - APPL_PRINT("TOTAL RUNTIME/US: min %" PRIi64 ", max %" PRIi64 "\n", - tick_diff_ns(0, eo_ctx->min_diff, eo_ctx->hz) / 1000, - tick_diff_ns(0, eo_ctx->max_diff, eo_ctx->hz) / 1000); - APPL_PRINT("TOTAL RUNTIME LINUX/US: min %" PRIi64 ", max %" PRIi64 "\n", - eo_ctx->min_diff_l / 1000, eo_ctx->max_diff_l / 1000); - APPL_PRINT("TOTAL ERRORS: %u\n", eo_ctx->errors); - APPL_PRINT("TOTAL ACK FAILS (OK): %u\n", eo_ctx->ack_errors); - if (APP_INCREASING_DLY) - APPL_PRINT("TOTAL MAX DUMMY PROCESSING/US: %u\n", - eo_ctx->max_dummy); -} - -/* timespec diff to ns */ -int64_t ts_diff_ns(struct timespec *ts1, struct timespec *ts2) -{ - uint64_t t1 = ts1->tv_nsec + (ts1->tv_sec * 1000000000ULL); - uint64_t t2 = ts2->tv_nsec + (ts2->tv_sec * 1000000000ULL); - - return (t2 - t1); -} - -/* timer tick diff to ns */ -int64_t tick_diff_ns(em_timer_tick_t t1, em_timer_tick_t t2, uint64_t hz) -{ - int64_t ticks = (int64_t)t2 - (int64_t)t1; - double tick_ns = 1000000000.0 / (double)hz; - - return (int64_t)((double)ticks * tick_ns); -} - -void random_cancel(app_eo_ctx_t *eo_ctx) -{ - unsigned int idx = (unsigned int)rand_r(&m_randseed) % - (APP_CANCEL_MODULO ? 
APP_CANCEL_MODULO : 1); - - if (idx >= APP_MAX_TMOS || idx == 0) - return; - - /* This is tricky as we're possibly canceling a timeout that might be under work - * on another core, so lock tmo state before trying cancel to avoid race - */ - while (atomic_flag_test_and_set_explicit(&eo_ctx->oneshot.tmo[idx].lock, - memory_order_acquire)) - ; - - if (!eo_ctx->oneshot.tmo[idx].canceled && !eo_ctx->oneshot.tmo[idx].waitevt && - eo_ctx->oneshot.tmo[idx].tmo != EM_TMO_UNDEF) { - /* try to cancel (Tmo might have been fired already) */ - em_event_t evt = EM_EVENT_UNDEF; - em_status_t retval; - em_timer_tick_t now; - - retval = em_tmo_cancel(eo_ctx->oneshot.tmo[idx].tmo, &evt); - now = em_timer_current_tick(m_shm->tmr); - if (retval == EM_OK) { - eo_ctx->oneshot.tmo[idx].canceled = now; - eo_ctx->oneshot.cancelled++; - if (evt == EM_EVENT_UNDEF) { /* cancel ok but no event returned */ - APPL_PRINT("ERR: cancel ok but no event!\n"); - eo_ctx->errors++; - } - if (eo_ctx->oneshot.tmo[idx].appeared) { - APPL_PRINT("ERR: cancel ok after event received!\n"); - eo_ctx->errors++; - } - } else { /* cancel fail, too late */ - eo_ctx->oneshot.cancel_fail += 1; - if (evt != EM_EVENT_UNDEF) { /* cancel fail but event returned */ - APPL_PRINT("ERR: cancel fail but event return (rv %u)!\n", retval); - eo_ctx->errors++; - } else { /* event should appear later */ - eo_ctx->oneshot.tmo[idx].waitevt = now; - } - } - - if (evt != EM_EVENT_UNDEF) /* cancelled in time, free event */ - em_free(evt); - - VISUAL_DBG("c"); - } - atomic_flag_clear_explicit(&eo_ctx->oneshot.tmo[idx].lock, memory_order_release); -} - -em_event_t random_cancel_periodic(app_eo_ctx_t *eo_ctx) -{ - unsigned int idx = ((unsigned int)rand_r(&m_randseed)) % - (APP_CANCEL_MODULO_P ? APP_CANCEL_MODULO_P : 1); - - if (idx >= APP_MAX_PERIODIC || idx == 0) - return EM_EVENT_UNDEF; - - /* lock tmo state before trying cancel to avoid race on receive */ - while (atomic_flag_test_and_set_explicit(&eo_ctx->periodic.tmo[idx].lock, - memory_order_acquire)) - ; - - if (!eo_ctx->periodic.tmo[idx].canceled && !eo_ctx->periodic.tmo[idx].waitevt && - eo_ctx->periodic.tmo[idx].tmo != EM_TMO_UNDEF) { - /* try to cancel (Tmo might have been fired already) */ - em_event_t evt = EM_EVENT_UNDEF; - - if (em_tmo_cancel(eo_ctx->periodic.tmo[idx].tmo, &evt) == EM_OK) { - eo_ctx->periodic.tmo[idx].canceled = em_timer_current_tick(m_shm->tmr); - eo_ctx->periodic.cancelled++; - } else { - eo_ctx->periodic.cancel_fail++; - if (evt == EM_EVENT_UNDEF) {/* cancel failed, event should appear */ - eo_ctx->periodic.tmo[idx].waitevt = - em_timer_current_tick(m_shm->tmr); - } - } - eo_ctx->periodic.tmo[idx].appeared = 0; - VISUAL_DBG("C"); - if (evt != EM_EVENT_UNDEF) { - atomic_flag_clear_explicit(&eo_ctx->periodic.tmo[idx].lock, - memory_order_release); - em_free(evt); - return evt; /* to skip wrong free in receive */ - } - } - - atomic_flag_clear_explicit(&eo_ctx->periodic.tmo[idx].lock, memory_order_release); - return EM_EVENT_UNDEF; -} - -unsigned int check_single(app_eo_ctx_t *eo_ctx) -{ - int i; - unsigned int errors = 0; - int64_t min_diff = INT64_MAX; - int64_t max_diff = 0; - int64_t avg_diff = 0; - int64_t min_linux = INT64_MAX; - int64_t max_linux = 0; - int64_t avg_linux = 0; - struct timespec zerot; - - memset(&zerot, 0, sizeof(zerot)); /* 0 to use diff*/ - APPL_PRINT("ONESHOT:\n"); - APPL_PRINT(" Received: %" PRIu64 ", expected %lu\n", - eo_ctx->oneshot.received, - APP_MAX_TMOS - eo_ctx->oneshot.cancelled); - APPL_PRINT(" Cancelled OK: %" PRIu64 "\n", 
eo_ctx->oneshot.cancelled); - APPL_PRINT(" Cancel failed (too late): %" PRIu64 "\n", - eo_ctx->oneshot.cancel_fail); - - for (i = 0; i < APP_MAX_TMOS; i++) { - /* missing any? */ - if (!eo_ctx->oneshot.tmo[i].canceled && !eo_ctx->oneshot.tmo[i].waitevt && - !eo_ctx->oneshot.tmo[i].appeared) { - APPL_PRINT(" ERR: TMO %d event missing!\n", i); - APPL_PRINT(" - to %lu ticks\n", eo_ctx->oneshot.tmo[i].howmuch); - errors++; - } - - /* calculate timing */ - if (eo_ctx->oneshot.tmo[i].appeared) { - /* timer ticks */ - uint64_t target = eo_ctx->oneshot.tmo[i].when + - eo_ctx->oneshot.tmo[i].howmuch; - int64_t diff = (int64_t)eo_ctx->oneshot.tmo[i].appeared - (int64_t)target; - - if (APP_PRINT_EACH_TMO) - APPL_PRINT("Timeout #%u: diff %" PRIi64 - " ticks\n", i + 1, diff); - if (min_diff > diff) - min_diff = diff; - if (max_diff < diff) - max_diff = diff; - avg_diff += diff; - - /* linux time in ns*/ - int64_t ldiff; - - ldiff = tick_diff_ns(0, eo_ctx->oneshot.tmo[i].howmuch, eo_ctx->hz); - target = ts_diff_ns(&zerot, &eo_ctx->oneshot.tmo[i].linux_when) + ldiff; - diff = (int64_t)ts_diff_ns(&zerot, &eo_ctx->oneshot.tmo[i].linux_appeared) - - (int64_t)target; - if (APP_PRINT_EACH_TMO) - APPL_PRINT("Timeout #%d: diff %" PRIi64 - " linux ns\n", i + 1, diff); - if (min_linux > diff) - min_linux = diff; - if (max_linux < diff) - max_linux = diff; - avg_linux += diff; - } - - /* canceled ok but still appeared */ - if (eo_ctx->oneshot.tmo[i].canceled && eo_ctx->oneshot.tmo[i].appeared) { - APPL_PRINT(" ERR: TMO %d cancel ok but event appeared!\n", i); - APPL_PRINT(" - expire %lu, cancel ok at %lu\n", - eo_ctx->oneshot.tmo[i].when, - eo_ctx->oneshot.tmo[i].canceled); - errors++; - } - - /* cancel failed as too late, but event did not appear */ - if (eo_ctx->oneshot.tmo[i].waitevt && !eo_ctx->oneshot.tmo[i].appeared) { - APPL_PRINT(" ERR: TMO %d cancel fail but event never appeared!\n", i); - APPL_PRINT(" - expire %lu, cancel fail at %lu\n", - eo_ctx->oneshot.tmo[i].when, - eo_ctx->oneshot.tmo[i].waitevt); - errors++; - } - - /* cancel failed but should have succeeded? 
*/ - if (eo_ctx->oneshot.tmo[i].waitevt) { - em_timer_tick_t exp_tick = eo_ctx->oneshot.tmo[i].when + - eo_ctx->oneshot.tmo[i].howmuch; - int64_t diff = tick_diff_ns(eo_ctx->oneshot.tmo[i].waitevt, exp_tick, - eo_ctx->hz); - - if (diff > (int64_t)eo_ctx->min_tmo + - (int64_t)eo_ctx->res_ns + APP_CANCEL_MARGIN_NS) { - APPL_PRINT("ERR: cancel should have worked, "); - APPL_PRINT("%ldns before target(min %lu)\n", diff, eo_ctx->min_tmo); - errors++; - } - } - } - - avg_diff /= (int64_t)eo_ctx->oneshot.received; - avg_linux /= (int64_t)eo_ctx->oneshot.received; - APPL_PRINT(" SUMMARY/TICKS: min %" PRIi64 ", max %" PRIi64 - ", avg %" PRIi64 "\n", min_diff, max_diff, - avg_diff); - APPL_PRINT(" /US: min %" PRIi64 ", max %" PRIi64 - ", avg %" PRIi64 "\n", - tick_diff_ns(0, min_diff, eo_ctx->hz) / 1000, - tick_diff_ns(0, max_diff, eo_ctx->hz) / 1000, - tick_diff_ns(0, avg_diff, eo_ctx->hz) / 1000); - APPL_PRINT(" SUMMARY/LINUX US: min %" PRIi64 ", max %" PRIi64 - ", avg %" PRIi64 "\n", min_linux / 1000, max_linux / 1000, - avg_linux / 1000); - - /* over total runtime */ - if (eo_ctx->min_diff > min_diff) - eo_ctx->min_diff = min_diff; - if (eo_ctx->max_diff < max_diff) - eo_ctx->max_diff = max_diff; - if (eo_ctx->min_diff_l > min_linux) - eo_ctx->min_diff_l = min_linux; - if (eo_ctx->max_diff_l < max_linux) - eo_ctx->max_diff_l = max_linux; - - return errors; -} - -unsigned int check_periodic(app_eo_ctx_t *eo_ctx) -{ - int i; - unsigned int errors = 0; - unsigned int max_dummy = 0; - - APPL_PRINT("PERIODIC:\n"); - APPL_PRINT(" Received: %" PRIu64 "\n", eo_ctx->periodic.received); - APPL_PRINT(" Cancelled: %" PRIu64 "\n", eo_ctx->periodic.cancelled); - APPL_PRINT(" Cancel failed (too late): %" PRIu64 "\n", eo_ctx->periodic.cancel_fail); - - for (i = 0; i < APP_MAX_PERIODIC; i++) { - /* missing? */ - if (!eo_ctx->periodic.tmo[i].canceled && !eo_ctx->periodic.tmo[i].waitevt && - !eo_ctx->periodic.tmo[i].appeared) { - APPL_PRINT(" ERR: No periodic TMO %d event(s)!\n", i); - errors++; - } - /* appeared after successful cancel? */ - if (eo_ctx->periodic.tmo[i].canceled && eo_ctx->periodic.tmo[i].appeared) { - APPL_PRINT(" ERR: periodic TMO %d event(s) after successful cancel!\n", i); - errors++; - } - /* did not appear after failed cancel? */ - if (APP_PER_CANCEL_CHK) { - if (eo_ctx->periodic.tmo[i].waitevt && !eo_ctx->periodic.tmo[i].appeared) { - APPL_PRINT(" ERR: periodic TMO %d no event after failed cancel!\n", - i); - errors++; - } - } - if (max_dummy < eo_ctx->periodic.tmo[i].max_dummy) - max_dummy = eo_ctx->periodic.tmo[i].max_dummy; - } - - if (max_dummy) { - APPL_PRINT(" Max extra processing delay before ack (us): %u\n", max_dummy); - if (eo_ctx->max_dummy < max_dummy) - eo_ctx->max_dummy = max_dummy; - } - - return errors; -} - -/* emulate processing delay */ -static void dummy_processing(unsigned int us) -{ - struct timespec now, sample; - - VISUAL_DBG("D"); - - clock_gettime(APP_LINUX_CLOCK_SRC, &now); - do { - clock_gettime(APP_LINUX_CLOCK_SRC, &sample); - } while (ts_diff_ns(&now, &sample) / 1000ULL < us); - VISUAL_DBG("d"); -} +/* + * Copyright (c) 2017-2020, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * Event Machine timer basic test.
+ *
+ * Simple test for the EM timer (does not test everything). Creates and deletes
+ * random timeouts and checks how accurate the timeout indications are against
+ * the timer itself and also against Linux time (clock_gettime). Single EO, but
+ * the receiving queue is parallel so multiple threads can process timeouts
+ * concurrently.
+ *
+ * Exception/error management is simplified and aborts on most errors.
+ *
+ */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <inttypes.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <stdatomic.h>
+
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+
+#define TEST_VERSION "v1.4"
+
+/*
+ * Test app defines.
+ * Be careful, conflicting values may not be checked!
+ */
+#define APP_TIMER_RESOLUTION_US 1000 /* requested em-timer resolution */
+#define APP_TIMEOUT_MAX_US (10000ULL * 1000ULL) /* max random timeout */
+#define APP_TIMEOUT_MIN_US 5000 /* minimum random timeout */
+#define APP_MAX_TMOS 1000 /* simultaneous oneshots */
+#define APP_MAX_PERIODIC 300 /* simultaneous periodic */
+#define APP_PRINT_EACH_TMO 0 /* 0 to only print summary */
+#define APP_PRINT_DOTS 1 /* visual progress dots */
+#define APP_VISUAL_DEBUG 0 /* 0|1, for testing only.
Slow, but visual */ +#define APP_EXTRA_PRINTS 0 /* debugging helper */ +#define APP_PER_CANCEL_CHK 0 /* do not use yet, WIP */ + +#define APP_SHMEM_NAME "TimerTestShMem" +#define APP_HEARTBEAT_MS 2000 /* heartbeat tick period */ +#define APP_CHECK_COUNT (APP_TIMEOUT_MAX_US / 1000 / APP_HEARTBEAT_MS) +#define APP_CHECK_LIMIT (3 * (APP_CHECK_COUNT + 1)) /* num HB */ +#define APP_CHECK_GUARD 6 /* num HB */ + +#define APP_CANCEL_MODULO_P (APP_MAX_PERIODIC * 50) /* cancel probability */ +#define APP_CANCEL_MODULO (APP_MAX_TMOS * 5) /* cancel probability */ +#define APP_CANCEL_MARGIN_NS 100000 /* check limit for cancel fail ok */ +#define APP_LINUX_CLOCK_SRC CLOCK_MONOTONIC /* for clock_gettime */ +#define APP_INCREASING_DLY 7 /* if not 0, add this to increasing + * delay before calling periodic timer ack + */ +#define APP_INC_DLY_MODULO 15 /* apply increasing delay to every Nth tmo */ + +#if APP_VISUAL_DEBUG +#define VISUAL_DBG(x) APPL_PRINT(x) +#else +#define VISUAL_DBG(x) do {} while (0) +#endif + +#define APP_EO_NAME "Test EO" + +/** + * Test application message event + */ +typedef enum app_cmd_t { + APP_CMD_HEARTBEAT, + APP_CMD_TMO_SINGLE, + APP_CMD_TMO_PERIODIC +} app_cmd_t; + +typedef struct app_msg_t { + app_cmd_t command; + int index; + int dummy_delay; +} app_msg_t; + +typedef struct app_tmo_data_t { + em_tmo_t tmo ENV_CACHE_LINE_ALIGNED; + em_event_t event; + em_timer_tick_t when; + em_timer_tick_t howmuch; + em_timer_tick_t appeared; + struct timespec linux_when; + struct timespec linux_appeared; + em_timer_tick_t canceled; /* acts as flag, but also stores tick when done */ + em_timer_tick_t waitevt; /* acts as flag, but also stores tick when done */ + atomic_flag lock; /* used when adding timestamps or cancelling */ + unsigned int max_dummy; +} app_tmo_data_t; + +typedef enum app_test_state_t { + APP_STATE_IDLE = 0, + APP_STATE_RUNNING, + APP_STATE_STOPPING, + APP_STATE_CHECKING +} app_test_state_t; + +const char *dot_marks = " .-#"; /* per state above */ + +/** + * EO context + * + * Shared data. 
Concurrently manipulated fields use C atomics + */ +typedef struct app_eo_ctx_t { + em_tmo_t heartbeat_tmo; + uint64_t heartbeat_count; + uint64_t heartbeat_target; + em_queue_t my_q; + em_queue_t my_prio_q; + uint64_t hz; + uint64_t linux_hz; + uint64_t rounds; + int nocancel; + + atomic_int state; + atomic_uint errors; + atomic_uint ack_errors; + + int64_t min_diff; + int64_t max_diff; + int64_t min_diff_l; + int64_t max_diff_l; + unsigned int max_dummy; + uint64_t min_tmo; + uint64_t res_ns; + + struct { + app_tmo_data_t tmo[APP_MAX_TMOS]; + + atomic_uint_fast64_t received ENV_CACHE_LINE_ALIGNED; + atomic_uint_fast64_t cancelled; + atomic_uint_fast64_t cancel_fail; + } oneshot; + + struct { + app_tmo_data_t tmo[APP_MAX_PERIODIC]; + + atomic_uint_fast64_t received ENV_CACHE_LINE_ALIGNED; + atomic_uint_fast64_t cancelled; + atomic_uint_fast64_t cancel_fail; + } periodic; +} app_eo_ctx_t; + +/** + * Timer test shared memory data + */ +typedef struct timer_app_shm_t { + /* Event pool used by this application */ + em_pool_t pool; + /* EO context data */ + app_eo_ctx_t eo_context; + /* Event timer handle */ + em_timer_t tmr; + /* Pad size to a multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} timer_app_shm_t; + +/* EM-thread locals */ +static ENV_LOCAL timer_app_shm_t *m_shm; +static ENV_LOCAL unsigned int m_randseed; + +/* Local function prototypes */ +static em_status_t app_eo_start(void *eo_context, em_eo_t eo, + const em_eo_conf_t *conf); +static em_status_t app_eo_start_local(void *eo_context, em_eo_t eo); +static em_status_t app_eo_stop(void *eo_context, em_eo_t eo); +static void app_eo_receive(void *eo_context, em_event_t event, + em_event_type_t type, em_queue_t queue, + void *q_context); + +static em_timer_tick_t rand_timeout(unsigned int *seed, app_eo_ctx_t *eo_ctx, + unsigned int fixed); +static void set_timeouts(app_eo_ctx_t *eo_ctx); +static void start_test(app_eo_ctx_t *eo_ctx); +static void check_test(app_eo_ctx_t *eo_ctx); +static void stop_test(app_eo_ctx_t *eo_ctx); +static void cleanup_test(app_eo_ctx_t *eo_ctx); +static int64_t ts_diff_ns(struct timespec *ts1, struct timespec *ts2); +static int64_t tick_diff_ns(em_timer_tick_t t1, em_timer_tick_t t2, + uint64_t hz); +static void random_cancel(app_eo_ctx_t *eo_ctx); +static em_event_t random_cancel_periodic(app_eo_ctx_t *eo_ctx); +static unsigned int check_single(app_eo_ctx_t *eo_ctx); +static unsigned int check_periodic(app_eo_ctx_t *eo_ctx); +static void dummy_processing(unsigned int us); +static int handle_periodic_event(app_eo_ctx_t *eo_ctx, em_event_t event, + app_msg_t *msgin); +static void handle_single_event(app_eo_ctx_t *eo_ctx, em_event_t event, + app_msg_t *msgin); +static void handle_heartbeat(app_eo_ctx_t *eo_ctx, em_queue_t queue); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Local EO error handler. Prevents error when ack() is done after cancel() + * since it's normal here. + * + * @param eo Execution object id + * @param error The error code + * @param escope Error scope + * @param args List of arguments (__FILE__, __func__, __LINE__, + * (format), ## __VA_ARGS__) + * + * @return The original error code. 
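+ *
+ * A registration sketch (it mirrors the call made in test_start() below;
+ * 'eo' is the handle returned by em_eo_create()):
+ * @code
+ *     stat = em_eo_register_error_handler(eo, eo_error_handler);
+ *     test_fatal_if(stat != EM_OK, "Failed to register EO error handler");
+ * @endcode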
+ */ +static em_status_t eo_error_handler(em_eo_t eo, em_status_t error, + em_escope_t escope, va_list args) +{ + VISUAL_DBG("E"); + atomic_fetch_add_explicit(&m_shm->eo_context.errors, 1, memory_order_relaxed); + return test_error_handler(eo, error, escope, args); +} + +/** + * Before EM - Init of the test application. + * + * The shared memory is needed if EM instance runs on multiple processes. + * Doing it like this makes it possible to run the app both as threads (-t) + * as well as processes (-p). + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void test_init(const appl_conf_t *appl_conf) +{ + (void)appl_conf; + int core = em_core_id(); + + /* first core creates ShMem */ + if (core == 0) { + m_shm = env_shared_reserve(APP_SHMEM_NAME, + sizeof(timer_app_shm_t)); + /* initialize it */ + if (m_shm) + memset(m_shm, 0, sizeof(timer_app_shm_t)); + + em_register_error_handler(test_error_handler); + if (APP_EXTRA_PRINTS) + APPL_PRINT("%ldk shared memory for app context\n", + sizeof(timer_app_shm_t) / 1000); + + } else { + m_shm = env_shared_lookup(APP_SHMEM_NAME); + } + + if (m_shm == NULL) { + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "ShMem init failed on EM-core: %u", + em_core_id()); + } + + APPL_PRINT("core %d: %s done\n", core, __func__); +} + +/** + * Startup of the timer test EM application. + * + * At this point EM is up, but no EOs exist. EM API can be used to create + * queues, EOs etc. + * + * @attention Run only on one EM core. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. + */ +void test_start(const appl_conf_t *appl_conf) +{ + em_eo_t eo; + em_timer_attr_t attr; + em_timer_res_param_t resparam; + em_queue_t queue; + em_status_t stat; + app_eo_ctx_t *eo_ctx; + em_event_t event; + app_msg_t *msg; + struct timespec ts; + uint64_t period; + + /* + * Store the event pool to use, use the EM default pool if no other + * pool is provided through the appl_conf. 
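+ *
+ * Schematically (this matches the code right below):
+ *   appl_conf->num_pools >= 1  ->  use appl_conf->pools[0]
+ *   otherwise                  ->  fall back to EM_POOL_DEFAULT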
+ */ + if (appl_conf->num_pools >= 1) + m_shm->pool = appl_conf->pools[0]; + else + m_shm->pool = EM_POOL_DEFAULT; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%d\n" + " Application running on %u EM-cores (procs:%u, threads:%u)\n" + " using event pool:%" PRI_POOL "\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + appl_conf->core_count, appl_conf->num_procs, appl_conf->num_threads, + m_shm->pool); + + test_fatal_if(m_shm->pool == EM_POOL_UNDEF, + "Undefined application event pool!"); + + /* Create EO */ + eo_ctx = &m_shm->eo_context; + eo = em_eo_create(APP_EO_NAME, app_eo_start, app_eo_start_local, + app_eo_stop, NULL, app_eo_receive, eo_ctx); + test_fatal_if(eo == EM_EO_UNDEF, "Failed to create EO!"); + + /* atomic queue for control */ + queue = em_queue_create("Control Q", + EM_QUEUE_TYPE_ATOMIC, + EM_QUEUE_PRIO_NORMAL, + EM_QUEUE_GROUP_DEFAULT, NULL); + stat = em_eo_add_queue_sync(eo, queue); + test_fatal_if(stat != EM_OK, "Failed to create queue!"); + + eo_ctx->my_q = queue; + /* another parallel high priority for timeout handling*/ + queue = em_queue_create("Tmo Q", + EM_QUEUE_TYPE_PARALLEL, + EM_QUEUE_PRIO_HIGHEST, + EM_QUEUE_GROUP_DEFAULT, NULL); + stat = em_eo_add_queue_sync(eo, queue); + test_fatal_if(stat != EM_OK, "Failed to create queue!"); + + eo_ctx->my_prio_q = queue; + + stat = em_eo_register_error_handler(eo, eo_error_handler); + test_fatal_if(stat != EM_OK, "Failed to register EO error handler"); + + /* create shared timer and store handle in + * shared memory. Require the configured app values + */ + em_timer_attr_init(&attr); + + /* going to change resolution, so need to check limits */ + memset(&resparam, 0, sizeof(em_timer_res_param_t)); + resparam.res_ns = APP_TIMER_RESOLUTION_US * 1000ULL; + stat = em_timer_res_capability(&resparam, EM_TIMER_CLKSRC_DEFAULT); + test_fatal_if(stat != EM_OK, "Timer does not support the resolution"); + + strncpy(attr.name, "TestTimer", EM_TIMER_NAME_LEN); + attr.num_tmo = APP_MAX_TMOS + APP_MAX_PERIODIC + 1; + attr.resparam = resparam; + attr.resparam.res_hz = 0; + m_shm->tmr = em_timer_create(&attr); + test_fatal_if(m_shm->tmr == EM_TIMER_UNDEF, "Failed to create timer!"); + + eo_ctx->min_tmo = resparam.min_tmo; + + /* Start EO */ + stat = em_eo_start_sync(eo, NULL, NULL); + test_fatal_if(stat != EM_OK, "Failed to start EO!"); + + /* create periodic timeout for heartbeat */ + eo_ctx->heartbeat_tmo = em_tmo_create(m_shm->tmr, EM_TMO_FLAG_PERIODIC, + eo_ctx->my_q); + test_fatal_if(eo_ctx->heartbeat_tmo == EM_TMO_UNDEF, + "Can't allocate heartbeat_tmo!\n"); + + event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, m_shm->pool); + test_fatal_if(event == EM_EVENT_UNDEF, "Can't allocate event (%ldB)!\n", + sizeof(app_msg_t)); + + msg = em_event_pointer(event); + msg->command = APP_CMD_HEARTBEAT; + eo_ctx->hz = em_timer_get_freq(m_shm->tmr); + if (eo_ctx->hz < 100) + APPL_ERROR("WARNING - timer hz very low!\n"); + + /* linux time check */ + test_fatal_if(clock_getres(APP_LINUX_CLOCK_SRC, &ts) != 0, + "clock_getres() failed!\n"); + + period = ts.tv_nsec + (ts.tv_sec * 1000000000ULL); + eo_ctx->linux_hz = 1000000000ULL / period; + APPL_PRINT("Linux reports clock running at %" PRIu64 " hz\n", eo_ctx->linux_hz); + + /* start heartbeat, will later start the test */ + period = (APP_HEARTBEAT_MS * eo_ctx->hz) / 1000; + test_fatal_if(period < 1, "timer 
resolution is too low!\n"); + + stat = em_tmo_set_periodic(eo_ctx->heartbeat_tmo, 0, period, event); + test_fatal_if(stat != EM_OK, "Can't activate heartbeat tmo!\n"); + + APPL_PRINT("%s done, test repetition interval %ds\n\n", __func__, + (int)((APP_HEARTBEAT_MS * APP_CHECK_LIMIT) / 1000)); +} + +void test_stop(const appl_conf_t *appl_conf) +{ + const int core = em_core_id(); + em_status_t ret; + em_eo_t eo; + + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + eo = em_eo_find(APP_EO_NAME); + test_fatal_if(eo == EM_EO_UNDEF, + "Could not find EO:%s", APP_EO_NAME); + + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); + + ret = em_timer_delete(m_shm->tmr); + test_fatal_if(ret != EM_OK, + "Timer:%" PRI_TMR " delete:%" PRI_STAT "", + m_shm->tmr, ret); + + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); +} + +void test_term(const appl_conf_t *appl_conf) +{ + (void)appl_conf; + APPL_PRINT("%s() on EM-core %d\n", __func__, em_core_id()); + if (m_shm != NULL) { + env_shared_free(m_shm); + m_shm = NULL; + em_unregister_error_handler(); + } +} + +/** + * @private + * + * EO start function. + */ +static em_status_t app_eo_start(void *eo_context, em_eo_t eo, + const em_eo_conf_t *conf) +{ + app_eo_ctx_t *const eo_ctx = eo_context; + em_timer_attr_t attr; + em_timer_t tmr; + int num_timers; + + (void)eo; + (void)conf; + + APPL_PRINT("timer_test %s\n", TEST_VERSION); + APPL_PRINT("EO start\n"); + + num_timers = em_timer_get_all(&tmr, 1); + APPL_PRINT("System has %d timer(s)\n", num_timers); + + if (APP_EXTRA_PRINTS) { + if (__atomic_always_lock_free(sizeof(uint64_t), NULL)) + APPL_PRINT("64b atomics are lock-free\n"); + else + APPL_PRINT("64b atomics may use locks\n"); + } + + if (em_timer_get_attr(m_shm->tmr, &attr) != EM_OK) { + APPL_ERROR("Can't get timer info\n"); + return EM_ERR_BAD_ID; + } + APPL_PRINT("Timer \"%s\" info:\n", attr.name); + APPL_PRINT(" -resolution: %" PRIu64 " ns\n", attr.resparam.res_ns); + APPL_PRINT(" -max_tmo: %" PRIu64 " ms\n", attr.resparam.max_tmo / 1000); + APPL_PRINT(" -num_tmo: %d\n", attr.num_tmo); + APPL_PRINT(" -clk_src: %d\n", attr.resparam.clk_src); + APPL_PRINT(" -tick Hz: %" PRIu64 " hz\n", + em_timer_get_freq(m_shm->tmr)); + + eo_ctx->res_ns = attr.resparam.res_ns; + + if (APP_INCREASING_DLY) { + APPL_PRINT("Using increasing processing delay (%d, 1/%d)\n", + APP_INCREASING_DLY, APP_INC_DLY_MODULO); + } + + /* init other local EO context */ + eo_ctx->min_diff = INT64_MAX; + eo_ctx->max_diff = 0; + eo_ctx->min_diff_l = INT64_MAX; + eo_ctx->max_diff_l = 0; + + return EM_OK; +} + +/** + * @private + * + * EO per thread start function. + */ +static em_status_t app_eo_start_local(void *eo_context, em_eo_t eo) +{ + app_eo_ctx_t *const eo_ctx = eo_context; + + (void)eo_ctx; + (void)eo; + + /* per-thread random seed */ + m_randseed = time(NULL); + + return EM_OK; +} + +/** + * @private + * + * EO stop function. 
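+ *
+ * Note: em_tmo_delete() no longer returns the timeout event (compare the
+ * removed code above): a still-active tmo must first be canceled and any
+ * returned event freed by the caller. A minimal sketch of the pattern used
+ * below, with 'tmo' standing in for the heartbeat timeout handle:
+ * @code
+ *     em_event_t event = EM_EVENT_UNDEF;
+ *
+ *     if (em_tmo_get_state(tmo) == EM_TMO_STATE_ACTIVE)
+ *             em_tmo_cancel(tmo, &event);
+ *     em_tmo_delete(tmo);
+ *     if (event != EM_EVENT_UNDEF)
+ *             em_free(event);
+ * @endcode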
+ */ +static em_status_t app_eo_stop(void *eo_context, em_eo_t eo) +{ + app_eo_ctx_t *const eo_ctx = eo_context; + em_event_t event = EM_EVENT_UNDEF; + em_status_t ret; + + APPL_PRINT("EO stop\n"); + + if (eo_ctx->heartbeat_tmo != EM_TMO_UNDEF) { + if (em_tmo_get_state(eo_ctx->heartbeat_tmo) == EM_TMO_STATE_ACTIVE) + em_tmo_cancel(eo_ctx->heartbeat_tmo, &event); + + em_tmo_delete(eo_ctx->heartbeat_tmo); + + eo_ctx->heartbeat_tmo = EM_TMO_UNDEF; + if (event != EM_EVENT_UNDEF) + em_free(event); + } + + cleanup_test(eo_ctx); + + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + return EM_OK; +} + +/** + * @private + * + * EO receive function. Runs the example test app after initialization. + */ +static void app_eo_receive(void *eo_context, em_event_t event, + em_event_type_t type, em_queue_t queue, + void *q_context) +{ + app_eo_ctx_t *const eo_ctx = eo_context; + int reuse = 0; + + (void)q_context; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + VISUAL_DBG("e"); + + if (type == EM_EVENT_TYPE_SW) { + app_msg_t *msgin = (app_msg_t *)em_event_pointer(event); + + switch (msgin->command) { + case APP_CMD_HEARTBEAT: /* uses atomic queue */ + VISUAL_DBG("H"); + handle_heartbeat(eo_ctx, queue); + if (em_tmo_ack(eo_ctx->heartbeat_tmo, event) != EM_OK) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "Heartbeat ack() failed!\n"); + reuse = 1; + break; + + case APP_CMD_TMO_SINGLE: /* parallel queue */ + VISUAL_DBG("s"); + if (queue != eo_ctx->my_prio_q) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "tmo from wrong queue!\n"); + handle_single_event(eo_ctx, event, msgin); + break; + + case APP_CMD_TMO_PERIODIC: /* parallel queue */ + VISUAL_DBG("p"); + if (queue != eo_ctx->my_prio_q) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "tmo from wrong queue!\n"); + reuse = handle_periodic_event(eo_ctx, event, msgin); + break; + + default: + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "Invalid event received!\n"); + } + } else { + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "Invalid event type received!\n"); + } + + if (!reuse) + em_free(event); +} + +void handle_single_event(app_eo_ctx_t *eo_ctx, em_event_t event, + app_msg_t *msgin) +{ + (void)event; + + /* not expecting oneshot after run state */ + if (atomic_load_explicit(&eo_ctx->state, memory_order_acquire) != APP_STATE_RUNNING) { + APPL_PRINT("ERR: Tmo received after test finish\n"); + eo_ctx->errors++; + return; + } + if (msgin->index < 0 || msgin->index >= APP_MAX_TMOS) { + APPL_PRINT("ERR: tmo index out of range. 
Corrupted event?\n");
+ eo_ctx->errors++;
+ return;
+ }
+ if (eo_ctx->oneshot.tmo[msgin->index].appeared) {
+ APPL_PRINT("ERR: Single Tmo received twice\n");
+ eo_ctx->errors++;
+ return;
+ }
+
+ /* lock tmo to avoid race with random cancel by another core */
+ while (atomic_flag_test_and_set_explicit(&eo_ctx->oneshot.tmo[msgin->index].lock,
+ memory_order_acquire))
+ ;
+
+ eo_ctx->oneshot.tmo[msgin->index].appeared = em_timer_current_tick(m_shm->tmr);
+ clock_gettime(APP_LINUX_CLOCK_SRC, &eo_ctx->oneshot.tmo[msgin->index].linux_appeared);
+ atomic_flag_clear_explicit(&eo_ctx->oneshot.tmo[msgin->index].lock, memory_order_release);
+ atomic_fetch_add_explicit(&eo_ctx->oneshot.received, 1, memory_order_relaxed);
+
+ if (!eo_ctx->nocancel)
+ random_cancel(eo_ctx);
+}
+
+int handle_periodic_event(app_eo_ctx_t *eo_ctx, em_event_t event,
+ app_msg_t *msgin)
+{
+ int reuse = 0;
+
+ if (msgin->index < 0 || msgin->index >= APP_MAX_PERIODIC) {
+ APPL_PRINT("ERR: Periodic tmo index out of range\n");
+ eo_ctx->errors++;
+ return reuse;
+ }
+ int state = atomic_load_explicit(&eo_ctx->state, memory_order_acquire);
+
+ if (state != APP_STATE_RUNNING && state != APP_STATE_STOPPING) {
+ APPL_PRINT("ERR: Periodic tmo received after test finish\n");
+ eo_ctx->errors++;
+ return reuse;
+ }
+
+ while (atomic_flag_test_and_set_explicit(&eo_ctx->periodic.tmo[msgin->index].lock,
+ memory_order_acquire))
+ ;
+
+ eo_ctx->periodic.tmo[msgin->index].appeared = em_timer_current_tick(m_shm->tmr);
+ atomic_flag_clear_explicit(&eo_ctx->periodic.tmo[msgin->index].lock, memory_order_release);
+ atomic_fetch_add_explicit(&eo_ctx->periodic.received, 1, memory_order_relaxed);
+
+ /* periodic tmo may keep coming a while after end of round */
+ if (atomic_load_explicit(&eo_ctx->state, memory_order_acquire) == APP_STATE_STOPPING)
+ return 0;
+
+ reuse = 1;
+ if (APP_INCREASING_DLY && msgin->dummy_delay) {
+ /* add delay before ack() to test late ack */
+ dummy_processing(msgin->dummy_delay);
+ msgin->dummy_delay += APP_INCREASING_DLY;
+ eo_ctx->periodic.tmo[msgin->index].max_dummy = msgin->dummy_delay;
+ }
+ em_status_t ret = em_tmo_ack(eo_ctx->periodic.tmo[msgin->index].tmo, event);
+
+ if (ret == EM_ERR_CANCELED) {
+ if (!eo_ctx->periodic.tmo[msgin->index].canceled &&
+ !eo_ctx->periodic.tmo[msgin->index].waitevt) {
+ eo_ctx->ack_errors++;
+ reuse = 0;
+ }
+ } else {
+ if (ret != EM_OK) {
+ APPL_PRINT("em_tmo_ack error:%" PRI_STAT "\n", ret);
+ eo_ctx->ack_errors++;
+ reuse = 0;
+ }
+ }
+ if (!eo_ctx->nocancel) {
+ if (random_cancel_periodic(eo_ctx) == event)
+ reuse = 1;
+ }
+ return reuse;
+}
+
+/* handle heartbeat, i.e.
run state machine */ +void handle_heartbeat(app_eo_ctx_t *eo_ctx, em_queue_t queue) +{ + if (queue != eo_ctx->my_q) { + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "heartbeat from wrong queue!\n"); + } + + eo_ctx->heartbeat_count++; + + if (APP_PRINT_DOTS) { + char ch = dot_marks[eo_ctx->state]; + + if (ch != ' ') + APPL_PRINT("%c", ch); + } + + /* reached next state change */ + if (eo_ctx->heartbeat_count >= eo_ctx->heartbeat_target) { + switch (atomic_load_explicit(&eo_ctx->state, memory_order_acquire)) { + case APP_STATE_IDLE: + start_test(eo_ctx); + eo_ctx->heartbeat_target = eo_ctx->heartbeat_count + APP_CHECK_LIMIT; + break; + case APP_STATE_RUNNING: + stop_test(eo_ctx); + eo_ctx->heartbeat_target = eo_ctx->heartbeat_count + APP_CHECK_GUARD; + break; + case APP_STATE_STOPPING: + check_test(eo_ctx); + eo_ctx->heartbeat_target = eo_ctx->heartbeat_count + APP_CHECK_GUARD; + break; + case APP_STATE_CHECKING: + cleanup_test(eo_ctx); + eo_ctx->heartbeat_target = eo_ctx->heartbeat_count + APP_CHECK_GUARD; + break; + default: + break; + } + } +} + +/* new random timeout APP_TIMEOUT_MIN_US ... APP_TIMEOUT_MAX_US in ticks */ +em_timer_tick_t rand_timeout(unsigned int *seed, app_eo_ctx_t *eo_ctx, + unsigned int fixed) +{ + uint64_t us; + double tick_ns = 1000000000.0 / (double)eo_ctx->hz; + + if (fixed) { + us = fixed; + } else { + us = (uint64_t)rand_r(seed) % (APP_TIMEOUT_MAX_US - APP_TIMEOUT_MIN_US + 1); + us += APP_TIMEOUT_MIN_US; + } + + return (em_timer_tick_t)((double)us * 1000.0 / tick_ns); +} + +/* start new batch of random timeouts */ +void set_timeouts(app_eo_ctx_t *eo_ctx) +{ + app_msg_t *msg; + int i; + uint64_t t1, t2; + struct timespec ts1, ts2; + + /* timeouts allocate new events every time (could reuse old ones). + * Do this first so we can time just the tmo creation + */ + for (i = 0; i < APP_MAX_TMOS; i++) { + em_event_t event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, + m_shm->pool); + if (event == EM_EVENT_UNDEF) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "Can't allocate event nr %d!", i + 1); + + /* prepare as timeout event */ + msg = em_event_pointer(event); + msg->command = APP_CMD_TMO_SINGLE; + msg->index = i; + msg->dummy_delay = 0; + memset(&eo_ctx->oneshot.tmo[i], 0, sizeof(app_tmo_data_t)); + eo_ctx->oneshot.tmo[i].event = event; + } + + t1 = em_timer_current_tick(m_shm->tmr); + clock_gettime(APP_LINUX_CLOCK_SRC, &ts1); + /* allocate new tmos every time (could reuse) */ + for (i = 0; i < APP_MAX_TMOS; i++) { + em_tmo_t tmo = em_tmo_create(m_shm->tmr, EM_TMO_FLAG_ONESHOT, + eo_ctx->my_prio_q); + + if (unlikely(tmo == EM_TMO_UNDEF)) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "Can't allocate tmo nr %d!", i + 1); + + eo_ctx->oneshot.tmo[i].tmo = tmo; + } + + t2 = em_timer_current_tick(m_shm->tmr); + clock_gettime(APP_LINUX_CLOCK_SRC, &ts2); + APPL_PRINT("Timer: Creating %d timeouts took %" PRIu64 " ns (%" PRIu64 + " ns each)\n", i, + tick_diff_ns(t1, t2, eo_ctx->hz), + tick_diff_ns(t1, t2, eo_ctx->hz) / APP_MAX_TMOS); + APPL_PRINT("Linux: Creating %d timeouts took %" PRIu64 " ns (%" PRIu64 + " ns each)\n", i, ts_diff_ns(&ts1, &ts2), + ts_diff_ns(&ts1, &ts2) / APP_MAX_TMOS); + + /* start them all. Some might be served before this loop ends! 
*/ + for (i = 0; i < APP_MAX_TMOS; i++) { + unsigned int fixed = 0; + + /* always test min and max tmo */ + if (i == 0) + fixed = APP_TIMEOUT_MAX_US; + else if (i == 1) + fixed = APP_TIMEOUT_MIN_US; + + eo_ctx->oneshot.tmo[i].howmuch = rand_timeout(&m_randseed, + eo_ctx, fixed); + eo_ctx->oneshot.tmo[i].when = em_timer_current_tick(m_shm->tmr); + clock_gettime(APP_LINUX_CLOCK_SRC, + &eo_ctx->oneshot.tmo[i].linux_when); + if (em_tmo_set_rel(eo_ctx->oneshot.tmo[i].tmo, + eo_ctx->oneshot.tmo[i].howmuch, + eo_ctx->oneshot.tmo[i].event) != EM_OK) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "Can't activate tmo!\n"); + } + if (APP_MAX_TMOS) + APPL_PRINT("Started single shots\n"); + + /* then periodic */ + for (i = 0; i < APP_MAX_PERIODIC; i++) { + unsigned int fixed = 0; + + em_event_t event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, + m_shm->pool); + if (event == EM_EVENT_UNDEF) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "Can't allocate event!"); + + msg = em_event_pointer(event); + msg->command = APP_CMD_TMO_PERIODIC; + msg->index = i; + msg->dummy_delay = (i % APP_INC_DLY_MODULO) ? + 0 : APP_INCREASING_DLY; + memset(&eo_ctx->periodic.tmo[i], 0, sizeof(app_tmo_data_t)); + eo_ctx->periodic.tmo[i].event = event; + + em_tmo_t tmo = em_tmo_create(m_shm->tmr, EM_TMO_FLAG_PERIODIC, + eo_ctx->my_prio_q); + if (unlikely(tmo == EM_TMO_UNDEF)) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "Can't allocate periodic tmo nr %d!", i + 1); + eo_ctx->periodic.tmo[i].tmo = tmo; + + /* always test min and max tmo */ + if (i == 0) + fixed = APP_TIMEOUT_MAX_US; + else if (i == 1) + fixed = APP_TIMEOUT_MIN_US; + eo_ctx->periodic.tmo[i].howmuch = rand_timeout(&m_randseed, + eo_ctx, fixed); + eo_ctx->periodic.tmo[i].when = em_timer_current_tick(m_shm->tmr); + if (em_tmo_set_periodic(eo_ctx->periodic.tmo[i].tmo, + 0, + eo_ctx->periodic.tmo[i].howmuch, + eo_ctx->periodic.tmo[i].event) != EM_OK) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "Can't activate periodic tmo nr %d!\n", i + 1); + } + + if (APP_MAX_PERIODIC) + APPL_PRINT("Started periodic\n"); +} + +void start_test(app_eo_ctx_t *eo_ctx) +{ + eo_ctx->oneshot.received = 0; + eo_ctx->oneshot.cancelled = 0; + eo_ctx->oneshot.cancel_fail = 0; + + eo_ctx->periodic.received = 0; + eo_ctx->periodic.cancelled = 0; + eo_ctx->periodic.cancel_fail = 0; + + time_t t = time(NULL); + struct tm *tm = localtime(&t); + char s[40]; + + strftime(s, sizeof(s), "%b-%d %H:%M:%S", tm); + eo_ctx->rounds++; + APPL_PRINT("\n\n%s ROUND %" PRIu64 " ************\n", + s, eo_ctx->rounds); + + eo_ctx->nocancel = 1; + /* do this before starting tmo as some could be received while still here */ + atomic_store_explicit(&eo_ctx->state, APP_STATE_RUNNING, memory_order_release); + + set_timeouts(eo_ctx); /* timeouts start coming */ + APPL_PRINT("Running\n"); + eo_ctx->nocancel = 0; /* after all timeouts are completely created */ +} + +void stop_test(app_eo_ctx_t *eo_ctx) +{ + em_event_t event; + + /* test assumes all oneshots are received, + * but this will stop possible periodic timeout processing + */ + atomic_store_explicit(&eo_ctx->state, APP_STATE_STOPPING, memory_order_release); + + /* cancel ongoing periodic */ + for (int i = 0; i < APP_MAX_PERIODIC; i++) { + event = EM_EVENT_UNDEF; + + /* lock tmo to avoid race with possible unfinished random cancel */ + while (atomic_flag_test_and_set_explicit(&eo_ctx->periodic.tmo[i].lock, + memory_order_acquire)) + ; + + /* double cancel is an error */ + if (!eo_ctx->periodic.tmo[i].canceled && 
!eo_ctx->periodic.tmo[i].waitevt) { + em_status_t ret = em_tmo_cancel(eo_ctx->periodic.tmo[i].tmo, &event); + + if (ret != EM_OK && ret != EM_ERR_TOONEAR) { + APPL_PRINT("%s: cancel returned %u!\n", __func__, ret); + eo_ctx->errors++; + } + } + atomic_flag_clear_explicit(&eo_ctx->periodic.tmo[i].lock, memory_order_release); + if (event != EM_EVENT_UNDEF) + em_free(event); + } +} + +void cleanup_test(app_eo_ctx_t *eo_ctx) +{ + int i; + uint64_t t1, t2; + struct timespec ts1, ts2; + + APPL_PRINT("\nCleaning up\n"); + + t1 = em_timer_current_tick(m_shm->tmr); + clock_gettime(APP_LINUX_CLOCK_SRC, &ts1); + for (i = 0; i < APP_MAX_TMOS; i++) { + em_event_t evt = EM_EVENT_UNDEF; + + if (eo_ctx->oneshot.tmo[i].tmo == EM_TMO_UNDEF) + continue; + + if (em_tmo_get_state(eo_ctx->oneshot.tmo[i].tmo) == EM_TMO_STATE_ACTIVE) + em_tmo_cancel(eo_ctx->oneshot.tmo[i].tmo, &evt); + + if (em_tmo_delete(eo_ctx->oneshot.tmo[i].tmo) != EM_OK) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "Can't free tmo!\n"); + + eo_ctx->oneshot.tmo[i].tmo = EM_TMO_UNDEF; + if (evt != EM_EVENT_UNDEF && !appl_shm->exit_flag) { + APPL_PRINT("WARN - tmo_delete returned event,\n" + " should be received or canceled!\n"); + em_free(evt); + } + } + t2 = em_timer_current_tick(m_shm->tmr); + clock_gettime(APP_LINUX_CLOCK_SRC, &ts2); + APPL_PRINT("Timer: Deleting %d timeouts took %" PRIu64 + " ns (%" PRIu64 " ns each)\n", i, + tick_diff_ns(t1, t2, eo_ctx->hz), + tick_diff_ns(t1, t2, eo_ctx->hz) / APP_MAX_TMOS); + APPL_PRINT("Linux: Deleting %d timeouts took %" PRIu64 " ns (%" PRIu64 + " ns each)\n", i, ts_diff_ns(&ts1, &ts2), + ts_diff_ns(&ts1, &ts2) / APP_MAX_TMOS); + + for (i = 0; i < APP_MAX_PERIODIC; i++) { + em_event_t evt = EM_EVENT_UNDEF; + + if (eo_ctx->periodic.tmo[i].tmo == EM_TMO_UNDEF) + continue; + + if (em_tmo_get_state(eo_ctx->periodic.tmo[i].tmo) == EM_TMO_STATE_ACTIVE) + em_tmo_cancel(eo_ctx->periodic.tmo[i].tmo, &evt); + + if (em_tmo_delete(eo_ctx->periodic.tmo[i].tmo) != EM_OK) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "Can't delete periodic tmo!\n"); + eo_ctx->periodic.tmo[i].tmo = EM_TMO_UNDEF; + if (evt != EM_EVENT_UNDEF) + em_free(evt); + } + atomic_store_explicit(&eo_ctx->state, APP_STATE_IDLE, memory_order_release); +} + +void check_test(app_eo_ctx_t *eo_ctx) +{ + unsigned int errors; + + atomic_store_explicit(&eo_ctx->state, APP_STATE_CHECKING, memory_order_release); + eo_ctx->nocancel = 1; + + APPL_PRINT("\nHeartbeat count %" PRIu64 "\n", eo_ctx->heartbeat_count); + + errors = check_single(eo_ctx); + errors += check_periodic(eo_ctx); + eo_ctx->errors += errors; + APPL_PRINT("Errors: %u\n\n", errors); + + APPL_PRINT("TOTAL RUNTIME/US: min %" PRIi64 ", max %" PRIi64 "\n", + tick_diff_ns(0, eo_ctx->min_diff, eo_ctx->hz) / 1000, + tick_diff_ns(0, eo_ctx->max_diff, eo_ctx->hz) / 1000); + APPL_PRINT("TOTAL RUNTIME LINUX/US: min %" PRIi64 ", max %" PRIi64 "\n", + eo_ctx->min_diff_l / 1000, eo_ctx->max_diff_l / 1000); + APPL_PRINT("TOTAL ERRORS: %u\n", eo_ctx->errors); + APPL_PRINT("TOTAL ACK FAILS (OK): %u\n", eo_ctx->ack_errors); + if (APP_INCREASING_DLY) + APPL_PRINT("TOTAL MAX DUMMY PROCESSING/US: %u\n", + eo_ctx->max_dummy); +} + +/* timespec diff to ns */ +int64_t ts_diff_ns(struct timespec *ts1, struct timespec *ts2) +{ + uint64_t t1 = ts1->tv_nsec + (ts1->tv_sec * 1000000000ULL); + uint64_t t2 = ts2->tv_nsec + (ts2->tv_sec * 1000000000ULL); + + return (t2 - t1); +} + +/* timer tick diff to ns */ +int64_t tick_diff_ns(em_timer_tick_t t1, em_timer_tick_t t2, uint64_t hz) +{ + int64_t ticks = 
(int64_t)t2 - (int64_t)t1;
+ double tick_ns = 1000000000.0 / (double)hz;
+
+ return (int64_t)((double)ticks * tick_ns);
+}
+
+void random_cancel(app_eo_ctx_t *eo_ctx)
+{
+ unsigned int idx = (unsigned int)rand_r(&m_randseed) %
+ (APP_CANCEL_MODULO ? APP_CANCEL_MODULO : 1);
+
+ if (idx >= APP_MAX_TMOS || idx == 0)
+ return;
+
+ /* This is tricky as we're possibly canceling a timeout that might be under work
+ * on another core, so lock tmo state before trying cancel to avoid race
+ */
+ while (atomic_flag_test_and_set_explicit(&eo_ctx->oneshot.tmo[idx].lock,
+ memory_order_acquire))
+ ;
+
+ if (!eo_ctx->oneshot.tmo[idx].canceled && !eo_ctx->oneshot.tmo[idx].waitevt &&
+ eo_ctx->oneshot.tmo[idx].tmo != EM_TMO_UNDEF) {
+ /* try to cancel (Tmo might have been fired already) */
+ em_event_t evt = EM_EVENT_UNDEF;
+ em_status_t retval;
+ em_timer_tick_t now;
+
+ retval = em_tmo_cancel(eo_ctx->oneshot.tmo[idx].tmo, &evt);
+ now = em_timer_current_tick(m_shm->tmr);
+ if (retval == EM_OK) {
+ eo_ctx->oneshot.tmo[idx].canceled = now;
+ eo_ctx->oneshot.cancelled++;
+ if (evt == EM_EVENT_UNDEF) { /* cancel ok but no event returned */
+ APPL_PRINT("ERR: cancel ok but no event!\n");
+ eo_ctx->errors++;
+ }
+ if (eo_ctx->oneshot.tmo[idx].appeared) {
+ APPL_PRINT("ERR: cancel ok after event received!\n");
+ eo_ctx->errors++;
+ }
+ } else { /* cancel fail, too late */
+ eo_ctx->oneshot.cancel_fail += 1;
+ if (evt != EM_EVENT_UNDEF) { /* cancel fail but event returned */
+ APPL_PRINT("ERR: cancel fail but event returned (rv %u)!\n", retval);
+ eo_ctx->errors++;
+ } else { /* event should appear later */
+ eo_ctx->oneshot.tmo[idx].waitevt = now;
+ }
+ }
+
+ if (evt != EM_EVENT_UNDEF) /* cancelled in time, free event */
+ em_free(evt);
+
+ VISUAL_DBG("c");
+ }
+ atomic_flag_clear_explicit(&eo_ctx->oneshot.tmo[idx].lock, memory_order_release);
+}
+
+em_event_t random_cancel_periodic(app_eo_ctx_t *eo_ctx)
+{
+ unsigned int idx = ((unsigned int)rand_r(&m_randseed)) %
+ (APP_CANCEL_MODULO_P ?
APP_CANCEL_MODULO_P : 1); + + if (idx >= APP_MAX_PERIODIC || idx == 0) + return EM_EVENT_UNDEF; + + /* lock tmo state before trying cancel to avoid race on receive */ + while (atomic_flag_test_and_set_explicit(&eo_ctx->periodic.tmo[idx].lock, + memory_order_acquire)) + ; + + if (!eo_ctx->periodic.tmo[idx].canceled && !eo_ctx->periodic.tmo[idx].waitevt && + eo_ctx->periodic.tmo[idx].tmo != EM_TMO_UNDEF) { + /* try to cancel (Tmo might have been fired already) */ + em_event_t evt = EM_EVENT_UNDEF; + + if (em_tmo_cancel(eo_ctx->periodic.tmo[idx].tmo, &evt) == EM_OK) { + eo_ctx->periodic.tmo[idx].canceled = em_timer_current_tick(m_shm->tmr); + eo_ctx->periodic.cancelled++; + } else { + eo_ctx->periodic.cancel_fail++; + if (evt == EM_EVENT_UNDEF) {/* cancel failed, event should appear */ + eo_ctx->periodic.tmo[idx].waitevt = + em_timer_current_tick(m_shm->tmr); + } + } + eo_ctx->periodic.tmo[idx].appeared = 0; + VISUAL_DBG("C"); + if (evt != EM_EVENT_UNDEF) { + atomic_flag_clear_explicit(&eo_ctx->periodic.tmo[idx].lock, + memory_order_release); + em_free(evt); + return evt; /* to skip wrong free in receive */ + } + } + + atomic_flag_clear_explicit(&eo_ctx->periodic.tmo[idx].lock, memory_order_release); + return EM_EVENT_UNDEF; +} + +unsigned int check_single(app_eo_ctx_t *eo_ctx) +{ + int i; + unsigned int errors = 0; + int64_t min_diff = INT64_MAX; + int64_t max_diff = 0; + int64_t avg_diff = 0; + int64_t min_linux = INT64_MAX; + int64_t max_linux = 0; + int64_t avg_linux = 0; + struct timespec zerot; + + memset(&zerot, 0, sizeof(zerot)); /* 0 to use diff*/ + APPL_PRINT("ONESHOT:\n"); + APPL_PRINT(" Received: %" PRIu64 ", expected %lu\n", + eo_ctx->oneshot.received, + APP_MAX_TMOS - eo_ctx->oneshot.cancelled); + APPL_PRINT(" Cancelled OK: %" PRIu64 "\n", eo_ctx->oneshot.cancelled); + APPL_PRINT(" Cancel failed (too late): %" PRIu64 "\n", + eo_ctx->oneshot.cancel_fail); + + for (i = 0; i < APP_MAX_TMOS; i++) { + /* missing any? 
+		/* missing any? */
+		if (!eo_ctx->oneshot.tmo[i].canceled && !eo_ctx->oneshot.tmo[i].waitevt &&
+		    !eo_ctx->oneshot.tmo[i].appeared) {
+			APPL_PRINT(" ERR: TMO %d event missing!\n", i);
+			APPL_PRINT(" - to %lu ticks\n", eo_ctx->oneshot.tmo[i].howmuch);
+			errors++;
+		}
+
+		/* calculate timing */
+		if (eo_ctx->oneshot.tmo[i].appeared) {
+			/* timer ticks */
+			uint64_t target = eo_ctx->oneshot.tmo[i].when +
+					  eo_ctx->oneshot.tmo[i].howmuch;
+			int64_t diff = (int64_t)eo_ctx->oneshot.tmo[i].appeared - (int64_t)target;
+
+			if (APP_PRINT_EACH_TMO)
+				APPL_PRINT("Timeout #%d: diff %" PRIi64
+					   " ticks\n", i + 1, diff);
+			if (min_diff > diff)
+				min_diff = diff;
+			if (max_diff < diff)
+				max_diff = diff;
+			avg_diff += diff;
+
+			/* linux time in ns */
+			int64_t ldiff;
+
+			ldiff = tick_diff_ns(0, eo_ctx->oneshot.tmo[i].howmuch, eo_ctx->hz);
+			target = ts_diff_ns(&zerot, &eo_ctx->oneshot.tmo[i].linux_when) + ldiff;
+			diff = (int64_t)ts_diff_ns(&zerot, &eo_ctx->oneshot.tmo[i].linux_appeared)
+			       - (int64_t)target;
+			if (APP_PRINT_EACH_TMO)
+				APPL_PRINT("Timeout #%d: diff %" PRIi64
+					   " linux ns\n", i + 1, diff);
+			if (min_linux > diff)
+				min_linux = diff;
+			if (max_linux < diff)
+				max_linux = diff;
+			avg_linux += diff;
+		}
+
+		/* canceled ok but still appeared */
+		if (eo_ctx->oneshot.tmo[i].canceled && eo_ctx->oneshot.tmo[i].appeared) {
+			APPL_PRINT(" ERR: TMO %d cancel ok but event appeared!\n", i);
+			APPL_PRINT(" - expire %lu, cancel ok at %lu\n",
+				   eo_ctx->oneshot.tmo[i].when,
+				   eo_ctx->oneshot.tmo[i].canceled);
+			errors++;
+		}
+
+		/* cancel failed as too late, but event did not appear */
+		if (eo_ctx->oneshot.tmo[i].waitevt && !eo_ctx->oneshot.tmo[i].appeared) {
+			APPL_PRINT(" ERR: TMO %d cancel fail but event never appeared!\n", i);
+			APPL_PRINT(" - expire %lu, cancel fail at %lu\n",
+				   eo_ctx->oneshot.tmo[i].when,
+				   eo_ctx->oneshot.tmo[i].waitevt);
+			errors++;
+		}
+
+		/* cancel failed but should have succeeded? */
+		if (eo_ctx->oneshot.tmo[i].waitevt) {
+			em_timer_tick_t exp_tick = eo_ctx->oneshot.tmo[i].when +
+						   eo_ctx->oneshot.tmo[i].howmuch;
+			int64_t diff = tick_diff_ns(eo_ctx->oneshot.tmo[i].waitevt, exp_tick,
+						    eo_ctx->hz);
+
+			if (diff > (int64_t)eo_ctx->min_tmo +
+			    (int64_t)eo_ctx->res_ns + APP_CANCEL_MARGIN_NS) {
+				APPL_PRINT("ERR: cancel should have worked, ");
+				APPL_PRINT("%ld ns before target (min %lu)\n", diff, eo_ctx->min_tmo);
+				errors++;
+			}
+		}
+	}
+
+	if (eo_ctx->oneshot.received) { /* guard against division by zero */
+		avg_diff /= (int64_t)eo_ctx->oneshot.received;
+		avg_linux /= (int64_t)eo_ctx->oneshot.received;
+	}
+	APPL_PRINT(" SUMMARY/TICKS: min %" PRIi64 ", max %" PRIi64
+		   ", avg %" PRIi64 "\n", min_diff, max_diff,
+		   avg_diff);
+	APPL_PRINT(" /US: min %" PRIi64 ", max %" PRIi64
+		   ", avg %" PRIi64 "\n",
+		   tick_diff_ns(0, min_diff, eo_ctx->hz) / 1000,
+		   tick_diff_ns(0, max_diff, eo_ctx->hz) / 1000,
+		   tick_diff_ns(0, avg_diff, eo_ctx->hz) / 1000);
+	APPL_PRINT(" SUMMARY/LINUX US: min %" PRIi64 ", max %" PRIi64
+		   ", avg %" PRIi64 "\n", min_linux / 1000, max_linux / 1000,
+		   avg_linux / 1000);
+
+	/* over total runtime */
+	if (eo_ctx->min_diff > min_diff)
+		eo_ctx->min_diff = min_diff;
+	if (eo_ctx->max_diff < max_diff)
+		eo_ctx->max_diff = max_diff;
+	if (eo_ctx->min_diff_l > min_linux)
+		eo_ctx->min_diff_l = min_linux;
+	if (eo_ctx->max_diff_l < max_linux)
+		eo_ctx->max_diff_l = max_linux;
+
+	return errors;
+}
+
+unsigned int check_periodic(app_eo_ctx_t *eo_ctx)
+{
+	int i;
+	unsigned int errors = 0;
+	unsigned int max_dummy = 0;
+
+	APPL_PRINT("PERIODIC:\n");
+	APPL_PRINT(" Received: %" PRIu64 "\n", eo_ctx->periodic.received);
+	APPL_PRINT(" Cancelled: %" PRIu64 "\n", eo_ctx->periodic.cancelled);
+	APPL_PRINT(" Cancel failed (too late): %" PRIu64 "\n", eo_ctx->periodic.cancel_fail);
+
+	for (i = 0; i < APP_MAX_PERIODIC; i++) {
+		/* missing? */
+		if (!eo_ctx->periodic.tmo[i].canceled && !eo_ctx->periodic.tmo[i].waitevt &&
+		    !eo_ctx->periodic.tmo[i].appeared) {
+			APPL_PRINT(" ERR: No periodic TMO %d event(s)!\n", i);
+			errors++;
+		}
+		/* appeared after successful cancel? */
+		if (eo_ctx->periodic.tmo[i].canceled && eo_ctx->periodic.tmo[i].appeared) {
+			APPL_PRINT(" ERR: periodic TMO %d event(s) after successful cancel!\n", i);
+			errors++;
+		}
+		/* did not appear after failed cancel? */
+		if (APP_PER_CANCEL_CHK) {
+			if (eo_ctx->periodic.tmo[i].waitevt && !eo_ctx->periodic.tmo[i].appeared) {
+				APPL_PRINT(" ERR: periodic TMO %d no event after failed cancel!\n",
+					   i);
+				errors++;
+			}
+		}
+		if (max_dummy < eo_ctx->periodic.tmo[i].max_dummy)
+			max_dummy = eo_ctx->periodic.tmo[i].max_dummy;
+	}
+
+	if (max_dummy) {
+		APPL_PRINT(" Max extra processing delay before ack (us): %u\n", max_dummy);
+		if (eo_ctx->max_dummy < max_dummy)
+			eo_ctx->max_dummy = max_dummy;
+	}
+
+	return errors;
+}
+
+/* emulate processing delay */
+static void dummy_processing(unsigned int us)
+{
+	struct timespec now, sample;
+
+	VISUAL_DBG("D");
+
+	clock_gettime(APP_LINUX_CLOCK_SRC, &now);
+	do {
+		clock_gettime(APP_LINUX_CLOCK_SRC, &sample);
+	} while (ts_diff_ns(&now, &sample) / 1000ULL < us);
+	VISUAL_DBG("d");
+}
diff --git a/programs/performance/timer_test_periodic.c b/programs/performance/timer_test_periodic.c
index a029d92a..792484af 100644
--- a/programs/performance/timer_test_periodic.c
+++ b/programs/performance/timer_test_periodic.c
@@ -1,2238 +1,2244 @@
-/*
- * Copyright (c) 2020-2021, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *   * Neither the name of the copyright holder nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * @file
- *
- * Event Machine timer test for periodic timeouts.
- *
- * See the 'instructions' string in timer_test_periodic.h for usage.
- *
- * Exception/error management is simplified and aborts on any error.
- */
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE
-#endif
-#include <inttypes.h>
-#include <string.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <signal.h>
-#include <getopt.h>
-#include <libgen.h>
-#include <math.h>
-#include <time.h>
-#include <sys/mman.h>
-
-#include <event_machine.h>
-#include <event_machine/add-ons/event_machine_timer.h>
-#include <event_machine/platform/env/environment.h>
-
-#include "cm_setup.h"
-#include "cm_error_handler.h"
-
-#include "timer_test_periodic.h"
-
-#define VERSION "v1.3"
-
-struct {
-	int num_periodic;
-	uint64_t res_ns;
-	uint64_t res_hz;
-	uint64_t period_ns;
-	int64_t first_ns;
-	uint64_t max_period_ns;
-	uint64_t min_period_ns;
-	uint64_t min_work_ns;
-	uint64_t max_work_ns;
-	unsigned int work_prop;
-	int clock_src;
-	const char *csv;
-	int num_runs;
-	int tracebuf;
-	int trcstop;
-	int noskip;
-	int profile;
-	int dispatch;
-	int jobs;
-	int info_only;
-	int usehuge;	/* for trace buffer */
-	int bg_events;
-	uint64_t bg_time_ns;
-	int bg_size;
-	int bg_chunk;
-	int mz_mb;
-	int mz_huge;
-	uint64_t mz_ns;
-	int abort;	/* for testing abnormal exit */
-	int num_timers;
-	int no_del;
-	int same_tick;
-	int recreate;
-	uint64_t stop_limit;
-	em_event_type_t etype;
-
-} g_options = { .num_periodic = 1,	/* defaults for basic check */
-		.res_ns = DEF_RES_NS,
-		.res_hz = 0,
-		.period_ns = DEF_PERIOD * DEF_RES_NS,
-		.first_ns = 0,
-		.max_period_ns = 0,	/* max,min updated in init if not given cmdline */
-		.min_period_ns = 0,
-		.min_work_ns = 0,
-		.max_work_ns = 0,
-		.work_prop = 0,
-		.clock_src = EM_TIMER_CLKSRC_DEFAULT,
-		.csv = NULL,
-		.num_runs = 1,
-		.tracebuf = DEF_TMO_DATA,
-		.trcstop = ((STOP_THRESHOLD * DEF_TMO_DATA) / 100),
-		.noskip = 1,
-		.profile = 0,
-		.dispatch = 0,
-		.jobs = 0,
-		.info_only = 0,
-		.usehuge = 0,
-		.bg_events = 0,
-		.bg_time_ns = 10000,
-		.bg_size = 5000 * 1024,
-		.bg_chunk = 50 * 1024,
-		.mz_mb = 0,
-		.mz_huge = 0,
-		.mz_ns = 0,
-		.abort = 0,
-		.num_timers = 1,
-		.no_del = 0,
-		.same_tick = 0,
-		.recreate = 0,
-		.stop_limit = 0,
-		.etype = EM_EVENT_TYPE_SW
-	      };
-
-typedef struct global_stats_t {
-	uint64_t num_late;	/* ack late */
-	int64_t max_dev_ns;	/* +- max deviation from target */
-	int64_t max_early_ns;	/* max arrival before target time */
-	uint64_t num_tmo;	/* total received tmo count */
-	int max_cpu;		/* max CPU load % (any single) */
-	uint64_t max_dispatch;	/* max EO receive time */
-} global_stats_t;
-
-typedef struct app_eo_ctx_t {
-	e_state state;
-	em_tmo_t heartbeat_tmo;
-	em_timer_attr_t tmr_attr;
-	em_queue_t hb_q;
-	em_queue_t test_q;
-	em_queue_t stop_q;
-	em_queue_t bg_q;
-	int cooloff;
-	int last_hbcount;
-	uint64_t hb_hz;
-	uint64_t test_hz;
-	uint64_t time_hz;
-	uint64_t meas_test_hz;
-	uint64_t meas_time_hz;
-	uint64_t linux_hz;
-	uint64_t max_period;
-	uint64_t started;
-	uint64_t stopped;
-	uint64_t appstart;
-	uint64_t start_loop_ns;
-	void *bg_data;
-	void *mz_data;
-	uint64_t mz_count;
-	int stop_sent;
-	em_atomic_group_t agrp;
-	global_stats_t global_stat;
-	tmo_setup *tmo_data;
-	core_data cdat[MAX_CORES];
-} app_eo_ctx_t;
-
-typedef struct timer_app_shm_t {
-	/* Number of EM cores running the application */
-	unsigned int core_count;
-	em_pool_t pool;
-	app_eo_ctx_t eo_context;
-	em_timer_t hb_tmr;
-	em_timer_t test_tmr[MAX_TEST_TIMERS];
-} timer_app_shm_t;
-
-/* EM-thread locals */
-static __thread timer_app_shm_t *m_shm;
-
-static void start_periodic(app_eo_ctx_t *eo_context);
-static int handle_periodic(app_eo_ctx_t *eo_context, em_event_t event);
-static void send_stop(app_eo_ctx_t *eo_context);
-static void handle_heartbeat(app_eo_ctx_t *eo_context, em_event_t event);
-static void usage(void);
-static int parse_my_args(int first, int argc, char *argv[]);
-static void analyze(app_eo_ctx_t *eo_ctx);
-static void write_trace(app_eo_ctx_t *eo_ctx, const char *name);
-static void cleanup(app_eo_ctx_t *eo_ctx);
-static int add_trace(app_eo_ctx_t *eo_ctx, int id, e_op op, uint64_t ns, int count, int tidx);
-static uint64_t linux_time_ns(void);
-static em_status_t app_eo_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf);
-static em_status_t app_eo_start_local(void *eo_context, em_eo_t eo);
-static em_status_t app_eo_stop(void *eo_context, em_eo_t eo);
-static em_status_t app_eo_stop_local(void *eo_context, em_eo_t eo);
-static void app_eo_receive(void *eo_context, em_event_t event,
-			   em_event_type_t type, em_queue_t queue, void *q_context);
-static int arg_to_ns(const char *s, int64_t *val);
-static void profile_statistics(e_op op, int cores, app_eo_ctx_t *eo_ctx);
-static void profile_all_stats(int cores, app_eo_ctx_t *eo_ctx);
-static void analyze_measure(app_eo_ctx_t *eo_ctx, uint64_t linuxns,
-			    uint64_t tmrtick, uint64_t timetick);
-static bool timing_statistics(app_eo_ctx_t *eo_ctx);
-static void add_prof(app_eo_ctx_t *eo_ctx, uint64_t t1, e_op op, app_msg_t *msg);
-static int do_one_tmo(int id, app_eo_ctx_t *eo_ctx,
-		      uint64_t *min, uint64_t *max, uint64_t *first,
-		      int64_t *tgt_max_ns, int64_t *max_early_ns, int *evnum);
-static tmo_trace *find_tmo(app_eo_ctx_t *eo_ctx, int id, int count, int *last);
-static uint64_t random_tmo_ns(uint64_t minval, uint64_t maxval);
-static uint64_t random_work_ns(rnd_state_t *rng);
-static void enter_cb(em_eo_t eo, void **eo_ctx, em_event_t events[], int num,
-		     em_queue_t *queue, void **q_ctx);
-static void exit_cb(em_eo_t eo);
-static void send_bg_events(app_eo_ctx_t *eo_ctx);
-static int do_bg_work(em_event_t evt, app_eo_ctx_t *eo_ctx);
-static int do_memzero(app_msg_t *msg, app_eo_ctx_t *eo_ctx);
-static em_status_t
my_error_handler(em_eo_t eo, em_status_t error, - em_escope_t escope, va_list args); -static void *allocate_tracebuf(int numbuf, size_t bufsize, size_t *realsize); -static void free_tracebuf(void *ptr, size_t realsize); -static void prefault(void *buf, size_t size); -static void show_global_stats(app_eo_ctx_t *eo_ctx); -static void create_timers(app_eo_ctx_t *eo_ctx); -static void delete_timers(app_eo_ctx_t *eo_ctx); -static void first_timer_create(app_eo_ctx_t *eo_ctx); - -/* --------------------------------------- */ -em_status_t my_error_handler(em_eo_t eo, em_status_t error, - em_escope_t escope, va_list args) -{ - if (escope == 0xDEAD) { /* test_fatal_if */ - va_list my_args; - - va_copy(my_args, args); - - char *file = va_arg(my_args, char*); - const char *func = va_arg(my_args, const char*); - const int line = va_arg(my_args, const int); - const char *format = va_arg(my_args, const char*); - const char *base = basename(file); - - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wformat-nonliteral" - fprintf(stderr, "FATAL - %s:%d, %s():\n", - base, line, func); - vfprintf(stderr, format, my_args); - #pragma GCC diagnostic pop - va_end(my_args); - } - return test_error_handler(eo, error, escope, args); -} - -void enter_cb(em_eo_t eo, void **eo_ctx, em_event_t events[], int num, - em_queue_t *queue, void **q_ctx) -{ - static int count; - app_eo_ctx_t *const my_eo_ctx = *eo_ctx; - - (void)eo; - (void)queue; - (void)q_ctx; - - if (unlikely(!my_eo_ctx)) - return; - - if (g_options.dispatch) { - for (int i = 0; i < num; i++) { - app_msg_t *msg = em_event_pointer(events[i]); - - add_trace(my_eo_ctx, msg->id, OP_PROF_ENTER_CB, - 0, count++, -1); - } - } - my_eo_ctx->cdat[em_core_id()].enter = TIME_STAMP_FN(); -} - -void exit_cb(em_eo_t eo) -{ - static int count; - app_eo_ctx_t *const my_eo_ctx = em_eo_get_context(eo); - - if (unlikely(!my_eo_ctx)) - return; - - if (g_options.dispatch) - add_trace(my_eo_ctx, -1, OP_PROF_EXIT_CB, 0, count++, -1); - - core_data *cdat = &my_eo_ctx->cdat[em_core_id()]; - uint64_t took; - - if (__atomic_load_n(&my_eo_ctx->state, __ATOMIC_ACQUIRE) == STATE_RUN) { - took = TIME_STAMP_FN() - cdat->enter; - cdat->acc_time += took; - } -} - -void prefault(void *buf, size_t size) -{ - uint8_t *ptr = (uint8_t *)buf; - - /* write all pages to allocate and pre-fault (reduce runtime jitter) */ - if (EXTRA_PRINTS) - APPL_PRINT("Pre-faulting %lu bytes at %p (EM core %d)\n", size, buf, em_core_id()); - for (size_t i = 0; i < size; i += 4096) - *(ptr + i) = (uint8_t)i; -} - -void *allocate_tracebuf(int numbuf, size_t bufsize, size_t *realsize) -{ - if (g_options.usehuge) { - *realsize = (numbuf + 1) * bufsize; - void *ptr = mmap(NULL, *realsize, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE | MAP_HUGETLB | MAP_LOCKED, - -1, 0); - if (ptr == MAP_FAILED) { - APPL_PRINT("Huge page mapping failed for trace buffer (%lu bytes)\n", - *realsize); - return NULL; - } else { - return ptr; - } - - } else { - void *buf = calloc(numbuf + 1, bufsize); - - *realsize = numbuf * bufsize; - prefault(buf, *realsize); - return buf; - } -} - -void free_tracebuf(void *ptr, size_t realsize) -{ - if (g_options.usehuge) - munmap(ptr, realsize); - else - free(ptr); -} - -uint64_t linux_time_ns(void) -{ - struct timespec ts; - uint64_t ns; - - clock_gettime(CLOCK_MONOTONIC_RAW, &ts); - ns = ts.tv_nsec + (ts.tv_sec * 1000000000ULL); - return ns; -} - -int arg_to_ns(const char *s, int64_t *val) -{ - char *endp; - int64_t num, mul = 1; - - num = strtol(s, &endp, 0); - if 
(num == 0 && *s != '0') - return 0; - - if (*endp != '\0') - switch (*endp) { - case 'n': - mul = 1; /* ns */ - break; - case 'u': - mul = 1000; /* us */ - break; - case 'm': - mul = 1000 * 1000; /* ms */ - break; - case 's': - mul = 1000 * 1000 * 1000; /* s */ - break; - default: - return 0; - } - - *val = num * mul; - return 1; -} - -void send_stop(app_eo_ctx_t *eo_ctx) -{ - em_status_t ret; - - if (!eo_ctx->stop_sent) { /* in case state change gets delayed on event overload */ - em_event_t event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, m_shm->pool); - - test_fatal_if(event == EM_EVENT_UNDEF, "Can't allocate stop event!\n"); - - app_msg_t *msg = em_event_pointer(event); - - msg->command = CMD_DONE; - msg->id = em_core_id(); - ret = em_send(event, eo_ctx->stop_q); - test_fatal_if(ret != EM_OK, "em_send(): %s %" PRI_STAT, __func__, ret); - eo_ctx->stop_sent++; - } -} - -void cleanup(app_eo_ctx_t *eo_ctx) -{ - int cores = m_shm->core_count; - - for (int i = 0; i < cores; i++) { - eo_ctx->cdat[i].count = 0; - eo_ctx->cdat[i].cancelled = 0; - eo_ctx->cdat[i].jobs_deleted = 0; - eo_ctx->cdat[i].jobs = 0; - eo_ctx->cdat[i].acc_time = 0; - } -} - -void write_trace(app_eo_ctx_t *eo_ctx, const char *name) -{ - int cores = m_shm->core_count; - FILE *file = stdout; - - if (strcmp(name, "stdout")) - file = fopen(g_options.csv, "w"); - if (file == NULL) { - APPL_PRINT("FAILED to open trace file\n"); - return; - } - - fprintf(file, "\n\n#BEGIN TRACE FORMAT 2\n"); /* for offline analyzers */ - fprintf(file, "res_ns,res_hz,period_ns,max_period_ns,clksrc,num_tmo,loops,"); - fprintf(file, "traces,noskip,SW-ver,bg,mz,timers\n"); - fprintf(file, "%lu,%lu,%lu,%lu,%d,%d,%d,%d,%d,%s,\"%d/%lu\",\"%d/%lu\",%d\n", - g_options.res_ns, - g_options.res_hz, - g_options.period_ns, - g_options.max_period_ns, - g_options.clock_src, - g_options.num_periodic, - g_options.num_runs, - g_options.tracebuf, - g_options.noskip, - VERSION, - g_options.bg_events, g_options.bg_time_ns / 1000UL, - g_options.mz_mb, g_options.mz_ns / 1000000UL, - g_options.num_timers); - fprintf(file, "time_hz,meas_time_hz,timer_hz,meas_timer_hz,linux_hz\n"); - fprintf(file, "%lu,%lu,%lu,%lu,%lu\n", - eo_ctx->time_hz, - eo_ctx->meas_time_hz, - eo_ctx->test_hz, - eo_ctx->meas_test_hz, - eo_ctx->linux_hz); - - fprintf(file, "tmo_id,period_ns,period_ticks,ack_late"); - fprintf(file, ",start_tick,start_ns,first_ns,first\n"); - for (int i = 0; i < g_options.num_periodic; i++) { - fprintf(file, "%d,%lu,%lu,%lu,%lu,%lu,%lu,%lu\n", - i, eo_ctx->tmo_data[i].period_ns, - eo_ctx->tmo_data[i].ticks, - eo_ctx->tmo_data[i].ack_late, - eo_ctx->tmo_data[i].start, - eo_ctx->tmo_data[i].start_ts, - eo_ctx->tmo_data[i].first_ns, - eo_ctx->tmo_data[i].first); - } - - fprintf(file, "id,op,tick,time_ns,linux_time_ns,counter,core,timer\n"); - for (int c = 0; c < cores; c++) { - for (int i = 0; i < eo_ctx->cdat[c].count; i++) { - fprintf(file, "%d,%s,%lu,%lu,%lu,%d,%d,%d\n", - eo_ctx->cdat[c].trc[i].id, - op_labels[eo_ctx->cdat[c].trc[i].op], - eo_ctx->cdat[c].trc[i].tick, - eo_ctx->cdat[c].trc[i].ts, - eo_ctx->cdat[c].trc[i].linuxt, - eo_ctx->cdat[c].trc[i].count, - c, - eo_ctx->cdat[c].trc[i].tidx); - } - } - fprintf(file, "#END TRACE\n\n"); - if (file != stdout) - fclose(file); -} - -void show_global_stats(app_eo_ctx_t *eo_ctx) -{ - APPL_PRINT("\nTOTAL STATS:\n"); - APPL_PRINT(" Num tmo: %lu\n", eo_ctx->global_stat.num_tmo); - APPL_PRINT(" Num late ack: %lu", eo_ctx->global_stat.num_late); - APPL_PRINT(" (%lu %%)\n", - (eo_ctx->global_stat.num_late * 100) / 
eo_ctx->global_stat.num_tmo);
-	APPL_PRINT(" Max early arrival: %.1f us %s\n",
-		   ((double)eo_ctx->global_stat.max_early_ns) / 1000.0,
-		   (uint64_t)llabs(eo_ctx->global_stat.max_early_ns) > g_options.res_ns ? "!" : "");
-	APPL_PRINT(" Max diff from tgt: %.1f us (res %.1f us) %s\n",
-		   ((double)eo_ctx->global_stat.max_dev_ns) / 1000.0,
-		   (double)g_options.res_ns / 1000.0,
-		   (uint64_t)llabs(eo_ctx->global_stat.max_dev_ns) > (2 * g_options.res_ns) ?
-		   ">2x res!" : "");
-	APPL_PRINT(" Max CPU load: %d %%\n", eo_ctx->global_stat.max_cpu);
-	if (eo_ctx->global_stat.max_dispatch)
-		APPL_PRINT(" Max EO rcv time: %lu ns\n", eo_ctx->global_stat.max_dispatch);
-	APPL_PRINT("\n");
-}
-
-uint64_t random_tmo_ns(uint64_t minval, uint64_t maxval)
-{
-	if (maxval == 0)
-		maxval = g_options.max_period_ns;
-	if (minval == 0)
-		minval = g_options.min_period_ns;
-
-	uint64_t r = random() % (maxval - minval + 1);
-
-	return r + minval; /* ns between min/max */
-}
-
-uint64_t random_work_ns(rnd_state_t *rng)
-{
-	uint64_t r;
-	int32_t r1;
-
-	random_r(&rng->rdata, &r1);
-	r = (uint64_t)r1;
-	if (r % 100 >= g_options.work_prop) /* probability of work, roughly */
-		return 0;
-
-	random_r(&rng->rdata, &r1);
-	r = (uint64_t)r1 % (g_options.max_work_ns - g_options.min_work_ns + 1);
-	return r + g_options.min_work_ns;
-}
-
-tmo_trace *find_tmo(app_eo_ctx_t *eo_ctx, int id, int count, int *last)
-{
-	int cores = m_shm->core_count;
-	tmo_trace *trc = NULL;
-	int last_count = 0;
-
-	for (int c = 0; c < cores; c++) {
-		for (int i = 0; i < eo_ctx->cdat[c].count; i++) { /* find id */
-			if (eo_ctx->cdat[c].trc[i].op == OP_TMO &&
-			    eo_ctx->cdat[c].trc[i].id == id) { /* this TMO */
-				if (eo_ctx->cdat[c].trc[i].count == count)
-					trc = &eo_ctx->cdat[c].trc[i];
-				/* always run through for last_count */
-				if (eo_ctx->cdat[c].trc[i].count > last_count)
-					last_count = eo_ctx->cdat[c].trc[i].count;
-			}
-		}
-	}
-	*last = last_count;
-	return trc;
-}
-
-int do_one_tmo(int id, app_eo_ctx_t *eo_ctx,
-	       uint64_t *min, uint64_t *max, uint64_t *first,
-	       int64_t *tgt_max, int64_t *max_early_ns, int *evnum)
-{
-	int num = 0;
-	uint64_t diff;
-	uint64_t prev = 0;
-	int last = 0;
-	int last_num;
-	uint64_t period_ns = eo_ctx->tmo_data[id].period_ns;
-	uint64_t start_ns = eo_ctx->tmo_data[id].start_ts;
-	int64_t max_tgt_diff = 0;
-
-	*max = 0;
-	*min = INT64_MAX;
-
-	/* find in sequential order for diff to work.
TODO this gets very slow with many tmos */ - - for (int count = 1; count < g_options.tracebuf; count++) { - tmo_trace *tmo = find_tmo(eo_ctx, id, count, &last_num); - - if (!tmo) { - if (last != count - 1) - APPL_PRINT("MISSING TMO: id %d, count %d\n", id, count); - *tgt_max = max_tgt_diff; - return num; - } - last++; - if (count == 1) { /* first period may be different */ - uint64_t tgt = start_ns + eo_ctx->tmo_data[id].first_ns; - int64_t tgtdiff = (int64_t)tmo->ts - (int64_t)tgt; - - if (llabs(max_tgt_diff) < llabs(tgtdiff)) { - max_tgt_diff = tgtdiff; - *evnum = count; - } - if (tgtdiff < *max_early_ns) - *max_early_ns = tgtdiff; - - diff = tmo->ts - eo_ctx->tmo_data[id].start_ts; - *first = diff; - start_ns += eo_ctx->tmo_data[id].first_ns; /* from now constant period */ - } else { - diff = tmo->ts - prev; - /*skip last, could be while stopping */ - if (last_num > count && tmo->ts < eo_ctx->stopped) { - if (diff > *max) - *max = diff; - if (diff < *min) - *min = diff; - - /* calculate distance to target */ - uint64_t tgt = start_ns + (count - 1) * period_ns; - int64_t tgtdiff = (int64_t)tmo->ts - (int64_t)tgt; - - if (llabs(max_tgt_diff) < llabs(tgtdiff)) { - max_tgt_diff = tgtdiff; - *evnum = count; - } - if (tgtdiff < *max_early_ns) - *max_early_ns = tgtdiff; - } - } - prev = tmo->ts; - num++; - } - *tgt_max = max_tgt_diff; - return num; -} - -bool timing_statistics(app_eo_ctx_t *eo_ctx) -{ - /* basic statistics, more with offline tools (-w) */ - uint64_t max_ts = 0, min_ts = 0, first_ts = 0; - int64_t tgt_max = 0; - const int cores = m_shm->core_count; - uint64_t system_used = eo_ctx->stopped - eo_ctx->started; - bool stop_loops = false; - - for (int c = 0; c < cores; c++) { - core_data *cdat = &eo_ctx->cdat[c]; - uint64_t eo_used = cdat->acc_time; - double perc = (double)eo_used / (double)system_used * 100; - - if (perc > 100) - perc = 100; - APPL_PRINT("STAT_CORE [%d]: %d tmos, %d jobs, EO used %.1f %% CPU time\n", - c, cdat->count, cdat->jobs, perc); - if (perc > eo_ctx->global_stat.max_cpu) - eo_ctx->global_stat.max_cpu = round(perc); - eo_ctx->global_stat.num_tmo += cdat->count; - } - - for (int id = 0; id < g_options.num_periodic; id++) { /* each timeout */ - tmo_setup *tmo_data = &eo_ctx->tmo_data[id]; - int64_t max_early = 0; - int evnum = 0; - int num = do_one_tmo(id, eo_ctx, - &min_ts, &max_ts, &first_ts, &tgt_max, &max_early, &evnum); - - APPL_PRINT("STAT-TMO [%d]: %d tmos (tmr#%d), period %lu ns (", - id, num, tmo_data->tidx, tmo_data->period_ns); - if (num) { - int64_t maxdiff = (int64_t)max_ts - - (int64_t)tmo_data->period_ns; - int64_t mindiff = (int64_t)min_ts - - (int64_t)tmo_data->period_ns; - int64_t firstdiff = (int64_t)first_ts - - (int64_t)tmo_data->first_ns; - - APPL_PRINT("%lu ticks), interval %ld ns ... +%ld ns", - tmo_data->ticks, mindiff, maxdiff); - APPL_PRINT(" (%ld us ... 
+%ld us)\n", mindiff / 1000, maxdiff / 1000); - if (tmo_data->first_ns != tmo_data->period_ns) - APPL_PRINT(" - 1st period set %lu ns, was %ld ns (diff %.2f us)\n", - tmo_data->first_ns, first_ts, (double)firstdiff / 1000); - APPL_PRINT(" - Max diff from target %.2f us, ev #%d\n", - (double)tgt_max / 1000, evnum); - if (llabs(tgt_max) > llabs(eo_ctx->global_stat.max_dev_ns)) - eo_ctx->global_stat.max_dev_ns = tgt_max; - if (g_options.stop_limit && - ((uint64_t)llabs(tgt_max) > g_options.stop_limit)) - stop_loops = true; - if (max_early < eo_ctx->global_stat.max_early_ns) - eo_ctx->global_stat.max_early_ns = max_early; - } else { - APPL_PRINT(" ERROR - no timeouts received\n"); - if (g_options.stop_limit) - stop_loops = true; - } - } - - APPL_PRINT("Starting timeout loop took %lu us (%lu per tmo)\n", - eo_ctx->start_loop_ns / 1000, - eo_ctx->start_loop_ns / 1000 / g_options.num_periodic); - - if (!g_options.dispatch) - return stop_loops; - - /* - * g_options.dispatch set - * - * Calculate EO rcv min-max-avg: - */ - uint64_t min = UINT64_MAX, max = 0, avg = 0; - uint64_t prev_ts = 0; - int prev_count = 0; - int num = 0; - - for (int c = 0; c < cores; c++) { - for (int i = 0; i < g_options.tracebuf; i++) { - core_data *cdat = &eo_ctx->cdat[c]; - - if (cdat->trc[i].op == OP_PROF_ENTER_CB) { - prev_ts = cdat->trc[i].ts; - prev_count = cdat->trc[i].count; - } else if (cdat->trc[i].op == OP_PROF_EXIT_CB) { - uint64_t diff_ts; - uint64_t ns; - - if (prev_count != cdat->trc[i].count) - APPL_PRINT("No enter cnt=%d\n", prev_count); - - diff_ts = cdat->trc[i].ts - prev_ts; - ns = diff_ts; - - if (ns < min) - min = ns; - if (ns > max) - max = ns; - avg += ns; - num++; - } - } - } - - APPL_PRINT("%d dispatcher enter-exit samples\n", num); - APPL_PRINT("PROF-DISPATCH rcv time: min %lu ns, max %lu ns, avg %lu ns\n", - min, max, num > 0 ? 
avg / num : 0); - - if (max > eo_ctx->global_stat.max_dispatch) - eo_ctx->global_stat.max_dispatch = max; - - return stop_loops; -} - -void profile_statistics(e_op op, int cores, app_eo_ctx_t *eo_ctx) -{ - uint64_t min = UINT64_MAX; - uint64_t max = 0, avg = 0, num = 0; - uint64_t t; - - for (int c = 0; c < cores; c++) { - for (int i = 0; i < g_options.tracebuf; i++) { - if (eo_ctx->cdat[c].trc[i].op == op) { - t = eo_ctx->cdat[c].trc[i].linuxt; - if (min > t) - min = t; - if (max < t) - max = t; - avg += t; - num++; - } - } - } - if (num) - APPL_PRINT("%-15s %-15lu %-15lu %-15lu %-15lu\n", op_labels[op], - num, min, max, avg / num); -} - -void profile_all_stats(int cores, app_eo_ctx_t *eo_ctx) -{ - APPL_PRINT("API timing profiles:\n"); - APPL_PRINT("api count min max avg (ns)\n"); - APPL_PRINT("------------------------------------------------------------------------\n"); - profile_statistics(OP_PROF_CREATE, cores, eo_ctx); - profile_statistics(OP_PROF_SET, cores, eo_ctx); - profile_statistics(OP_PROF_ACK, cores, eo_ctx); - profile_statistics(OP_PROF_DELETE, cores, eo_ctx); - profile_statistics(OP_PROF_CANCEL, cores, eo_ctx); - profile_statistics(OP_PROF_TMR_CREATE, cores, eo_ctx); - profile_statistics(OP_PROF_TMR_DELETE, cores, eo_ctx); -} - -void analyze(app_eo_ctx_t *eo_ctx) -{ - int cores = m_shm->core_count; - int cancelled = 0; - int job_del = 0; - - bool stop = timing_statistics(eo_ctx); - - if (g_options.profile) - profile_all_stats(cores, eo_ctx); - - for (int c = 0; c < cores; c++) { - cancelled += eo_ctx->cdat[c].cancelled; - job_del += eo_ctx->cdat[c].jobs_deleted; - } - - show_global_stats(eo_ctx); - - /* write trace file */ - if (g_options.csv != NULL) - write_trace(eo_ctx, g_options.csv); - - APPL_PRINT("%d/%d timeouts were cancelled\n", cancelled, g_options.num_periodic); - - if (g_options.bg_events) - APPL_PRINT("%d/%d bg jobs were deleted\n", job_del, g_options.bg_events); - if (g_options.mz_mb) - APPL_PRINT("%lu memzeros\n", eo_ctx->mz_count); - double span = eo_ctx->stopped - eo_ctx->started; - - span /= 1000000000; - APPL_PRINT("Timer runtime %f s\n", span); - - test_fatal_if(cancelled != g_options.num_periodic, - "Not all tmos deleted (did not arrive at all?)\n"); - - if (stop) { - APPL_PRINT("STOP due to timing error larger than limit! (%f s)\n", - (double)g_options.stop_limit / 1000000000); - raise(SIGINT); - } -} - -int add_trace(app_eo_ctx_t *eo_ctx, int id, e_op op, uint64_t ns, int count, int tidx) -{ - int core = em_core_id(); - - if (eo_ctx->cdat[core].trc == NULL) - return 1; /* skip during early startup */ - - tmo_trace *tmo = &eo_ctx->cdat[core].trc[eo_ctx->cdat[core].count]; - - if (eo_ctx->cdat[core].count < g_options.tracebuf) { - if (op < OP_PROF_ACK && (tidx != -1)) /* to be a bit faster for profiling */ - tmo->tick = em_timer_current_tick(m_shm->test_tmr[tidx]); - tmo->op = op; - tmo->id = id; - tmo->ts = TIME_STAMP_FN(); - tmo->linuxt = ns; - tmo->count = count; - tmo->tidx = tidx; - eo_ctx->cdat[core].count++; - } - - return (eo_ctx->cdat[core].count >= g_options.trcstop) ? 
0 : 1;
-}
-
-void send_bg_events(app_eo_ctx_t *eo_ctx)
-{
-	for (int n = 0; n < g_options.bg_events; n++) {
-		em_event_t event = em_alloc(sizeof(app_msg_t),
-					    EM_EVENT_TYPE_SW, m_shm->pool);
-		test_fatal_if(event == EM_EVENT_UNDEF, "Can't allocate bg event!\n");
-		app_msg_t *msg = em_event_pointer(event);
-
-		msg->command = CMD_BGWORK;
-		msg->count = 0;
-		msg->id = n + 1;
-		msg->arg = g_options.bg_time_ns;
-		test_fatal_if(em_send(event, eo_ctx->bg_q) != EM_OK, "Can't send bg event!\n");
-	}
-}
-
-void start_periodic(app_eo_ctx_t *eo_ctx)
-{
-	app_msg_t *msg;
-	em_event_t event;
-	em_tmo_t tmo;
-	em_tmo_flag_t flag = EM_TMO_FLAG_PERIODIC;
-	uint64_t t1 = 0;
-	uint64_t max_period = 0;
-	int tidx;
-	uint64_t first_same_tick = 0;
-
-	if (g_options.noskip)
-		flag |= EM_TMO_FLAG_NOSKIP;
-	eo_ctx->stop_sent = 0;
-	eo_ctx->started = TIME_STAMP_FN();
-
-	for (int i = 0; i < g_options.num_periodic; i++) {
-		event = em_alloc(sizeof(app_msg_t), g_options.etype, m_shm->pool);
-		test_fatal_if(event == EM_EVENT_UNDEF,
-			      "Can't allocate test event (%ldB)!\n",
-			      sizeof(app_msg_t));
-
-		msg = em_event_pointer(event);
-		msg->command = CMD_TMO;
-		msg->count = 0;
-		msg->id = i;
-		tidx = random() % g_options.num_timers;
-		msg->tidx = tidx;
-
-		if (eo_ctx->tmo_data[i].handle == EM_TMO_UNDEF) { /* not -q */
-			if (g_options.profile)
-				t1 = TIME_STAMP_FN();
-			tmo = em_tmo_create(m_shm->test_tmr[tidx], flag, eo_ctx->test_q);
-			if (g_options.profile)
-				add_prof(eo_ctx, t1, OP_PROF_CREATE, msg);
-			test_fatal_if(tmo == EM_TMO_UNDEF, "Can't allocate test_tmo!\n");
-			eo_ctx->tmo_data[i].handle = tmo;
-		}
-		msg->tmo = eo_ctx->tmo_data[i].handle;
-		eo_ctx->tmo_data[i].tidx = tidx;
-
-		uint64_t period_ticks;
-		uint64_t first = 0;
-		em_status_t stat;
-
-		if (g_options.period_ns) {
-			eo_ctx->tmo_data[i].period_ns = g_options.period_ns;
-		} else { /* 0: use random */
-			eo_ctx->tmo_data[i].period_ns = random_tmo_ns(0, 0);
-		}
-		if (max_period < eo_ctx->tmo_data[i].period_ns)
-			max_period = eo_ctx->tmo_data[i].period_ns;
-		period_ticks = em_timer_ns_to_tick(m_shm->test_tmr[tidx],
-						   eo_ctx->tmo_data[i].period_ns);
-
-		if (EXTRA_PRINTS && i == 0)
-			APPL_PRINT("Timer Hz %lu\n", eo_ctx->test_hz);
-
-		test_fatal_if(period_ticks < 1, "timer resolution is too low!\n");
-
-		if (g_options.first_ns < 0) /* use random */
-			eo_ctx->tmo_data[i].first_ns = random_tmo_ns(0, llabs(g_options.first_ns));
-		else if (g_options.first_ns == 0) /* use period */
-			eo_ctx->tmo_data[i].first_ns = eo_ctx->tmo_data[i].period_ns;
-		else
-			eo_ctx->tmo_data[i].first_ns = g_options.first_ns;
-
-		first = em_timer_ns_to_tick(m_shm->test_tmr[tidx], eo_ctx->tmo_data[i].first_ns);
-		eo_ctx->tmo_data[i].ack_late = 0;
-		eo_ctx->tmo_data[i].ticks = period_ticks;
-
-		/* store start time */
-		eo_ctx->tmo_data[i].start_ts = TIME_STAMP_FN();
-		eo_ctx->tmo_data[i].start = em_timer_current_tick(m_shm->test_tmr[tidx]);
-		first += eo_ctx->tmo_data[i].start; /* ticks from now */
-
-		if (i == 0) { /* save tick from first tmo */
-			first_same_tick = first;
-		}
-		if (g_options.same_tick) {
-			first = first_same_tick;
-			/* this is not accurate, but makes summary analysis work better */
-			eo_ctx->tmo_data[i].start_ts = eo_ctx->tmo_data[0].start_ts;
-		}
-		eo_ctx->tmo_data[i].first = first;
-
-		if (g_options.profile)
-			t1 = TIME_STAMP_FN();
-		stat = em_tmo_set_periodic(eo_ctx->tmo_data[i].handle, first, period_ticks, event);
-		if (g_options.profile)
-			add_prof(eo_ctx, t1, OP_PROF_SET, msg);
-
-		if (unlikely(stat != EM_OK)) {
-			if (EXTRA_PRINTS) {
-				em_timer_tick_t now =
em_timer_current_tick(m_shm->test_tmr[tidx]); - - APPL_PRINT("FAILED to set tmo, stat=%d: first=%lu, ", stat, first); - APPL_PRINT("now %lu (diff %ld), period=%lu\n", - now, (int64_t)first - (int64_t)now, period_ticks); - APPL_PRINT("(first_ns %lu)\n", eo_ctx->tmo_data[i].first_ns); - } - test_fatal_if(1, "Can't activate test tmo!\n"); - } - } - - eo_ctx->start_loop_ns = TIME_STAMP_FN() - eo_ctx->started; - eo_ctx->max_period = max_period; - /* time window to detect possible late timeouts before cleanup */ - eo_ctx->cooloff = ((max_period / 1000000000ULL) * 2) + 1; - if (eo_ctx->cooloff < MIN_COOLOFF) - eo_ctx->cooloff = MIN_COOLOFF; /* HB periods (secs) */ -} - -void add_prof(app_eo_ctx_t *eo_ctx, uint64_t t1, e_op op, app_msg_t *msg) -{ - uint64_t dif = TIME_STAMP_FN() - t1; - int id, count; - - if (unlikely(msg == NULL)) { - id = -1; - count = -1; - } else { - id = msg->id; - count = msg->count; - } - - add_trace(eo_ctx, id, op, dif, count, -1); - /* if this filled the buffer it's handled on next tmo */ -} - -int handle_periodic(app_eo_ctx_t *eo_ctx, em_event_t event) -{ - int core = em_core_id(); - app_msg_t *msg = (app_msg_t *)em_event_pointer(event); - int reuse = 1; - e_state state = __atomic_load_n(&eo_ctx->state, __ATOMIC_ACQUIRE); - uint64_t t1 = 0; - em_tmo_stats_t ctrs = { 0 }; /* init to avoid gcc warning with LTO */ - em_status_t ret; - - msg->count++; - - /* this is to optionally test abnormal exits only */ - if (unlikely(g_options.abort != 0) && abs(g_options.abort) <= msg->count) { - if (g_options.abort < 0) { /* cause segfault to test exception here */ - uint64_t *fault = NULL; - /* coverity[FORWARD_NULL] */ - msg->arg = *fault; - } else { - abort(); - } - } - - if (likely(state == STATE_RUN)) { /* add tmo trace */ - if (!add_trace(eo_ctx, msg->id, OP_TMO, 0, msg->count, msg->tidx)) - send_stop(eo_ctx); /* triggers state change to stop */ - - if (unlikely(em_tmo_get_type(event, NULL, false) != EM_TMO_TYPE_PERIODIC)) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Unexpected, event is not tmo\n"); - - if (g_options.work_prop) { - uint64_t work = random_work_ns(&eo_ctx->cdat[core].rng); - - if (work) { /* add extra delay */ - uint64_t t2; - uint64_t ns = TIME_STAMP_FN(); - - do { - t2 = TIME_STAMP_FN(); - } while (t2 < (ns + work)); - add_trace(eo_ctx, msg->id, OP_WORK, work, msg->count, -1); - } - } - - /* only ack while in running state */ - add_trace(eo_ctx, msg->id, OP_ACK, 0, msg->count, msg->tidx); - if (g_options.profile) - t1 = TIME_STAMP_FN(); - em_status_t stat = em_tmo_ack(msg->tmo, event); - - if (g_options.profile) - add_prof(eo_ctx, t1, OP_PROF_ACK, msg); - if (unlikely(stat != EM_OK)) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "ack() fail!\n"); - - } else if (state == STATE_COOLOFF) { /* trace, but cancel */ - em_event_t tmo_event = EM_EVENT_UNDEF; - - add_trace(eo_ctx, msg->id, OP_TMO, 0, msg->count, msg->tidx); - em_tmo_get_stats(msg->tmo, &ctrs); - APPL_PRINT("STAT-ACK [%d]: %lu acks, %lu late, %lu skips\n", - msg->id, ctrs.num_acks, ctrs.num_late_ack, ctrs.num_period_skips); - eo_ctx->tmo_data[msg->id].ack_late = ctrs.num_late_ack; - eo_ctx->global_stat.num_late += ctrs.num_late_ack; - - if (unlikely(msg->id >= g_options.num_periodic)) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Corrupted tmo msg?\n"); - - if (g_options.profile) - t1 = TIME_STAMP_FN(); - if (g_options.no_del) { /* don't delete each round */ - ret = em_tmo_cancel(msg->tmo, &tmo_event); - if (g_options.profile) - add_prof(eo_ctx, t1, OP_PROF_CANCEL, msg); - test_fatal_if(ret 
== EM_OK, "tmo_cancel ok, expecting fail here!\n"); - } else { - ret = em_tmo_delete(msg->tmo, &tmo_event); - if (g_options.profile) - add_prof(eo_ctx, t1, OP_PROF_DELETE, msg); - test_fatal_if(ret != EM_OK, "tmo_delete failed, ret %" PRI_STAT "!\n", ret); - eo_ctx->tmo_data[msg->id].handle = EM_TMO_UNDEF; - } - - eo_ctx->cdat[core].cancelled++; - if (unlikely(tmo_event != EM_EVENT_UNDEF)) { /* not expected as we have the event */ - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "periodic tmo delete returned evt!\n"); - } - add_trace(eo_ctx, msg->id, OP_CANCEL, 0, msg->count, msg->tidx); - reuse = 0; /* free this last tmo event of canceled tmo */ - } else { - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "Timeout in state %s!\n", state_labels[state]); - } - return reuse; -} - -void analyze_measure(app_eo_ctx_t *eo_ctx, uint64_t linuxns, uint64_t tmrtick, - uint64_t timetick) -{ - uint64_t linux_t2 = linux_time_ns(); - uint64_t time_t2 = TIME_STAMP_FN(); - uint64_t tmr_t2 = em_timer_current_tick(m_shm->test_tmr[0]); - - linux_t2 = linux_t2 - linuxns; - time_t2 = time_t2 - timetick; - tmr_t2 = tmr_t2 - tmrtick; - APPL_PRINT("%lu timer ticks in %lu ns (linux time) ", tmr_t2, linux_t2); - double hz = 1000000000 / - ((double)linux_t2 / (double)tmr_t2); - APPL_PRINT("=> %.1f Hz (%.1f MHz). Timer reports %lu Hz\n", - hz, hz / 1000000, eo_ctx->test_hz); - eo_ctx->meas_test_hz = round(hz); - hz = 1000000000 / ((double)linux_t2 / (double)time_t2); - APPL_PRINT("Timestamp measured: %.1f Hz (%.1f MHz)\n", hz, hz / 1000000); - eo_ctx->meas_time_hz = round(hz); - - test_fatal_if(tmr_t2 < 1, "TIMER SEEMS NOT RUNNING AT ALL!?"); -} - -int do_memzero(app_msg_t *msg, app_eo_ctx_t *eo_ctx) -{ - static int count; - - add_trace(eo_ctx, -1, OP_MEMZERO, g_options.mz_mb, msg->count, -1); - if (eo_ctx->mz_data == NULL) { /* first time we only allocate */ - if (g_options.mz_huge) { - eo_ctx->mz_data = mmap(NULL, g_options.mz_mb * 1024UL * 1024UL, - PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE | - MAP_HUGETLB | MAP_LOCKED, - -1, 0); - if (eo_ctx->mz_data == MAP_FAILED) - eo_ctx->mz_data = NULL; - } else { - eo_ctx->mz_data = malloc(g_options.mz_mb * 1024UL * 1024UL); - } - test_fatal_if(eo_ctx->mz_data == NULL, "mz_mem reserve failed!"); - } else { - memset(eo_ctx->mz_data, 0, g_options.mz_mb * 1024UL * 1024UL); - eo_ctx->mz_count++; - } - add_trace(eo_ctx, -1, OP_MEMZERO_END, g_options.mz_mb, count, -1); - __atomic_fetch_add(&count, 1, __ATOMIC_RELAXED); - return 0; -} - -int do_bg_work(em_event_t evt, app_eo_ctx_t *eo_ctx) -{ - app_msg_t *msg = (app_msg_t *)em_event_pointer(evt); - uint64_t t1 = TIME_STAMP_FN(); - uint64_t ts; - int32_t rnd; - int core = em_core_id(); - uint64_t sum = 0; - - if (__atomic_load_n(&eo_ctx->state, __ATOMIC_ACQUIRE) != STATE_RUN) { - eo_ctx->cdat[core].jobs_deleted++; - if (EXTRA_PRINTS) - APPL_PRINT("Deleting job after %u iterations\n", msg->count); - return 0; /* stop & delete */ - } - - if (g_options.jobs) - add_trace(eo_ctx, -1, OP_BGWORK, msg->arg, msg->count, -1); - - msg->count++; - eo_ctx->cdat[core].jobs++; - int blocks = g_options.bg_size / g_options.bg_chunk; - - random_r(&eo_ctx->cdat[core].rng.rdata, &rnd); - rnd = rnd % blocks; - uint64_t *dptr = (uint64_t *)((uintptr_t)eo_ctx->bg_data + rnd * g_options.bg_chunk); - - do { - /* jump around memory reading from selected chunk */ - random_r(&eo_ctx->cdat[core].rng.rdata, &rnd); - rnd = rnd % (g_options.bg_chunk / sizeof(uint64_t)); - sum += *(dptr + rnd); - ts = TIME_STAMP_FN() - t1; - } while 
(ts < msg->arg); - - *dptr = sum; - - if (g_options.mz_mb && msg->id == 1) { /* use only one job stream for memzero */ - static uint64_t last_mz; - - if (msg->count < 10) /* don't do mz before some time */ - last_mz = TIME_STAMP_FN(); - ts = TIME_STAMP_FN() - last_mz; - if (ts > g_options.mz_ns) { - do_memzero(msg, eo_ctx); - last_mz = TIME_STAMP_FN(); - } - } - - test_fatal_if(em_send(evt, eo_ctx->bg_q) != EM_OK, "Failed to send BG job event!"); - return 1; -} - -void handle_heartbeat(app_eo_ctx_t *eo_ctx, em_event_t event) -{ - app_msg_t *msg = (app_msg_t *)em_event_pointer(event); - int cores = m_shm->core_count; - int done = 0; - e_state state = __atomic_load_n(&eo_ctx->state, __ATOMIC_SEQ_CST); - static int runs; - static uint64_t linuxns; - static uint64_t tmrtick; - static uint64_t timetick; - static bool startup = true; - - /* heartbeat runs states of the test */ - - if (startup) { - first_timer_create(eo_ctx); - startup = false; - } - - msg->count++; - add_trace(eo_ctx, -1, OP_HB, linux_time_ns(), msg->count, -1); - - if (EXTRA_PRINTS) - APPL_PRINT("."); - - switch (state) { - case STATE_INIT: - if (msg->count > eo_ctx->last_hbcount + INIT_WAIT) { - __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); - eo_ctx->last_hbcount = msg->count; - APPL_PRINT("ROUND %d\n", runs + 1); - APPL_PRINT("->Starting tick measurement\n"); - } - break; - - case STATE_MEASURE: /* measure timer frequencies */ - if (linuxns == 0) { - linuxns = linux_time_ns(); - timetick = TIME_STAMP_FN(); - /* use timer[0] for this always */ - tmrtick = em_timer_current_tick(m_shm->test_tmr[0]); - } - if (msg->count > eo_ctx->last_hbcount + MEAS_PERIOD) { - analyze_measure(eo_ctx, linuxns, tmrtick, timetick); - linuxns = 0; - /* start new run */ - if (g_options.num_runs > 1) - APPL_PRINT("** Round %d\n", runs + 1); - __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); - add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), eo_ctx->state, -1); - } - break; - - case STATE_STABILIZE: /* give some time to get up */ - if (g_options.bg_events) - send_bg_events(eo_ctx); - __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); - add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), eo_ctx->state, -1); - if (EXTRA_PRINTS) - APPL_PRINT("->Starting tmos\n"); - start_periodic(eo_ctx); - eo_ctx->last_hbcount = msg->count; - break; - - case STATE_RUN: /* run the test, avoid prints */ - for (int i = 0; i < cores; i++) { - if (eo_ctx->cdat[i].count >= - g_options.tracebuf) { - done++; - break; - } - } - if (done) { - __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); - add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), eo_ctx->state, -1); - eo_ctx->last_hbcount = msg->count; - if (EXTRA_PRINTS) - APPL_PRINT("->All cores done\n"); - } - break; - - case STATE_COOLOFF: /* stop further timeouts */ - if (msg->count > (eo_ctx->last_hbcount + eo_ctx->cooloff)) { - __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); - add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), eo_ctx->state, -1); - eo_ctx->last_hbcount = msg->count; - if (EXTRA_PRINTS) - APPL_PRINT("->Starting analyze\n"); - } - break; - - case STATE_ANALYZE: /* expected to be stopped, analyze data */ - APPL_PRINT("\n"); - if (g_options.recreate) - delete_timers(eo_ctx); - analyze(eo_ctx); - cleanup(eo_ctx); - /* re-start test cycle */ - __atomic_store_n(&eo_ctx->state, STATE_INIT, __ATOMIC_SEQ_CST); - runs++; - if (runs >= g_options.num_runs && g_options.num_runs != 0) { - /* terminate test app */ - APPL_PRINT("%d runs done\n", runs); - raise(SIGINT); - } - 
eo_ctx->last_hbcount = msg->count; - if (g_options.recreate) - create_timers(eo_ctx); - break; - - default: - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Invalid test state"); - } - - /* heartbeat never stops */ - if (em_tmo_ack(eo_ctx->heartbeat_tmo, event) != EM_OK) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "HB ack() fail!\n"); -} - -void usage(void) -{ - printf("%s\n", instructions); - - printf("Usage:\n"); - for (int i = 0; ; i++) { - if (longopts[i].name == NULL || descopts[i] == NULL) - break; - printf("--%s or -%c: %s\n", longopts[i].name, longopts[i].val, descopts[i]); - } -} - -int parse_my_args(int first, int argc, char *argv[]) -{ - optind = first + 1; /* skip '--' */ - while (1) { - int opt; - int long_index; - char *endptr; - int64_t num; - - opt = getopt_long(argc, argv, shortopts, longopts, &long_index); - - if (opt == -1) - break; /* No more options */ - - switch (opt) { - case 's': { - g_options.noskip = 0; - } - break; - case 'a': { - g_options.profile = 1; - } - break; - case 'b': { - g_options.jobs = 1; - } - break; - case 'd': { - g_options.dispatch = 1; - } - break; - case 'i': { - g_options.info_only = 1; - } - break; - case 'u': { - g_options.usehuge = 1; - } - break; - case 'q': { - g_options.no_del = 1; - } - break; - case 'S': { - g_options.same_tick = 1; - } - break; - case 'R': { - g_options.recreate = 1; - } - break; - case 'w': { /* optional arg */ - g_options.csv = "stdout"; - if (optarg != NULL) - g_options.csv = optarg; - } - break; - case 'm': { - if (!arg_to_ns(optarg, &num)) - return 0; - if (num < 1) - return 0; - g_options.max_period_ns = (uint64_t)num; - } - break; - case 'L': { - if (!arg_to_ns(optarg, &num)) - return 0; - if (num < 0) - return 0; - g_options.stop_limit = (uint64_t)num; - } - break; - case 'l': { - if (!arg_to_ns(optarg, &num)) - return 0; - if (num < 1) - return 0; - g_options.min_period_ns = num; - } - break; - case 't': { - unsigned long size, perc; - - num = sscanf(optarg, "%lu,%lu", &size, &perc); - if (num == 0 || size < 10 || - sizeof(tmo_trace) * size > MAX_TMO_BYTES) - return 0; - g_options.tracebuf = size; - if (num == 2 && perc > 100) - return 0; - if (num == 2) - g_options.trcstop = ((perc * size) / 100); - else - g_options.trcstop = ((STOP_THRESHOLD * size) / 100); - } - break; - case 'e': { - unsigned int min_us, max_us, prop; - - if (sscanf(optarg, "%u,%u,%u", &min_us, &max_us, &prop) != 3) - return 0; - if (prop > 100 || max_us < 1) - return 0; - g_options.min_work_ns = 1000ULL * min_us; - g_options.max_work_ns = 1000ULL * max_us; - g_options.work_prop = prop; - } - break; - case 'o': { - unsigned int mb; - uint64_t ms; - unsigned int hp = 0; - - if (sscanf(optarg, "%u,%lu,%u", &mb, &ms, &hp) < 2) - return 0; - if (mb < 1 || ms < 1) - return 0; - g_options.mz_mb = mb; - g_options.mz_ns = ms * 1000UL * 1000UL; - if (hp) - g_options.mz_huge = 1; - } - break; - case 'j': { - unsigned int evts, us, kb, chunk; - - num = sscanf(optarg, "%u,%u,%u,%u", &evts, &us, &kb, &chunk); - if (num == 0 || evts < 1) - return 0; - g_options.bg_events = evts; - if (num > 1 && us) - g_options.bg_time_ns = us * 1000ULL; - if (num > 2 && kb) - g_options.bg_size = kb * 1024; - if (num > 3 && chunk) - g_options.bg_chunk = chunk * 1024; - if (g_options.bg_chunk > g_options.bg_size) - return 0; - } - break; - case 'n': { - num = strtol(optarg, &endptr, 0); - if (*endptr != '\0' || num < 1) - return 0; - g_options.num_periodic = num; - } - break; - case 'p': { - if (!arg_to_ns(optarg, &num)) - return 0; - if (num < 0) - return 0; - 
g_options.period_ns = num; - } - break; - case 'f': { - if (!arg_to_ns(optarg, &num)) - return 0; - g_options.first_ns = num; - } - break; - case 'c': { - num = strtol(optarg, &endptr, 0); - if (*endptr != '\0' || num < 0) - return 0; - g_options.clock_src = num; - } - break; - case 'r': { - if (!arg_to_ns(optarg, &num)) - return 0; - if (num < 0) - return 0; - g_options.res_ns = num; - } - break; - case 'z': { - num = strtol(optarg, &endptr, 0); - if (*endptr != '\0' || num < 1) - return 0; - g_options.res_hz = num; - g_options.res_ns = 0; - } - break; - case 'x': { - num = strtol(optarg, &endptr, 0); - if (*endptr != '\0' || num < 0) - return 0; - g_options.num_runs = num; - } - break; - case 'k': { - num = strtol(optarg, &endptr, 0); - if (*endptr != '\0') - return 0; - g_options.abort = num; - } - break; - - case 'y': { - num = strtol(optarg, &endptr, 0); - if (*endptr != '\0' || num < 1) - return 0; - g_options.num_timers = num; - } - break; - - case 'g': { - num = strtol(optarg, &endptr, 0); - if (*endptr != '\0' || num < 0) - return 0; - g_options.etype = (em_event_type_t)num; - } - break; - - case 'h': - default: - opterr = 0; - usage(); - return 0; - } - } - - optind = 1; /* cm_setup() to parse again */ - return 1; -} - -/** - * Before EM - Init - */ -void test_init(const appl_conf_t *appl_conf) -{ - (void)appl_conf; - int core = em_core_id(); - - /* first core creates ShMem */ - if (core == 0) { - m_shm = env_shared_reserve("Timer_test", sizeof(timer_app_shm_t)); - /* initialize it */ - if (m_shm) - memset(m_shm, 0, sizeof(timer_app_shm_t)); - - if (EXTRA_PRINTS) - APPL_PRINT("%ldk shared memory for app context\n", - sizeof(timer_app_shm_t) / 1000); - - } else { - m_shm = env_shared_lookup("Timer_test"); - } - - if (m_shm == NULL) { - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "ShMem init failed on EM-core: %u", - em_core_id()); - } - - APPL_PRINT("core %d: %s done\n", core, __func__); -} - -/** - * Startup of the timer test EM application - */ -void test_start(const appl_conf_t *appl_conf) -{ - em_eo_t eo; - em_queue_t queue; - em_status_t stat; - em_timer_attr_t attr; - app_eo_ctx_t *eo_ctx; - em_timer_res_param_t res_capa; - em_timer_capability_t capa = { 0 }; /* init to avoid gcc warning with LTO */ - em_core_mask_t mask; - em_queue_group_t grp; - em_atomic_group_t agrp; - - /* Store the number of EM-cores running the application */ - m_shm->core_count = appl_conf->core_count; - - if (appl_conf->num_procs > 1) { - APPL_PRINT("\n!! 
Multiple PROCESS MODE NOT SUPPORTED !!\n\n"); - raise(SIGINT); - return; - } - - if (appl_conf->num_pools >= 1) - m_shm->pool = appl_conf->pools[0]; - else - m_shm->pool = EM_POOL_DEFAULT; - - eo_ctx = &m_shm->eo_context; - memset(eo_ctx, 0, sizeof(app_eo_ctx_t)); - eo_ctx->tmo_data = calloc(g_options.num_periodic, sizeof(tmo_setup)); - test_fatal_if(eo_ctx->tmo_data == NULL, "Can't alloc tmo_setups"); - - eo = em_eo_create(APP_EO_NAME, app_eo_start, app_eo_start_local, - app_eo_stop, app_eo_stop_local, app_eo_receive, - eo_ctx); - test_fatal_if(eo == EM_EO_UNDEF, "Failed to create EO!"); - - stat = em_register_error_handler(my_error_handler); - test_fatal_if(stat != EM_OK, "Failed to register error handler"); - - /* Create atomic group and queues for control messages */ - stat = em_queue_group_get_mask(EM_QUEUE_GROUP_DEFAULT, &mask); - test_fatal_if(stat != EM_OK, "Failed to get default Q grp mask!"); - grp = em_queue_group_create_sync("CTRL_GRP", &mask); - test_fatal_if(grp == EM_QUEUE_GROUP_UNDEF, "Failed to create Q grp!"); - agrp = em_atomic_group_create("CTRL_AGRP", grp); - test_fatal_if(agrp == EM_ATOMIC_GROUP_UNDEF, "Failed to create atomic grp!"); - eo_ctx->agrp = agrp; - - queue = em_queue_create_ag("Control Q", EM_QUEUE_PRIO_NORMAL, agrp, NULL); - stat = em_eo_add_queue_sync(eo, queue); - test_fatal_if(stat != EM_OK, "Failed to create hb queue!"); - eo_ctx->hb_q = queue; - - /* Highest priority queue for stop msg to handle tmo overload */ - queue = em_queue_create_ag("Stop Q", EM_QUEUE_PRIO_HIGHEST, agrp, NULL); - stat = em_eo_add_queue_sync(eo, queue); - test_fatal_if(stat != EM_OK, "Failed to create stop queue!"); - eo_ctx->stop_q = queue; - - /* parallel high priority for timeout handling*/ - queue = em_queue_create("Tmo Q", - EM_QUEUE_TYPE_PARALLEL, - EM_QUEUE_PRIO_HIGH, - EM_QUEUE_GROUP_DEFAULT, NULL); - stat = em_eo_add_queue_sync(eo, queue); - test_fatal_if(stat != EM_OK, "Failed to create queue!"); - eo_ctx->test_q = queue; - - /* another parallel low priority for background work*/ - queue = em_queue_create("BG Q", - EM_QUEUE_TYPE_PARALLEL, - EM_QUEUE_PRIO_LOWEST, - EM_QUEUE_GROUP_DEFAULT, NULL); - stat = em_eo_add_queue_sync(eo, queue); - test_fatal_if(stat != EM_OK, "Failed to add queue!"); - eo_ctx->bg_q = queue; - - /* create two timers so HB and tests can be independent */ - em_timer_attr_init(&attr); - strncpy(attr.name, "HBTimer", EM_TIMER_NAME_LEN); - m_shm->hb_tmr = em_timer_create(&attr); - test_fatal_if(m_shm->hb_tmr == EM_TIMER_UNDEF, - "Failed to create HB timer!"); - - /* test timer preparation, timer(s) created later */ - test_fatal_if(g_options.res_ns && g_options.res_hz, "Give resolution in ns OR hz!"); - test_fatal_if(g_options.recreate && g_options.no_del, "Can't keep tmo but delete timers!"); - test_fatal_if(g_options.same_tick && (g_options.num_timers > 1), - "Same tick currently supports only one timer"); - test_fatal_if(g_options.same_tick && (g_options.first_ns < 0), - "Same tick (-S) can't do random first timeout"); - test_fatal_if(g_options.num_timers > MAX_TEST_TIMERS, "Too many test timers"); - - em_timer_attr_init(&eo_ctx->tmr_attr); - stat = em_timer_capability(&capa, g_options.clock_src); - - APPL_PRINT("Timer capability for clksrc %d:\n", g_options.clock_src); - APPL_PRINT(" maximum timers: %d\n", capa.max_timers); - APPL_PRINT(" max_res %lu ns %lu hz min_tmo %lu max_tmo %lu\n", - capa.max_res.res_ns, capa.max_res.res_hz, - capa.max_res.min_tmo, capa.max_res.max_tmo); - APPL_PRINT(" max_tmo %lu ns %lu hz min_tmo %lu max_tmo %lu\n", - 
capa.max_tmo.res_ns, capa.max_tmo.res_hz, - capa.max_tmo.min_tmo, capa.max_tmo.max_tmo); - - test_fatal_if(stat != EM_OK, "Given clk_src is not supported\n"); - memset(&res_capa, 0, sizeof(em_timer_res_param_t)); - if (!g_options.res_hz) { - res_capa.res_ns = g_options.res_ns == 0 ? capa.max_res.res_ns : g_options.res_ns; - APPL_PRINT("Trying %lu ns resolution capability on clk %d\n", - res_capa.res_ns, g_options.clock_src); - } else { - res_capa.res_hz = g_options.res_hz; - APPL_PRINT("Trying %lu Hz resolution capability on clk %d\n", - res_capa.res_hz, g_options.clock_src); - } - - APPL_PRINT("Asking timer capability for clksrc %d:\n", g_options.clock_src); - APPL_PRINT("%lu ns %lu hz min_tmo %lu max_tmo %lu\n", - res_capa.res_ns, res_capa.res_hz, - res_capa.min_tmo, res_capa.max_tmo); - stat = em_timer_res_capability(&res_capa, g_options.clock_src); - APPL_PRINT("-> Timer res_capability:\n"); - APPL_PRINT("max_res %lu ns %lu hz min_tmo %lu max_tmo %lu\n", - res_capa.res_ns, res_capa.res_hz, - res_capa.min_tmo, res_capa.max_tmo); - test_fatal_if(stat != EM_OK, "Given resolution is not supported (ret %d)\n", stat); - - if (!g_options.max_period_ns) { - g_options.max_period_ns = DEF_MAX_PERIOD; - if (g_options.max_period_ns > res_capa.max_tmo) - g_options.max_period_ns = res_capa.max_tmo; - } - if (!g_options.min_period_ns) { - g_options.min_period_ns = res_capa.res_ns * DEF_MIN_PERIOD; - if (g_options.min_period_ns < res_capa.min_tmo) - g_options.min_period_ns = res_capa.min_tmo; - } - if (g_options.first_ns && (uint64_t)llabs(g_options.first_ns) < g_options.min_period_ns) { - if (g_options.first_ns < 0) - g_options.first_ns = 0 - g_options.min_period_ns; - else - g_options.first_ns = g_options.min_period_ns; - APPL_PRINT("NOTE: First period too short, updated to %ld ns\n", g_options.first_ns); - } - - eo_ctx->tmr_attr.resparam = res_capa; - if (g_options.res_hz) /* can only have one */ - eo_ctx->tmr_attr.resparam.res_ns = 0; - else - eo_ctx->tmr_attr.resparam.res_hz = 0; - eo_ctx->tmr_attr.num_tmo = g_options.num_periodic; - eo_ctx->tmr_attr.resparam.max_tmo = g_options.max_period_ns + - eo_ctx->tmr_attr.resparam.min_tmo; - strncpy(eo_ctx->tmr_attr.name, "TestTimer", EM_TIMER_NAME_LEN); - g_options.res_ns = eo_ctx->tmr_attr.resparam.res_ns; - - /* Start EO */ - stat = em_eo_start_sync(eo, NULL, NULL); - test_fatal_if(stat != EM_OK, "Failed to start EO!"); - - if (g_options.info_only) { /* signal stop here */ - raise(SIGINT); - } - - mlockall(MCL_FUTURE); -} - -void -create_timers(app_eo_ctx_t *eo_ctx) -{ - uint64_t t1 = 0; - - for (int i = 0; i < g_options.num_timers; i++) { - if (g_options.profile) - t1 = TIME_STAMP_FN(); - m_shm->test_tmr[i] = em_timer_create(&eo_ctx->tmr_attr); - if (g_options.profile) - add_prof(eo_ctx, t1, OP_PROF_TMR_CREATE, NULL); - test_fatal_if(m_shm->test_tmr[i] == EM_TIMER_UNDEF, - "Failed to create test timer #%d!", i + 1); - } - APPL_PRINT("%d test timers created\n", g_options.num_timers); -} - -void -delete_timers(app_eo_ctx_t *eo_ctx) -{ - uint64_t t1 = 0; - - for (int i = 0; i < g_options.num_timers; i++) { - if (m_shm->test_tmr[i] != EM_TIMER_UNDEF) { - em_status_t ret; - - if (g_options.profile) - t1 = TIME_STAMP_FN(); - ret = em_timer_delete(m_shm->test_tmr[i]); - if (g_options.profile) - add_prof(eo_ctx, t1, OP_PROF_TMR_DELETE, NULL); - test_fatal_if(ret != EM_OK, "Timer:%" PRI_TMR " delete:%" PRI_STAT "", - m_shm->test_tmr[i], ret); - m_shm->test_tmr[i] = EM_TIMER_UNDEF; - } - } - APPL_PRINT("%d test timers deleted\n", g_options.num_timers); -} - 
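[Editor's note: illustrative sketch, not part of the original diff or source tree.]
The create_timers()/delete_timers() pair above shows how this test profiles the
timer-management APIs: take a timestamp, make the EM call, then record the elapsed
time with add_prof(). A minimal standalone version of that pattern follows; it
reuses only names that appear elsewhere in this file (TIME_STAMP_FN, add_prof,
OP_PROF_TMR_CREATE, em_timer_create), while the wrapper name
profiled_timer_create() is hypothetical:

	/* Hypothetical wrapper, shown only to highlight the profiling pattern:
	 * timestamp -> EM API call -> add_prof() stores the delta (in
	 * TIME_STAMP_FN() units) as an OP_PROF_TMR_CREATE trace entry.
	 */
	static em_timer_t profiled_timer_create(app_eo_ctx_t *eo_ctx,
						const em_timer_attr_t *attr)
	{
		uint64_t t1 = TIME_STAMP_FN();		/* before the call */
		em_timer_t tmr = em_timer_create(attr);	/* API under measurement */

		add_prof(eo_ctx, t1, OP_PROF_TMR_CREATE, NULL);
		return tmr;
	}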
-void -first_timer_create(app_eo_ctx_t *eo_ctx) -{ - em_timer_t tmr[PRINT_MAX_TMRS]; - em_timer_attr_t attr; - - create_timers(eo_ctx); - - int num_timers = em_timer_get_all(tmr, PRINT_MAX_TMRS); - - test_fatal_if(num_timers < 2, "Not all timers created"); - - for (int i = 0; i < (num_timers > PRINT_MAX_TMRS ? PRINT_MAX_TMRS : num_timers); i++) { - if (em_timer_get_attr(tmr[i], &attr) != EM_OK) { - APPL_ERROR("Can't get timer info\n"); - return; - } - APPL_PRINT("Timer \"%s\" info:\n", attr.name); - APPL_PRINT(" -resolution: %" PRIu64 " ns\n", attr.resparam.res_ns); - APPL_PRINT(" -max_tmo: %" PRIu64 " ms\n", attr.resparam.max_tmo / 1000); - APPL_PRINT(" -num_tmo: %d\n", attr.num_tmo); - APPL_PRINT(" -clk_src: %d\n", attr.resparam.clk_src); - APPL_PRINT(" -tick Hz: %" PRIu64 " hz\n", - em_timer_get_freq(tmr[i])); - } - - eo_ctx->test_hz = em_timer_get_freq(m_shm->test_tmr[0]); /* use timer[0] */ - test_fatal_if(eo_ctx->test_hz == 0, - "get_freq() failed, timer:%" PRI_TMR "", m_shm->test_tmr[0]); -} - -void test_stop(const appl_conf_t *appl_conf) -{ - (void)appl_conf; - const int core = em_core_id(); - em_status_t ret; - em_eo_t eo; - - if (appl_conf->num_procs > 1) { - APPL_PRINT("%s(): skip\n", __func__); - return; - } - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - eo = em_eo_find(APP_EO_NAME); - test_fatal_if(eo == EM_EO_UNDEF, "Could not find EO:%s", APP_EO_NAME); - - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); - - if (g_options.info_only) - return; - - ret = em_timer_delete(m_shm->hb_tmr); - test_fatal_if(ret != EM_OK, "Timer:%" PRI_TMR " delete:%" PRI_STAT "", - m_shm->hb_tmr, ret); - delete_timers(&m_shm->eo_context); - free(m_shm->eo_context.tmo_data); -} - -void test_term(const appl_conf_t *appl_conf) -{ - (void)appl_conf; - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - if (m_shm != NULL) { - em_unregister_error_handler(); - env_shared_free(m_shm); - m_shm = NULL; - } -} - -static em_status_t app_eo_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - app_msg_t *msg; - struct timespec ts; - uint64_t period; - em_event_t event; - app_eo_ctx_t *eo_ctx = (app_eo_ctx_t *)eo_context; - - (void)eo; - (void)conf; - - eo_ctx->appstart = TIME_STAMP_FN(); - - if (g_options.info_only) - return EM_OK; - - APPL_PRINT("\nActive run options:\n"); - APPL_PRINT(" num timers: %d\n", g_options.num_timers); - APPL_PRINT(" num timeouts: %d\n", g_options.num_periodic); - if (g_options.res_hz) { - APPL_PRINT(" resolution: %lu Hz (%f MHz)\n", g_options.res_hz, - (double)g_options.res_hz / 1000000); - } else { - APPL_PRINT(" resolution: %lu ns (%f s)\n", g_options.res_ns, - (double)g_options.res_ns / 1000000000); - } - if (g_options.period_ns == 0) - APPL_PRINT(" period: random\n"); - else - APPL_PRINT(" period: %lu ns (%f s%s)\n", g_options.period_ns, - (double)g_options.period_ns / 1000000000, - g_options.period_ns == 0 ? " (random)" : ""); - if (g_options.first_ns < 0) - APPL_PRINT(" first period: random up to %lld ns\n", llabs(g_options.first_ns)); - else - APPL_PRINT(" first period: %ld ns (%fs%s)\n", g_options.first_ns, - (double)g_options.first_ns / 1000000000, - g_options.first_ns == 0 ? 
" (=period)" : ""); - APPL_PRINT(" max period: %lu ns (%f s)\n", g_options.max_period_ns, - (double)g_options.max_period_ns / 1000000000); - APPL_PRINT(" min period: %lu ns (%f s)\n", g_options.min_period_ns, - (double)g_options.min_period_ns / 1000000000); - if (g_options.num_runs > 1) - APPL_PRINT(" diff err lim: %lu ns (%f s)\n", g_options.stop_limit, - (double)g_options.stop_limit / 1000000000); - APPL_PRINT(" csv: %s\n", - g_options.csv == NULL ? "(no)" : g_options.csv); - APPL_PRINT(" tracebuffer: %d events (%luKiB)\n", - g_options.tracebuf, - g_options.tracebuf * sizeof(tmo_trace) / 1024); - APPL_PRINT(" stop limit: %d events\n", g_options.trcstop); - APPL_PRINT(" use NOSKIP: %s\n", g_options.noskip ? "yes" : "no"); - APPL_PRINT(" profile API: %s\n", g_options.profile ? "yes" : "no"); - APPL_PRINT(" dispatch prof:%s\n", g_options.dispatch ? "yes" : "no"); - APPL_PRINT(" work propability:%u %%\n", g_options.work_prop); - if (g_options.work_prop) { - APPL_PRINT(" min_work: %luns\n", g_options.min_work_ns); - APPL_PRINT(" max_work: %luns\n", g_options.max_work_ns); - } - APPL_PRINT(" bg events: %u\n", g_options.bg_events); - eo_ctx->bg_data = NULL; - if (g_options.bg_events) { - APPL_PRINT(" bg work: %lu us\n", g_options.bg_time_ns / 1000); - APPL_PRINT(" bg data: %u kiB\n", g_options.bg_size / 1024); - APPL_PRINT(" bg chunk: %u kiB (%u blks)\n", - g_options.bg_chunk / 1024, - g_options.bg_size / g_options.bg_chunk); - APPL_PRINT(" bg trace: %s\n", g_options.jobs ? "yes" : "no"); - - eo_ctx->bg_data = malloc(g_options.bg_size); - test_fatal_if(eo_ctx->bg_data == NULL, - "Can't allocate bg work data (%dkiB)!\n", - g_options.bg_size / 1024); - } - APPL_PRINT(" memzero: "); - if (g_options.mz_mb) - APPL_PRINT("%u MB %severy %lu ms\n", - g_options.mz_mb, - g_options.mz_huge ? "(mmap huge) " : "", - g_options.mz_ns / 1000000UL); - else - APPL_PRINT("no\n"); - - if (g_options.abort != 0) { - APPL_PRINT(" abort after: "); - if (g_options.abort) - APPL_PRINT("%d%s\n", - g_options.abort, g_options.abort < 0 ? "(segfault)" : ""); - else - APPL_PRINT("0 (no)\n"); - } - if (g_options.num_runs != 1) { - APPL_PRINT(" delete tmos: %s\n", g_options.no_del ? "no" : "yes"); - APPL_PRINT(" recreate tmr: %s\n", g_options.recreate ? "yes" : "no"); - } - if (g_options.etype != EM_EVENT_TYPE_SW) - APPL_PRINT(" using evtype: %u (0x%X)\n", g_options.etype, g_options.etype); - APPL_PRINT(" same start tick: %s", g_options.same_tick ? 
"yes" : "no"); - - APPL_PRINT("\nTracing first %d tmo events\n", g_options.tracebuf); - - if (g_options.bg_events) - prefault(eo_ctx->bg_data, g_options.bg_size); - - /* create periodic timeout for heartbeat */ - eo_ctx->heartbeat_tmo = em_tmo_create(m_shm->hb_tmr, EM_TMO_FLAG_PERIODIC, eo_ctx->hb_q); - test_fatal_if(eo_ctx->heartbeat_tmo == EM_TMO_UNDEF, - "Can't allocate heartbeat_tmo!\n"); - - event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, m_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, "Can't allocate event (%ldB)!\n", - sizeof(app_msg_t)); - - msg = em_event_pointer(event); - msg->command = CMD_HEARTBEAT; - msg->count = 0; - msg->id = -1; - eo_ctx->hb_hz = em_timer_get_freq(m_shm->hb_tmr); - if (eo_ctx->hb_hz < 10) - APPL_ERROR("WARNING: HB timer hz very low!\n"); - else - APPL_PRINT("HB timer frequency is %lu\n", eo_ctx->hb_hz); - - period = eo_ctx->hb_hz; /* 1s */ - test_fatal_if(period < 1, "timer resolution is too low!\n"); - - /* linux time check */ - test_fatal_if(clock_getres(CLOCK_MONOTONIC, &ts) != 0, - "clock_getres() failed!\n"); - - period = ts.tv_nsec + (ts.tv_sec * 1000000000ULL); - eo_ctx->linux_hz = 1000000000ULL / period; - APPL_PRINT("Linux reports clock running at %" PRIu64 " hz\n", eo_ctx->linux_hz); - APPL_PRINT("ODP says time_global runs at %lu Hz\n", odp_time_global_res()); - eo_ctx->time_hz = odp_time_global_res(); - - /* start heartbeat */ - __atomic_store_n(&eo_ctx->state, STATE_INIT, __ATOMIC_SEQ_CST); - - em_status_t stat = em_tmo_set_periodic(eo_ctx->heartbeat_tmo, 0, eo_ctx->hb_hz, event); - - if (EXTRA_PRINTS && stat != EM_OK) - APPL_PRINT("FAILED to set HB tmo, stat=%d: period=%lu\n", stat, eo_ctx->hb_hz); - test_fatal_if(stat != EM_OK, "Can't activate heartbeat tmo!\n"); - - stat = em_dispatch_register_enter_cb(enter_cb); - test_fatal_if(stat != EM_OK, "enter_cb() register failed!"); - stat = em_dispatch_register_exit_cb(exit_cb); - test_fatal_if(stat != EM_OK, "exit_cb() register failed!"); - - srandom(time(NULL)); - if (g_options.max_work_ns > RAND_MAX || - g_options.max_period_ns > RAND_MAX) { - double s = (double)RAND_MAX / (double)eo_ctx->test_hz; - - APPL_PRINT("WARNING: rnd number range is less than max values (up to %.4fs)\n", s); - } - if (EXTRA_PRINTS) - APPL_PRINT("WARNING: extra prints enabled, expect some jitter\n"); - - return EM_OK; -} - -/** - * @private - * - * EO per thread start function. - */ -static em_status_t app_eo_start_local(void *eo_context, em_eo_t eo) -{ - app_eo_ctx_t *const eo_ctx = eo_context; - int core = em_core_id(); - - (void)eo; - - if (EXTRA_PRINTS) - APPL_PRINT("EO local start\n"); - test_fatal_if(core >= MAX_CORES, "Too many cores!"); - eo_ctx->cdat[core].trc = allocate_tracebuf(g_options.tracebuf, sizeof(tmo_trace), - &eo_ctx->cdat[core].trc_size); - test_fatal_if(eo_ctx->cdat[core].trc == NULL, "Failed to allocate trace buffer!"); - eo_ctx->cdat[core].count = 0; - eo_ctx->cdat[core].cancelled = 0; - eo_ctx->cdat[core].jobs_deleted = 0; - eo_ctx->cdat[core].jobs = 0; - - memset(&eo_ctx->cdat[core].rng, 0, sizeof(rnd_state_t)); - initstate_r(time(NULL), eo_ctx->cdat[core].rng.rndstate, RND_STATE_BUF, - &eo_ctx->cdat[core].rng.rdata); - srandom_r(time(NULL), &eo_ctx->cdat[core].rng.rdata); - return EM_OK; -} - -/** - * @private - * - * EO stop function. 
- */ -static em_status_t app_eo_stop(void *eo_context, em_eo_t eo) -{ - app_eo_ctx_t *const eo_ctx = eo_context; - em_event_t event = EM_EVENT_UNDEF; - em_status_t ret; - - if (EXTRA_PRINTS) - APPL_PRINT("EO stop\n"); - - if (g_options.info_only) - return EM_OK; - - if (eo_ctx->heartbeat_tmo != EM_TMO_UNDEF) { - em_tmo_delete(eo_ctx->heartbeat_tmo, &event); - eo_ctx->heartbeat_tmo = EM_TMO_UNDEF; - if (event != EM_EVENT_UNDEF) - em_free(event); - } - - /* cancel all test timers in case test didn't complete */ - int dcount = 0; - - for (int i = 0; i < g_options.num_periodic; i++) { - if (eo_ctx->tmo_data[i].handle != EM_TMO_UNDEF) { - event = EM_EVENT_UNDEF; - em_tmo_delete(eo_ctx->tmo_data[i].handle, &event); - eo_ctx->tmo_data[i].handle = EM_TMO_UNDEF; - if (event != EM_EVENT_UNDEF) - em_free(event); - dcount++; - } - } - if (dcount) - APPL_PRINT("NOTE: deleted %d still active tmos\n", dcount); - - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); /* remove and delete */ - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", ret, eo); - - ret = em_atomic_group_delete(((app_eo_ctx_t *)eo_context)->agrp); - test_fatal_if(ret != EM_OK, - "EO remove atomic grp:%" PRI_STAT " EO:%" PRI_EO "", ret, eo); - - ret = em_dispatch_unregister_enter_cb(enter_cb); - test_fatal_if(ret != EM_OK, "enter_cb() unregister:%" PRI_STAT, ret); - ret = em_dispatch_unregister_exit_cb(exit_cb); - test_fatal_if(ret != EM_OK, "exit_cb() unregister:%" PRI_STAT, ret); - - if (eo_ctx->bg_data != NULL) - free(eo_ctx->bg_data); - eo_ctx->bg_data = NULL; - if (eo_ctx->mz_data != NULL) { - if (g_options.mz_huge) - munmap(eo_ctx->mz_data, g_options.mz_mb * 1024UL * 1024UL); - else - free(eo_ctx->mz_data); - - eo_ctx->mz_data = NULL; - } - - double rt = TIME_STAMP_FN() - eo_ctx->appstart; - - APPL_PRINT("EO runtime was %.2f min\n", rt / 1e9 / 60); - return EM_OK; -} - -/** - * @private - * - * EO stop local function. 
- */ -static em_status_t app_eo_stop_local(void *eo_context, em_eo_t eo) -{ - int core = em_core_id(); - app_eo_ctx_t *const eo_ctx = eo_context; - - (void)eo; - - if (EXTRA_PRINTS) - APPL_PRINT("EO local stop\n"); - free_tracebuf(eo_ctx->cdat[core].trc, eo_ctx->cdat[core].trc_size); - eo_ctx->cdat[core].trc = NULL; - return EM_OK; -} - -/** - * @private - * - * EO receive function - */ -static void app_eo_receive(void *eo_context, em_event_t event, - em_event_type_t type, em_queue_t queue, - void *q_context) -{ - app_eo_ctx_t *const eo_ctx = eo_context; - int reuse = 0; - static int last_count; - - (void)q_context; - - if (type == EM_EVENT_TYPE_SW || type == g_options.etype) { - app_msg_t *msgin = (app_msg_t *)em_event_pointer(event); - - switch (msgin->command) { - case CMD_TMO: - reuse = handle_periodic(eo_ctx, event); - break; - - case CMD_HEARTBEAT: /* uses atomic queue */ - handle_heartbeat(eo_ctx, event); - last_count = msgin->count; - reuse = 1; - break; - - case CMD_BGWORK: - reuse = do_bg_work(event, eo_ctx); - break; - - case CMD_DONE: /* HB atomic queue */ { - e_state state = __atomic_load_n(&eo_ctx->state, __ATOMIC_ACQUIRE); - - /* only do this once */ - if (state == STATE_RUN && queue == eo_ctx->stop_q) { - __atomic_store_n(&eo_ctx->state, STATE_COOLOFF, __ATOMIC_SEQ_CST); - add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), STATE_COOLOFF, -1); - eo_ctx->last_hbcount = last_count; - eo_ctx->stopped = TIME_STAMP_FN(); - APPL_PRINT("Core %d reported DONE\n", msgin->id); - } - } - break; - - default: - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Invalid event!\n"); - } - } else { - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Invalid event type!\n"); - } - - if (!reuse) - em_free(event); -} - -int main(int argc, char *argv[]) -{ - /* pick app-specific arguments after '--' */ - int i; - - APPL_PRINT("EM periodic timer test %s\n\n", VERSION); - - for (i = 1; i < argc; i++) { - if (!strcmp(argv[i], "--")) - break; - } - if (i < argc) { - if (!parse_my_args(i, argc, argv)) { - APPL_PRINT("Invalid application arguments\n"); - return 1; - } - } - - return cm_setup(argc, argv); -} +/* + * Copyright (c) 2020-2021, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * Event Machine timer test for periodic timeouts.
+ *
+ * See the instructions string in timer_test_periodic.h for usage.
+ *
+ * Exception/error management is simplified and aborts on any error.
+ */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <inttypes.h>
+#include <string.h>
+#include <stdio.h>
+#include <math.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <libgen.h>
+#include <time.h>
+#include <getopt.h>
+#include <sys/mman.h>
+
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+#include <odp_api.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+
+#include "timer_test_periodic.h"
+
+#define VERSION "v1.3"
+
+struct {
+	int num_periodic;
+	uint64_t res_ns;
+	uint64_t res_hz;
+	uint64_t period_ns;
+	int64_t first_ns;
+	uint64_t max_period_ns;
+	uint64_t min_period_ns;
+	uint64_t min_work_ns;
+	uint64_t max_work_ns;
+	unsigned int work_prop;
+	int clock_src;
+	const char *csv;
+	int num_runs;
+	int tracebuf;
+	int trcstop;
+	int noskip;
+	int profile;
+	int dispatch;
+	int jobs;
+	int info_only;
+	int usehuge; /* for trace buffer */
+	int bg_events;
+	uint64_t bg_time_ns;
+	int bg_size;
+	int bg_chunk;
+	int mz_mb;
+	int mz_huge;
+	uint64_t mz_ns;
+	int abort; /* for testing abnormal exit */
+	int num_timers;
+	int no_del;
+	int same_tick;
+	int recreate;
+	uint64_t stop_limit;
+	em_event_type_t etype;
+
+} g_options = { .num_periodic = 1, /* defaults for basic check */
+		.res_ns = DEF_RES_NS,
+		.res_hz = 0,
+		.period_ns = DEF_PERIOD * DEF_RES_NS,
+		.first_ns = 0,
+		.max_period_ns = 0, /* max,min updated in init if not given on cmdline */
+		.min_period_ns = 0,
+		.min_work_ns = 0,
+		.max_work_ns = 0,
+		.work_prop = 0,
+		.clock_src = EM_TIMER_CLKSRC_DEFAULT,
+		.csv = NULL,
+		.num_runs = 1,
+		.tracebuf = DEF_TMO_DATA,
+		.trcstop = ((STOP_THRESHOLD * DEF_TMO_DATA) / 100),
+		.noskip = 1,
+		.profile = 0,
+		.dispatch = 0,
+		.jobs = 0,
+		.info_only = 0,
+		.usehuge = 0,
+		.bg_events = 0,
+		.bg_time_ns = 10000,
+		.bg_size = 5000 * 1024,
+		.bg_chunk = 50 * 1024,
+		.mz_mb = 0,
+		.mz_huge = 0,
+		.mz_ns = 0,
+		.abort = 0,
+		.num_timers = 1,
+		.no_del = 0,
+		.same_tick = 0,
+		.recreate = 0,
+		.stop_limit = 0,
+		.etype = EM_EVENT_TYPE_SW
+	};
+
+typedef struct global_stats_t {
+	uint64_t num_late; /* ack late */
+	int64_t max_dev_ns; /* +- max deviation from target */
+	int64_t max_early_ns; /* max arrival before target time */
+	uint64_t num_tmo; /* total received tmo count */
+	int max_cpu; /* max CPU load % (any single) */
+	uint64_t max_dispatch; /* max EO receive time */
+} global_stats_t;
+
+typedef struct app_eo_ctx_t {
+	e_state state;
+	em_tmo_t heartbeat_tmo;
+	em_timer_attr_t tmr_attr;
+	em_queue_t hb_q;
+	em_queue_t test_q;
+	em_queue_t stop_q;
+	em_queue_t bg_q;
+	int cooloff;
+	int last_hbcount;
+	uint64_t hb_hz;
+	uint64_t test_hz;
+	uint64_t time_hz;
+	uint64_t meas_test_hz;
+	uint64_t meas_time_hz;
+	uint64_t linux_hz;
+	uint64_t max_period;
+	uint64_t started;
+	uint64_t stopped;
+	uint64_t appstart;
+	uint64_t start_loop_ns;
+	void *bg_data;
+	void *mz_data;
+	uint64_t mz_count;
+	int stop_sent;
+	em_atomic_group_t agrp;
+	global_stats_t global_stat;
+
tmo_setup *tmo_data; + core_data cdat[MAX_CORES]; +} app_eo_ctx_t; + +typedef struct timer_app_shm_t { + /* Number of EM cores running the application */ + unsigned int core_count; + em_pool_t pool; + app_eo_ctx_t eo_context; + em_timer_t hb_tmr; + em_timer_t test_tmr[MAX_TEST_TIMERS]; +} timer_app_shm_t; + +/* EM-thread locals */ +static __thread timer_app_shm_t *m_shm; + +static void start_periodic(app_eo_ctx_t *eo_context); +static int handle_periodic(app_eo_ctx_t *eo_context, em_event_t event); +static void send_stop(app_eo_ctx_t *eo_context); +static void handle_heartbeat(app_eo_ctx_t *eo_context, em_event_t event); +static void usage(void); +static int parse_my_args(int first, int argc, char *argv[]); +static void analyze(app_eo_ctx_t *eo_ctx); +static void write_trace(app_eo_ctx_t *eo_ctx, const char *name); +static void cleanup(app_eo_ctx_t *eo_ctx); +static int add_trace(app_eo_ctx_t *eo_ctx, int id, e_op op, uint64_t ns, int count, int tidx); +static uint64_t linux_time_ns(void); +static em_status_t app_eo_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); +static em_status_t app_eo_start_local(void *eo_context, em_eo_t eo); +static em_status_t app_eo_stop(void *eo_context, em_eo_t eo); +static em_status_t app_eo_stop_local(void *eo_context, em_eo_t eo); +static void app_eo_receive(void *eo_context, em_event_t event, + em_event_type_t type, em_queue_t queue, void *q_context); +static int arg_to_ns(const char *s, int64_t *val); +static void profile_statistics(e_op op, int cores, app_eo_ctx_t *eo_ctx); +static void profile_all_stats(int cores, app_eo_ctx_t *eo_ctx); +static void analyze_measure(app_eo_ctx_t *eo_ctx, uint64_t linuxns, + uint64_t tmrtick, uint64_t timetick); +static bool timing_statistics(app_eo_ctx_t *eo_ctx); +static void add_prof(app_eo_ctx_t *eo_ctx, uint64_t t1, e_op op, app_msg_t *msg); +static int do_one_tmo(int id, app_eo_ctx_t *eo_ctx, + uint64_t *min, uint64_t *max, uint64_t *first, + int64_t *tgt_max_ns, int64_t *max_early_ns, int *evnum); +static tmo_trace *find_tmo(app_eo_ctx_t *eo_ctx, int id, int count, int *last); +static uint64_t random_tmo_ns(uint64_t minval, uint64_t maxval); +static uint64_t random_work_ns(rnd_state_t *rng); +static void enter_cb(em_eo_t eo, void **eo_ctx, em_event_t events[], int num, + em_queue_t *queue, void **q_ctx); +static void exit_cb(em_eo_t eo); +static void send_bg_events(app_eo_ctx_t *eo_ctx); +static int do_bg_work(em_event_t evt, app_eo_ctx_t *eo_ctx); +static int do_memzero(app_msg_t *msg, app_eo_ctx_t *eo_ctx); +static em_status_t my_error_handler(em_eo_t eo, em_status_t error, + em_escope_t escope, va_list args); +static void *allocate_tracebuf(int numbuf, size_t bufsize, size_t *realsize); +static void free_tracebuf(void *ptr, size_t realsize); +static void prefault(void *buf, size_t size); +static void show_global_stats(app_eo_ctx_t *eo_ctx); +static void create_timers(app_eo_ctx_t *eo_ctx); +static void delete_timers(app_eo_ctx_t *eo_ctx); +static void first_timer_create(app_eo_ctx_t *eo_ctx); + +/* --------------------------------------- */ +em_status_t my_error_handler(em_eo_t eo, em_status_t error, + em_escope_t escope, va_list args) +{ + if (escope == 0xDEAD) { /* test_fatal_if */ + va_list my_args; + + va_copy(my_args, args); + + char *file = va_arg(my_args, char*); + const char *func = va_arg(my_args, const char*); + const int line = va_arg(my_args, const int); + const char *format = va_arg(my_args, const char*); + const char *base = basename(file); + + #pragma GCC diagnostic push + #pragma GCC 
diagnostic ignored "-Wformat-nonliteral" + fprintf(stderr, "FATAL - %s:%d, %s():\n", + base, line, func); + vfprintf(stderr, format, my_args); + #pragma GCC diagnostic pop + va_end(my_args); + } + return test_error_handler(eo, error, escope, args); +} + +void enter_cb(em_eo_t eo, void **eo_ctx, em_event_t events[], int num, + em_queue_t *queue, void **q_ctx) +{ + static int count; + app_eo_ctx_t *const my_eo_ctx = *eo_ctx; + + (void)eo; + (void)queue; + (void)q_ctx; + + if (unlikely(!my_eo_ctx)) + return; + + if (g_options.dispatch) { + for (int i = 0; i < num; i++) { + app_msg_t *msg = em_event_pointer(events[i]); + + add_trace(my_eo_ctx, msg->id, OP_PROF_ENTER_CB, + 0, count++, -1); + } + } + my_eo_ctx->cdat[em_core_id()].enter = TIME_STAMP_FN(); +} + +void exit_cb(em_eo_t eo) +{ + static int count; + app_eo_ctx_t *const my_eo_ctx = em_eo_get_context(eo); + + if (unlikely(!my_eo_ctx)) + return; + + if (g_options.dispatch) + add_trace(my_eo_ctx, -1, OP_PROF_EXIT_CB, 0, count++, -1); + + core_data *cdat = &my_eo_ctx->cdat[em_core_id()]; + uint64_t took; + + if (__atomic_load_n(&my_eo_ctx->state, __ATOMIC_ACQUIRE) == STATE_RUN) { + took = TIME_STAMP_FN() - cdat->enter; + cdat->acc_time += took; + } +} + +void prefault(void *buf, size_t size) +{ + uint8_t *ptr = (uint8_t *)buf; + + /* write all pages to allocate and pre-fault (reduce runtime jitter) */ + if (EXTRA_PRINTS) + APPL_PRINT("Pre-faulting %lu bytes at %p (EM core %d)\n", size, buf, em_core_id()); + for (size_t i = 0; i < size; i += 4096) + *(ptr + i) = (uint8_t)i; +} + +void *allocate_tracebuf(int numbuf, size_t bufsize, size_t *realsize) +{ + if (g_options.usehuge) { + *realsize = (numbuf + 1) * bufsize; + void *ptr = mmap(NULL, *realsize, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE | MAP_HUGETLB | MAP_LOCKED, + -1, 0); + if (ptr == MAP_FAILED) { + APPL_PRINT("Huge page mapping failed for trace buffer (%lu bytes)\n", + *realsize); + return NULL; + } else { + return ptr; + } + + } else { + void *buf = calloc(numbuf + 1, bufsize); + + *realsize = numbuf * bufsize; + prefault(buf, *realsize); + return buf; + } +} + +void free_tracebuf(void *ptr, size_t realsize) +{ + if (g_options.usehuge) + munmap(ptr, realsize); + else + free(ptr); +} + +uint64_t linux_time_ns(void) +{ + struct timespec ts; + uint64_t ns; + + clock_gettime(CLOCK_MONOTONIC_RAW, &ts); + ns = ts.tv_nsec + (ts.tv_sec * 1000000000ULL); + return ns; +} + +int arg_to_ns(const char *s, int64_t *val) +{ + char *endp; + int64_t num, mul = 1; + + num = strtol(s, &endp, 0); + if (num == 0 && *s != '0') + return 0; + + if (*endp != '\0') + switch (*endp) { + case 'n': + mul = 1; /* ns */ + break; + case 'u': + mul = 1000; /* us */ + break; + case 'm': + mul = 1000 * 1000; /* ms */ + break; + case 's': + mul = 1000 * 1000 * 1000; /* s */ + break; + default: + return 0; + } + + *val = num * mul; + return 1; +} + +void send_stop(app_eo_ctx_t *eo_ctx) +{ + em_status_t ret; + + if (!eo_ctx->stop_sent) { /* in case state change gets delayed on event overload */ + em_event_t event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, m_shm->pool); + + test_fatal_if(event == EM_EVENT_UNDEF, "Can't allocate stop event!\n"); + + app_msg_t *msg = em_event_pointer(event); + + msg->command = CMD_DONE; + msg->id = em_core_id(); + ret = em_send(event, eo_ctx->stop_q); + test_fatal_if(ret != EM_OK, "em_send(): %s %" PRI_STAT, __func__, ret); + eo_ctx->stop_sent++; + } +} + +void cleanup(app_eo_ctx_t *eo_ctx) +{ + int cores = m_shm->core_count; + + for (int i = 0; i < cores; 
i++) { + eo_ctx->cdat[i].count = 0; + eo_ctx->cdat[i].cancelled = 0; + eo_ctx->cdat[i].jobs_deleted = 0; + eo_ctx->cdat[i].jobs = 0; + eo_ctx->cdat[i].acc_time = 0; + } +} + +void write_trace(app_eo_ctx_t *eo_ctx, const char *name) +{ + int cores = m_shm->core_count; + FILE *file = stdout; + + if (strcmp(name, "stdout")) + file = fopen(g_options.csv, "w"); + if (file == NULL) { + APPL_PRINT("FAILED to open trace file\n"); + return; + } + + fprintf(file, "\n\n#BEGIN TRACE FORMAT 2\n"); /* for offline analyzers */ + fprintf(file, "res_ns,res_hz,period_ns,max_period_ns,clksrc,num_tmo,loops,"); + fprintf(file, "traces,noskip,SW-ver,bg,mz,timers\n"); + fprintf(file, "%lu,%lu,%lu,%lu,%d,%d,%d,%d,%d,%s,\"%d/%lu\",\"%d/%lu\",%d\n", + g_options.res_ns, + g_options.res_hz, + g_options.period_ns, + g_options.max_period_ns, + g_options.clock_src, + g_options.num_periodic, + g_options.num_runs, + g_options.tracebuf, + g_options.noskip, + VERSION, + g_options.bg_events, g_options.bg_time_ns / 1000UL, + g_options.mz_mb, g_options.mz_ns / 1000000UL, + g_options.num_timers); + fprintf(file, "time_hz,meas_time_hz,timer_hz,meas_timer_hz,linux_hz\n"); + fprintf(file, "%lu,%lu,%lu,%lu,%lu\n", + eo_ctx->time_hz, + eo_ctx->meas_time_hz, + eo_ctx->test_hz, + eo_ctx->meas_test_hz, + eo_ctx->linux_hz); + + fprintf(file, "tmo_id,period_ns,period_ticks,ack_late"); + fprintf(file, ",start_tick,start_ns,first_ns,first\n"); + for (int i = 0; i < g_options.num_periodic; i++) { + fprintf(file, "%d,%lu,%lu,%lu,%lu,%lu,%lu,%lu\n", + i, eo_ctx->tmo_data[i].period_ns, + eo_ctx->tmo_data[i].ticks, + eo_ctx->tmo_data[i].ack_late, + eo_ctx->tmo_data[i].start, + eo_ctx->tmo_data[i].start_ts, + eo_ctx->tmo_data[i].first_ns, + eo_ctx->tmo_data[i].first); + } + + fprintf(file, "id,op,tick,time_ns,linux_time_ns,counter,core,timer\n"); + for (int c = 0; c < cores; c++) { + for (int i = 0; i < eo_ctx->cdat[c].count; i++) { + fprintf(file, "%d,%s,%lu,%lu,%lu,%d,%d,%d\n", + eo_ctx->cdat[c].trc[i].id, + op_labels[eo_ctx->cdat[c].trc[i].op], + eo_ctx->cdat[c].trc[i].tick, + eo_ctx->cdat[c].trc[i].ts, + eo_ctx->cdat[c].trc[i].linuxt, + eo_ctx->cdat[c].trc[i].count, + c, + eo_ctx->cdat[c].trc[i].tidx); + } + } + fprintf(file, "#END TRACE\n\n"); + if (file != stdout) + fclose(file); +} + +void show_global_stats(app_eo_ctx_t *eo_ctx) +{ + APPL_PRINT("\nTOTAL STATS:\n"); + APPL_PRINT(" Num tmo: %lu\n", eo_ctx->global_stat.num_tmo); + APPL_PRINT(" Num late ack: %lu", eo_ctx->global_stat.num_late); + APPL_PRINT(" (%lu %%)\n", + (eo_ctx->global_stat.num_late * 100) / eo_ctx->global_stat.num_tmo); + APPL_PRINT(" Max early arrival: %.1f us %s\n", + ((double)eo_ctx->global_stat.max_early_ns) / 1000.0, + (uint64_t)llabs(eo_ctx->global_stat.max_early_ns) > g_options.res_ns ? "!" : ""); + APPL_PRINT(" Max diff from tgt: %.1f us (res %.1f us) %s\n", + ((double)eo_ctx->global_stat.max_dev_ns) / 1000.0, + (double)g_options.res_ns / 1000.0, + (uint64_t)llabs(eo_ctx->global_stat.max_dev_ns) > (2 * g_options.res_ns) ? + ">2x res!" 
: ""); + APPL_PRINT(" Max CPU load: %d %%\n", eo_ctx->global_stat.max_cpu); + if (eo_ctx->global_stat.max_dispatch) + APPL_PRINT(" Max EO rcv time: %lu ns\n", eo_ctx->global_stat.max_dispatch); + APPL_PRINT("\n"); +} + +uint64_t random_tmo_ns(uint64_t minval, uint64_t maxval) +{ + if (maxval == 0) + maxval = g_options.max_period_ns; + if (minval == 0) + minval = g_options.min_period_ns; + + uint64_t r = random() % (maxval - minval + 1); + + return r + minval; /* ns between min/max */ +} + +uint64_t random_work_ns(rnd_state_t *rng) +{ + uint64_t r; + int32_t r1; + + random_r(&rng->rdata, &r1); + r = (uint64_t)r1; + if (r % 100 >= g_options.work_prop) /* probability of work roughly */ + return 0; + + random_r(&rng->rdata, &r1); + r = (uint64_t)r1 % (g_options.max_work_ns - g_options.min_work_ns + 1); + return r + g_options.min_work_ns; +} + +tmo_trace *find_tmo(app_eo_ctx_t *eo_ctx, int id, int count, int *last) +{ + int cores = m_shm->core_count; + tmo_trace *trc = NULL; + int last_count = 0; + + for (int c = 0; c < cores; c++) { + for (int i = 0; i < eo_ctx->cdat[c].count; i++) { /* find id */ + if (eo_ctx->cdat[c].trc[i].op == OP_TMO && + eo_ctx->cdat[c].trc[i].id == id) { /* this TMO */ + if (eo_ctx->cdat[c].trc[i].count == count) + trc = &eo_ctx->cdat[c].trc[i]; + /* always run through for last_count */ + if (eo_ctx->cdat[c].trc[i].count > last_count) + last_count = eo_ctx->cdat[c].trc[i].count; + } + } + } + *last = last_count; + return trc; +} + +int do_one_tmo(int id, app_eo_ctx_t *eo_ctx, + uint64_t *min, uint64_t *max, uint64_t *first, + int64_t *tgt_max, int64_t *max_early_ns, int *evnum) +{ + int num = 0; + uint64_t diff; + uint64_t prev = 0; + int last = 0; + int last_num; + uint64_t period_ns = eo_ctx->tmo_data[id].period_ns; + uint64_t start_ns = eo_ctx->tmo_data[id].start_ts; + int64_t max_tgt_diff = 0; + + *max = 0; + *min = INT64_MAX; + + /* find in sequential order for diff to work. 
TODO this gets very slow with many tmos */ + + for (int count = 1; count < g_options.tracebuf; count++) { + tmo_trace *tmo = find_tmo(eo_ctx, id, count, &last_num); + + if (!tmo) { + if (last != count - 1) + APPL_PRINT("MISSING TMO: id %d, count %d\n", id, count); + *tgt_max = max_tgt_diff; + return num; + } + last++; + if (count == 1) { /* first period may be different */ + uint64_t tgt = start_ns + eo_ctx->tmo_data[id].first_ns; + int64_t tgtdiff = (int64_t)tmo->ts - (int64_t)tgt; + + if (llabs(max_tgt_diff) < llabs(tgtdiff)) { + max_tgt_diff = tgtdiff; + *evnum = count; + } + if (tgtdiff < *max_early_ns) + *max_early_ns = tgtdiff; + + diff = tmo->ts - eo_ctx->tmo_data[id].start_ts; + *first = diff; + start_ns += eo_ctx->tmo_data[id].first_ns; /* from now constant period */ + } else { + diff = tmo->ts - prev; + /*skip last, could be while stopping */ + if (last_num > count && tmo->ts < eo_ctx->stopped) { + if (diff > *max) + *max = diff; + if (diff < *min) + *min = diff; + + /* calculate distance to target */ + uint64_t tgt = start_ns + (count - 1) * period_ns; + int64_t tgtdiff = (int64_t)tmo->ts - (int64_t)tgt; + + if (llabs(max_tgt_diff) < llabs(tgtdiff)) { + max_tgt_diff = tgtdiff; + *evnum = count; + } + if (tgtdiff < *max_early_ns) + *max_early_ns = tgtdiff; + } + } + prev = tmo->ts; + num++; + } + *tgt_max = max_tgt_diff; + return num; +} + +bool timing_statistics(app_eo_ctx_t *eo_ctx) +{ + /* basic statistics, more with offline tools (-w) */ + uint64_t max_ts = 0, min_ts = 0, first_ts = 0; + int64_t tgt_max = 0; + const int cores = m_shm->core_count; + uint64_t system_used = eo_ctx->stopped - eo_ctx->started; + bool stop_loops = false; + + for (int c = 0; c < cores; c++) { + core_data *cdat = &eo_ctx->cdat[c]; + uint64_t eo_used = cdat->acc_time; + double perc = (double)eo_used / (double)system_used * 100; + + if (perc > 100) + perc = 100; + APPL_PRINT("STAT_CORE [%d]: %d tmos, %d jobs, EO used %.1f %% CPU time\n", + c, cdat->count, cdat->jobs, perc); + if (perc > eo_ctx->global_stat.max_cpu) + eo_ctx->global_stat.max_cpu = round(perc); + eo_ctx->global_stat.num_tmo += cdat->count; + } + + for (int id = 0; id < g_options.num_periodic; id++) { /* each timeout */ + tmo_setup *tmo_data = &eo_ctx->tmo_data[id]; + int64_t max_early = 0; + int evnum = 0; + int num = do_one_tmo(id, eo_ctx, + &min_ts, &max_ts, &first_ts, &tgt_max, &max_early, &evnum); + + APPL_PRINT("STAT-TMO [%d]: %d tmos (tmr#%d), period %lu ns (", + id, num, tmo_data->tidx, tmo_data->period_ns); + if (num) { + int64_t maxdiff = (int64_t)max_ts - + (int64_t)tmo_data->period_ns; + int64_t mindiff = (int64_t)min_ts - + (int64_t)tmo_data->period_ns; + int64_t firstdiff = (int64_t)first_ts - + (int64_t)tmo_data->first_ns; + + APPL_PRINT("%lu ticks), interval %ld ns ... +%ld ns", + tmo_data->ticks, mindiff, maxdiff); + APPL_PRINT(" (%ld us ... 
+%ld us)\n", mindiff / 1000, maxdiff / 1000); + if (tmo_data->first_ns != tmo_data->period_ns) + APPL_PRINT(" - 1st period set %lu ns, was %ld ns (diff %.2f us)\n", + tmo_data->first_ns, first_ts, (double)firstdiff / 1000); + APPL_PRINT(" - Max diff from target %.2f us, ev #%d\n", + (double)tgt_max / 1000, evnum); + if (llabs(tgt_max) > llabs(eo_ctx->global_stat.max_dev_ns)) + eo_ctx->global_stat.max_dev_ns = tgt_max; + if (g_options.stop_limit && + ((uint64_t)llabs(tgt_max) > g_options.stop_limit)) + stop_loops = true; + if (max_early < eo_ctx->global_stat.max_early_ns) + eo_ctx->global_stat.max_early_ns = max_early; + } else { + APPL_PRINT(" ERROR - no timeouts received\n"); + if (g_options.stop_limit) + stop_loops = true; + } + } + + APPL_PRINT("Starting timeout loop took %lu us (%lu per tmo)\n", + eo_ctx->start_loop_ns / 1000, + eo_ctx->start_loop_ns / 1000 / g_options.num_periodic); + + if (!g_options.dispatch) + return stop_loops; + + /* + * g_options.dispatch set + * + * Calculate EO rcv min-max-avg: + */ + uint64_t min = UINT64_MAX, max = 0, avg = 0; + uint64_t prev_ts = 0; + int prev_count = 0; + int num = 0; + + for (int c = 0; c < cores; c++) { + for (int i = 0; i < g_options.tracebuf; i++) { + core_data *cdat = &eo_ctx->cdat[c]; + + if (cdat->trc[i].op == OP_PROF_ENTER_CB) { + prev_ts = cdat->trc[i].ts; + prev_count = cdat->trc[i].count; + } else if (cdat->trc[i].op == OP_PROF_EXIT_CB) { + uint64_t diff_ts; + uint64_t ns; + + if (prev_count != cdat->trc[i].count) + APPL_PRINT("No enter cnt=%d\n", prev_count); + + diff_ts = cdat->trc[i].ts - prev_ts; + ns = diff_ts; + + if (ns < min) + min = ns; + if (ns > max) + max = ns; + avg += ns; + num++; + } + } + } + + APPL_PRINT("%d dispatcher enter-exit samples\n", num); + APPL_PRINT("PROF-DISPATCH rcv time: min %lu ns, max %lu ns, avg %lu ns\n", + min, max, num > 0 ? 
avg / num : 0); + + if (max > eo_ctx->global_stat.max_dispatch) + eo_ctx->global_stat.max_dispatch = max; + + return stop_loops; +} + +void profile_statistics(e_op op, int cores, app_eo_ctx_t *eo_ctx) +{ + uint64_t min = UINT64_MAX; + uint64_t max = 0, avg = 0, num = 0; + uint64_t t; + + for (int c = 0; c < cores; c++) { + for (int i = 0; i < g_options.tracebuf; i++) { + if (eo_ctx->cdat[c].trc[i].op == op) { + t = eo_ctx->cdat[c].trc[i].linuxt; + if (min > t) + min = t; + if (max < t) + max = t; + avg += t; + num++; + } + } + } + if (num) + APPL_PRINT("%-15s %-15lu %-15lu %-15lu %-15lu\n", op_labels[op], + num, min, max, avg / num); +} + +void profile_all_stats(int cores, app_eo_ctx_t *eo_ctx) +{ + APPL_PRINT("API timing profiles:\n"); + APPL_PRINT("api count min max avg (ns)\n"); + APPL_PRINT("------------------------------------------------------------------------\n"); + profile_statistics(OP_PROF_CREATE, cores, eo_ctx); + profile_statistics(OP_PROF_SET, cores, eo_ctx); + profile_statistics(OP_PROF_ACK, cores, eo_ctx); + profile_statistics(OP_PROF_DELETE, cores, eo_ctx); + profile_statistics(OP_PROF_CANCEL, cores, eo_ctx); + profile_statistics(OP_PROF_TMR_CREATE, cores, eo_ctx); + profile_statistics(OP_PROF_TMR_DELETE, cores, eo_ctx); +} + +void analyze(app_eo_ctx_t *eo_ctx) +{ + int cores = m_shm->core_count; + int cancelled = 0; + int job_del = 0; + + bool stop = timing_statistics(eo_ctx); + + if (g_options.profile) + profile_all_stats(cores, eo_ctx); + + for (int c = 0; c < cores; c++) { + cancelled += eo_ctx->cdat[c].cancelled; + job_del += eo_ctx->cdat[c].jobs_deleted; + } + + show_global_stats(eo_ctx); + + /* write trace file */ + if (g_options.csv != NULL) + write_trace(eo_ctx, g_options.csv); + + APPL_PRINT("%d/%d timeouts were cancelled\n", cancelled, g_options.num_periodic); + + if (g_options.bg_events) + APPL_PRINT("%d/%d bg jobs were deleted\n", job_del, g_options.bg_events); + if (g_options.mz_mb) + APPL_PRINT("%lu memzeros\n", eo_ctx->mz_count); + double span = eo_ctx->stopped - eo_ctx->started; + + span /= 1000000000; + APPL_PRINT("Timer runtime %f s\n", span); + + test_fatal_if(cancelled != g_options.num_periodic, + "Not all tmos deleted (did not arrive at all?)\n"); + + if (stop) { + APPL_PRINT("STOP due to timing error larger than limit! (%f s)\n", + (double)g_options.stop_limit / 1000000000); + raise(SIGINT); + } +} + +int add_trace(app_eo_ctx_t *eo_ctx, int id, e_op op, uint64_t ns, int count, int tidx) +{ + int core = em_core_id(); + + if (eo_ctx->cdat[core].trc == NULL) + return 1; /* skip during early startup */ + + tmo_trace *tmo = &eo_ctx->cdat[core].trc[eo_ctx->cdat[core].count]; + + if (eo_ctx->cdat[core].count < g_options.tracebuf) { + if (op < OP_PROF_ACK && (tidx != -1)) /* to be a bit faster for profiling */ + tmo->tick = em_timer_current_tick(m_shm->test_tmr[tidx]); + tmo->op = op; + tmo->id = id; + tmo->ts = TIME_STAMP_FN(); + tmo->linuxt = ns; + tmo->count = count; + tmo->tidx = tidx; + eo_ctx->cdat[core].count++; + } + + return (eo_ctx->cdat[core].count >= g_options.trcstop) ? 
0 : 1;
+}
+
+void send_bg_events(app_eo_ctx_t *eo_ctx)
+{
+	for (int n = 0; n < g_options.bg_events; n++) {
+		em_event_t event = em_alloc(sizeof(app_msg_t),
+					    EM_EVENT_TYPE_SW, m_shm->pool);
+		test_fatal_if(event == EM_EVENT_UNDEF, "Can't allocate bg event!\n");
+		app_msg_t *msg = em_event_pointer(event);
+
+		msg->command = CMD_BGWORK;
+		msg->count = 0;
+		msg->id = n + 1;
+		msg->arg = g_options.bg_time_ns;
+		test_fatal_if(em_send(event, eo_ctx->bg_q) != EM_OK, "Can't send bg event!\n");
+	}
+}
+
+void start_periodic(app_eo_ctx_t *eo_ctx)
+{
+	app_msg_t *msg;
+	em_event_t event;
+	em_tmo_t tmo;
+	em_tmo_flag_t flag = EM_TMO_FLAG_PERIODIC;
+	uint64_t t1 = 0;
+	uint64_t max_period = 0;
+	int tidx;
+	uint64_t first_same_tick = 0;
+
+	if (g_options.noskip)
+		flag |= EM_TMO_FLAG_NOSKIP;
+	eo_ctx->stop_sent = 0;
+	eo_ctx->started = TIME_STAMP_FN();
+
+	for (int i = 0; i < g_options.num_periodic; i++) {
+		event = em_alloc(sizeof(app_msg_t), g_options.etype, m_shm->pool);
+		test_fatal_if(event == EM_EVENT_UNDEF,
+			      "Can't allocate test event (%ldB)!\n",
+			      sizeof(app_msg_t));
+
+		msg = em_event_pointer(event);
+		msg->command = CMD_TMO;
+		msg->count = 0;
+		msg->id = i;
+		tidx = random() % g_options.num_timers;
+		msg->tidx = tidx;
+
+		if (eo_ctx->tmo_data[i].handle == EM_TMO_UNDEF) { /* not -q */
+			if (g_options.profile)
+				t1 = TIME_STAMP_FN();
+			tmo = em_tmo_create(m_shm->test_tmr[tidx], flag, eo_ctx->test_q);
+			if (g_options.profile)
+				add_prof(eo_ctx, t1, OP_PROF_CREATE, msg);
+			test_fatal_if(tmo == EM_TMO_UNDEF, "Can't allocate test_tmo!\n");
+			eo_ctx->tmo_data[i].handle = tmo;
+		}
+		msg->tmo = eo_ctx->tmo_data[i].handle;
+		eo_ctx->tmo_data[i].tidx = tidx;
+
+		uint64_t period_ticks;
+		uint64_t first = 0;
+		em_status_t stat;
+
+		if (g_options.period_ns) {
+			eo_ctx->tmo_data[i].period_ns = g_options.period_ns;
+		} else { /* 0: use random */
+			eo_ctx->tmo_data[i].period_ns = random_tmo_ns(0, 0);
+		}
+		if (max_period < eo_ctx->tmo_data[i].period_ns)
+			max_period = eo_ctx->tmo_data[i].period_ns;
+		period_ticks = em_timer_ns_to_tick(m_shm->test_tmr[tidx],
+						   eo_ctx->tmo_data[i].period_ns);
+
+		if (EXTRA_PRINTS && i == 0)
+			APPL_PRINT("Timer Hz %lu\n", eo_ctx->test_hz);
+
+		test_fatal_if(period_ticks < 1, "timer resolution is too low!\n");
+
+		if (g_options.first_ns < 0) /* use random */
+			eo_ctx->tmo_data[i].first_ns = random_tmo_ns(0, llabs(g_options.first_ns));
+		else if (g_options.first_ns == 0) /* use period */
+			eo_ctx->tmo_data[i].first_ns = eo_ctx->tmo_data[i].period_ns;
+		else
+			eo_ctx->tmo_data[i].first_ns = g_options.first_ns;
+
+		first = em_timer_ns_to_tick(m_shm->test_tmr[tidx], eo_ctx->tmo_data[i].first_ns);
+		eo_ctx->tmo_data[i].ack_late = 0;
+		eo_ctx->tmo_data[i].ticks = period_ticks;
+
+		/* store start time */
+		eo_ctx->tmo_data[i].start_ts = TIME_STAMP_FN();
+		eo_ctx->tmo_data[i].start = em_timer_current_tick(m_shm->test_tmr[tidx]);
+		first += eo_ctx->tmo_data[i].start; /* ticks from now */
+
+		if (i == 0) { /* save tick from first tmo */
+			first_same_tick = first;
+		}
+		if (g_options.same_tick) {
+			first = first_same_tick;
+			/* this is not accurate, but makes summary analysis work better */
+			eo_ctx->tmo_data[i].start_ts = eo_ctx->tmo_data[0].start_ts;
+		}
+		eo_ctx->tmo_data[i].first = first;
+
+		if (g_options.profile)
+			t1 = TIME_STAMP_FN();
+		stat = em_tmo_set_periodic(eo_ctx->tmo_data[i].handle, first, period_ticks, event);
+		if (g_options.profile)
+			add_prof(eo_ctx, t1, OP_PROF_SET, msg);
+
+		if (unlikely(stat != EM_OK)) {
+			if (EXTRA_PRINTS) {
+				em_timer_tick_t now =
em_timer_current_tick(m_shm->test_tmr[tidx]); + + APPL_PRINT("FAILED to set tmo, stat=%d: first=%lu, ", stat, first); + APPL_PRINT("now %lu (diff %ld), period=%lu\n", + now, (int64_t)first - (int64_t)now, period_ticks); + APPL_PRINT("(first_ns %lu)\n", eo_ctx->tmo_data[i].first_ns); + } + test_fatal_if(1, "Can't activate test tmo!\n"); + } + } + + eo_ctx->start_loop_ns = TIME_STAMP_FN() - eo_ctx->started; + eo_ctx->max_period = max_period; + /* time window to detect possible late timeouts before cleanup */ + eo_ctx->cooloff = ((max_period / 1000000000ULL) * 2) + 1; + if (eo_ctx->cooloff < MIN_COOLOFF) + eo_ctx->cooloff = MIN_COOLOFF; /* HB periods (secs) */ +} + +void add_prof(app_eo_ctx_t *eo_ctx, uint64_t t1, e_op op, app_msg_t *msg) +{ + uint64_t dif = TIME_STAMP_FN() - t1; + int id, count; + + if (unlikely(msg == NULL)) { + id = -1; + count = -1; + } else { + id = msg->id; + count = msg->count; + } + + add_trace(eo_ctx, id, op, dif, count, -1); + /* if this filled the buffer it's handled on next tmo */ +} + +int handle_periodic(app_eo_ctx_t *eo_ctx, em_event_t event) +{ + int core = em_core_id(); + app_msg_t *msg = (app_msg_t *)em_event_pointer(event); + int reuse = 1; + e_state state = __atomic_load_n(&eo_ctx->state, __ATOMIC_ACQUIRE); + uint64_t t1 = 0; + em_tmo_stats_t ctrs = { 0 }; /* init to avoid gcc warning with LTO */ + em_status_t ret; + + msg->count++; + + /* this is to optionally test abnormal exits only */ + if (unlikely(g_options.abort != 0) && abs(g_options.abort) <= msg->count) { + if (g_options.abort < 0) { /* cause segfault to test exception here */ + uint64_t *fault = NULL; + /* coverity[FORWARD_NULL] */ + msg->arg = *fault; + } else { + abort(); + } + } + + if (likely(state == STATE_RUN)) { /* add tmo trace */ + if (!add_trace(eo_ctx, msg->id, OP_TMO, 0, msg->count, msg->tidx)) + send_stop(eo_ctx); /* triggers state change to stop */ + + if (unlikely(em_tmo_get_type(event, NULL, false) != EM_TMO_TYPE_PERIODIC)) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Unexpected, event is not tmo\n"); + + if (g_options.work_prop) { + uint64_t work = random_work_ns(&eo_ctx->cdat[core].rng); + + if (work) { /* add extra delay */ + uint64_t t2; + uint64_t ns = TIME_STAMP_FN(); + + do { + t2 = TIME_STAMP_FN(); + } while (t2 < (ns + work)); + add_trace(eo_ctx, msg->id, OP_WORK, work, msg->count, -1); + } + } + + /* only ack while in running state */ + add_trace(eo_ctx, msg->id, OP_ACK, 0, msg->count, msg->tidx); + if (g_options.profile) + t1 = TIME_STAMP_FN(); + em_status_t stat = em_tmo_ack(msg->tmo, event); + + if (g_options.profile) + add_prof(eo_ctx, t1, OP_PROF_ACK, msg); + if (unlikely(stat != EM_OK)) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "ack() fail!\n"); + + } else if (state == STATE_COOLOFF) { /* trace, but cancel */ + em_event_t tmo_event = EM_EVENT_UNDEF; + + add_trace(eo_ctx, msg->id, OP_TMO, 0, msg->count, msg->tidx); + em_tmo_get_stats(msg->tmo, &ctrs); + APPL_PRINT("STAT-ACK [%d]: %lu acks, %lu late, %lu skips\n", + msg->id, ctrs.num_acks, ctrs.num_late_ack, ctrs.num_period_skips); + eo_ctx->tmo_data[msg->id].ack_late = ctrs.num_late_ack; + eo_ctx->global_stat.num_late += ctrs.num_late_ack; + + if (unlikely(msg->id >= g_options.num_periodic)) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Corrupted tmo msg?\n"); + + if (g_options.profile) + t1 = TIME_STAMP_FN(); + if (g_options.no_del) { /* don't delete each round */ + ret = em_tmo_cancel(msg->tmo, &tmo_event); + if (g_options.profile) + add_prof(eo_ctx, t1, OP_PROF_CANCEL, msg); + test_fatal_if(ret 
== EM_OK, "tmo_cancel ok, expecting fail here!\n"); + } else { + if (em_tmo_get_state(msg->tmo) == EM_TMO_STATE_ACTIVE) + em_tmo_cancel(msg->tmo, &tmo_event); + ret = em_tmo_delete(msg->tmo); + if (g_options.profile) + add_prof(eo_ctx, t1, OP_PROF_DELETE, msg); + test_fatal_if(ret != EM_OK, "tmo_delete failed, ret %" PRI_STAT "!\n", ret); + eo_ctx->tmo_data[msg->id].handle = EM_TMO_UNDEF; + } + + eo_ctx->cdat[core].cancelled++; + if (unlikely(tmo_event != EM_EVENT_UNDEF)) { /* not expected as we have the event */ + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "periodic tmo delete returned evt!\n"); + } + add_trace(eo_ctx, msg->id, OP_CANCEL, 0, msg->count, msg->tidx); + reuse = 0; /* free this last tmo event of canceled tmo */ + } else { + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "Timeout in state %s!\n", state_labels[state]); + } + return reuse; +} + +void analyze_measure(app_eo_ctx_t *eo_ctx, uint64_t linuxns, uint64_t tmrtick, + uint64_t timetick) +{ + uint64_t linux_t2 = linux_time_ns(); + uint64_t time_t2 = TIME_STAMP_FN(); + uint64_t tmr_t2 = em_timer_current_tick(m_shm->test_tmr[0]); + + linux_t2 = linux_t2 - linuxns; + time_t2 = time_t2 - timetick; + tmr_t2 = tmr_t2 - tmrtick; + APPL_PRINT("%lu timer ticks in %lu ns (linux time) ", tmr_t2, linux_t2); + double hz = 1000000000 / + ((double)linux_t2 / (double)tmr_t2); + APPL_PRINT("=> %.1f Hz (%.1f MHz). Timer reports %lu Hz\n", + hz, hz / 1000000, eo_ctx->test_hz); + eo_ctx->meas_test_hz = round(hz); + hz = 1000000000 / ((double)linux_t2 / (double)time_t2); + APPL_PRINT("Timestamp measured: %.1f Hz (%.1f MHz)\n", hz, hz / 1000000); + eo_ctx->meas_time_hz = round(hz); + + test_fatal_if(tmr_t2 < 1, "TIMER SEEMS NOT RUNNING AT ALL!?"); +} + +int do_memzero(app_msg_t *msg, app_eo_ctx_t *eo_ctx) +{ + static int count; + + add_trace(eo_ctx, -1, OP_MEMZERO, g_options.mz_mb, msg->count, -1); + if (eo_ctx->mz_data == NULL) { /* first time we only allocate */ + if (g_options.mz_huge) { + eo_ctx->mz_data = mmap(NULL, g_options.mz_mb * 1024UL * 1024UL, + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE | + MAP_HUGETLB | MAP_LOCKED, + -1, 0); + if (eo_ctx->mz_data == MAP_FAILED) + eo_ctx->mz_data = NULL; + } else { + eo_ctx->mz_data = malloc(g_options.mz_mb * 1024UL * 1024UL); + } + test_fatal_if(eo_ctx->mz_data == NULL, "mz_mem reserve failed!"); + } else { + memset(eo_ctx->mz_data, 0, g_options.mz_mb * 1024UL * 1024UL); + eo_ctx->mz_count++; + } + add_trace(eo_ctx, -1, OP_MEMZERO_END, g_options.mz_mb, count, -1); + __atomic_fetch_add(&count, 1, __ATOMIC_RELAXED); + return 0; +} + +int do_bg_work(em_event_t evt, app_eo_ctx_t *eo_ctx) +{ + app_msg_t *msg = (app_msg_t *)em_event_pointer(evt); + uint64_t t1 = TIME_STAMP_FN(); + uint64_t ts; + int32_t rnd; + int core = em_core_id(); + uint64_t sum = 0; + + if (__atomic_load_n(&eo_ctx->state, __ATOMIC_ACQUIRE) != STATE_RUN) { + eo_ctx->cdat[core].jobs_deleted++; + if (EXTRA_PRINTS) + APPL_PRINT("Deleting job after %u iterations\n", msg->count); + return 0; /* stop & delete */ + } + + if (g_options.jobs) + add_trace(eo_ctx, -1, OP_BGWORK, msg->arg, msg->count, -1); + + msg->count++; + eo_ctx->cdat[core].jobs++; + int blocks = g_options.bg_size / g_options.bg_chunk; + + random_r(&eo_ctx->cdat[core].rng.rdata, &rnd); + rnd = rnd % blocks; + uint64_t *dptr = (uint64_t *)((uintptr_t)eo_ctx->bg_data + rnd * g_options.bg_chunk); + + do { + /* jump around memory reading from selected chunk */ + random_r(&eo_ctx->cdat[core].rng.rdata, &rnd); + rnd = rnd % (g_options.bg_chunk / 
sizeof(uint64_t)); + sum += *(dptr + rnd); + ts = TIME_STAMP_FN() - t1; + } while (ts < msg->arg); + + *dptr = sum; + + if (g_options.mz_mb && msg->id == 1) { /* use only one job stream for memzero */ + static uint64_t last_mz; + + if (msg->count < 10) /* don't do mz before some time */ + last_mz = TIME_STAMP_FN(); + ts = TIME_STAMP_FN() - last_mz; + if (ts > g_options.mz_ns) { + do_memzero(msg, eo_ctx); + last_mz = TIME_STAMP_FN(); + } + } + + test_fatal_if(em_send(evt, eo_ctx->bg_q) != EM_OK, "Failed to send BG job event!"); + return 1; +} + +void handle_heartbeat(app_eo_ctx_t *eo_ctx, em_event_t event) +{ + app_msg_t *msg = (app_msg_t *)em_event_pointer(event); + int cores = m_shm->core_count; + int done = 0; + e_state state = __atomic_load_n(&eo_ctx->state, __ATOMIC_SEQ_CST); + static int runs; + static uint64_t linuxns; + static uint64_t tmrtick; + static uint64_t timetick; + static bool startup = true; + + /* heartbeat runs states of the test */ + + if (startup) { + first_timer_create(eo_ctx); + startup = false; + } + + msg->count++; + add_trace(eo_ctx, -1, OP_HB, linux_time_ns(), msg->count, -1); + + if (EXTRA_PRINTS) + APPL_PRINT("."); + + switch (state) { + case STATE_INIT: + if (msg->count > eo_ctx->last_hbcount + INIT_WAIT) { + __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); + eo_ctx->last_hbcount = msg->count; + APPL_PRINT("ROUND %d\n", runs + 1); + APPL_PRINT("->Starting tick measurement\n"); + } + break; + + case STATE_MEASURE: /* measure timer frequencies */ + if (linuxns == 0) { + linuxns = linux_time_ns(); + timetick = TIME_STAMP_FN(); + /* use timer[0] for this always */ + tmrtick = em_timer_current_tick(m_shm->test_tmr[0]); + } + if (msg->count > eo_ctx->last_hbcount + MEAS_PERIOD) { + analyze_measure(eo_ctx, linuxns, tmrtick, timetick); + linuxns = 0; + /* start new run */ + if (g_options.num_runs > 1) + APPL_PRINT("** Round %d\n", runs + 1); + __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); + add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), eo_ctx->state, -1); + } + break; + + case STATE_STABILIZE: /* give some time to get up */ + if (g_options.bg_events) + send_bg_events(eo_ctx); + __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); + add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), eo_ctx->state, -1); + if (EXTRA_PRINTS) + APPL_PRINT("->Starting tmos\n"); + start_periodic(eo_ctx); + eo_ctx->last_hbcount = msg->count; + break; + + case STATE_RUN: /* run the test, avoid prints */ + for (int i = 0; i < cores; i++) { + if (eo_ctx->cdat[i].count >= + g_options.tracebuf) { + done++; + break; + } + } + if (done) { + __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); + add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), eo_ctx->state, -1); + eo_ctx->last_hbcount = msg->count; + if (EXTRA_PRINTS) + APPL_PRINT("->All cores done\n"); + } + break; + + case STATE_COOLOFF: /* stop further timeouts */ + if (msg->count > (eo_ctx->last_hbcount + eo_ctx->cooloff)) { + __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); + add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), eo_ctx->state, -1); + eo_ctx->last_hbcount = msg->count; + if (EXTRA_PRINTS) + APPL_PRINT("->Starting analyze\n"); + } + break; + + case STATE_ANALYZE: /* expected to be stopped, analyze data */ + APPL_PRINT("\n"); + if (g_options.recreate) + delete_timers(eo_ctx); + analyze(eo_ctx); + cleanup(eo_ctx); + /* re-start test cycle */ + __atomic_store_n(&eo_ctx->state, STATE_INIT, __ATOMIC_SEQ_CST); + runs++; + if (runs >= g_options.num_runs && g_options.num_runs != 0) { + /* terminate test app 
*/ + APPL_PRINT("%d runs done\n", runs); + raise(SIGINT); + } + eo_ctx->last_hbcount = msg->count; + if (g_options.recreate) + create_timers(eo_ctx); + break; + + default: + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Invalid test state"); + } + + /* heartbeat never stops */ + if (em_tmo_ack(eo_ctx->heartbeat_tmo, event) != EM_OK) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "HB ack() fail!\n"); +} + +void usage(void) +{ + printf("%s\n", instructions); + + printf("Usage:\n"); + for (int i = 0; ; i++) { + if (longopts[i].name == NULL || descopts[i] == NULL) + break; + printf("--%s or -%c: %s\n", longopts[i].name, longopts[i].val, descopts[i]); + } +} + +int parse_my_args(int first, int argc, char *argv[]) +{ + optind = first + 1; /* skip '--' */ + while (1) { + int opt; + int long_index; + char *endptr; + int64_t num; + + opt = getopt_long(argc, argv, shortopts, longopts, &long_index); + + if (opt == -1) + break; /* No more options */ + + switch (opt) { + case 's': { + g_options.noskip = 0; + } + break; + case 'a': { + g_options.profile = 1; + } + break; + case 'b': { + g_options.jobs = 1; + } + break; + case 'd': { + g_options.dispatch = 1; + } + break; + case 'i': { + g_options.info_only = 1; + } + break; + case 'u': { + g_options.usehuge = 1; + } + break; + case 'q': { + g_options.no_del = 1; + } + break; + case 'S': { + g_options.same_tick = 1; + } + break; + case 'R': { + g_options.recreate = 1; + } + break; + case 'w': { /* optional arg */ + g_options.csv = "stdout"; + if (optarg != NULL) + g_options.csv = optarg; + } + break; + case 'm': { + if (!arg_to_ns(optarg, &num)) + return 0; + if (num < 1) + return 0; + g_options.max_period_ns = (uint64_t)num; + } + break; + case 'L': { + if (!arg_to_ns(optarg, &num)) + return 0; + if (num < 0) + return 0; + g_options.stop_limit = (uint64_t)num; + } + break; + case 'l': { + if (!arg_to_ns(optarg, &num)) + return 0; + if (num < 1) + return 0; + g_options.min_period_ns = num; + } + break; + case 't': { + unsigned long size, perc; + + num = sscanf(optarg, "%lu,%lu", &size, &perc); + if (num == 0 || size < 10 || + sizeof(tmo_trace) * size > MAX_TMO_BYTES) + return 0; + g_options.tracebuf = size; + if (num == 2 && perc > 100) + return 0; + if (num == 2) + g_options.trcstop = ((perc * size) / 100); + else + g_options.trcstop = ((STOP_THRESHOLD * size) / 100); + } + break; + case 'e': { + unsigned int min_us, max_us, prop; + + if (sscanf(optarg, "%u,%u,%u", &min_us, &max_us, &prop) != 3) + return 0; + if (prop > 100 || max_us < 1) + return 0; + g_options.min_work_ns = 1000ULL * min_us; + g_options.max_work_ns = 1000ULL * max_us; + g_options.work_prop = prop; + } + break; + case 'o': { + unsigned int mb; + uint64_t ms; + unsigned int hp = 0; + + if (sscanf(optarg, "%u,%lu,%u", &mb, &ms, &hp) < 2) + return 0; + if (mb < 1 || ms < 1) + return 0; + g_options.mz_mb = mb; + g_options.mz_ns = ms * 1000UL * 1000UL; + if (hp) + g_options.mz_huge = 1; + } + break; + case 'j': { + unsigned int evts, us, kb, chunk; + + num = sscanf(optarg, "%u,%u,%u,%u", &evts, &us, &kb, &chunk); + if (num == 0 || evts < 1) + return 0; + g_options.bg_events = evts; + if (num > 1 && us) + g_options.bg_time_ns = us * 1000ULL; + if (num > 2 && kb) + g_options.bg_size = kb * 1024; + if (num > 3 && chunk) + g_options.bg_chunk = chunk * 1024; + if (g_options.bg_chunk > g_options.bg_size) + return 0; + } + break; + case 'n': { + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 1) + return 0; + g_options.num_periodic = num; + } + break; + case 'p': { + if 
(!arg_to_ns(optarg, &num)) + return 0; + if (num < 0) + return 0; + g_options.period_ns = num; + } + break; + case 'f': { + if (!arg_to_ns(optarg, &num)) + return 0; + g_options.first_ns = num; + } + break; + case 'c': { + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 0) + return 0; + g_options.clock_src = num; + } + break; + case 'r': { + if (!arg_to_ns(optarg, &num)) + return 0; + if (num < 0) + return 0; + g_options.res_ns = num; + } + break; + case 'z': { + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 1) + return 0; + g_options.res_hz = num; + g_options.res_ns = 0; + } + break; + case 'x': { + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 0) + return 0; + g_options.num_runs = num; + } + break; + case 'k': { + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0') + return 0; + g_options.abort = num; + } + break; + + case 'y': { + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 1) + return 0; + g_options.num_timers = num; + } + break; + + case 'g': { + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 0) + return 0; + g_options.etype = (em_event_type_t)num; + } + break; + + case 'h': + default: + opterr = 0; + usage(); + return 0; + } + } + + optind = 1; /* cm_setup() to parse again */ + return 1; +} + +/** + * Before EM - Init + */ +void test_init(const appl_conf_t *appl_conf) +{ + (void)appl_conf; + int core = em_core_id(); + + /* first core creates ShMem */ + if (core == 0) { + m_shm = env_shared_reserve("Timer_test", sizeof(timer_app_shm_t)); + /* initialize it */ + if (m_shm) + memset(m_shm, 0, sizeof(timer_app_shm_t)); + + if (EXTRA_PRINTS) + APPL_PRINT("%ldk shared memory for app context\n", + sizeof(timer_app_shm_t) / 1000); + + } else { + m_shm = env_shared_lookup("Timer_test"); + } + + if (m_shm == NULL) { + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "ShMem init failed on EM-core: %u", + em_core_id()); + } + + APPL_PRINT("core %d: %s done\n", core, __func__); +} + +/** + * Startup of the timer test EM application + */ +void test_start(const appl_conf_t *appl_conf) +{ + em_eo_t eo; + em_queue_t queue; + em_status_t stat; + em_timer_attr_t attr; + app_eo_ctx_t *eo_ctx; + em_timer_res_param_t res_capa; + em_timer_capability_t capa = { 0 }; /* init to avoid gcc warning with LTO */ + em_core_mask_t mask; + em_queue_group_t grp; + em_atomic_group_t agrp; + + /* Store the number of EM-cores running the application */ + m_shm->core_count = appl_conf->core_count; + + if (appl_conf->num_procs > 1) { + APPL_PRINT("\n!! 
Multiple process mode NOT SUPPORTED !!\n\n");
+		raise(SIGINT);
+		return;
+	}
+
+	if (appl_conf->num_pools >= 1)
+		m_shm->pool = appl_conf->pools[0];
+	else
+		m_shm->pool = EM_POOL_DEFAULT;
+
+	eo_ctx = &m_shm->eo_context;
+	memset(eo_ctx, 0, sizeof(app_eo_ctx_t));
+	eo_ctx->tmo_data = calloc(g_options.num_periodic, sizeof(tmo_setup));
+	test_fatal_if(eo_ctx->tmo_data == NULL, "Can't alloc tmo_setups");
+
+	eo = em_eo_create(APP_EO_NAME, app_eo_start, app_eo_start_local,
+			  app_eo_stop, app_eo_stop_local, app_eo_receive,
+			  eo_ctx);
+	test_fatal_if(eo == EM_EO_UNDEF, "Failed to create EO!");
+
+	stat = em_register_error_handler(my_error_handler);
+	test_fatal_if(stat != EM_OK, "Failed to register error handler");
+
+	/* Create atomic group and queues for control messages */
+	stat = em_queue_group_get_mask(EM_QUEUE_GROUP_DEFAULT, &mask);
+	test_fatal_if(stat != EM_OK, "Failed to get default Q grp mask!");
+	grp = em_queue_group_create_sync("CTRL_GRP", &mask);
+	test_fatal_if(grp == EM_QUEUE_GROUP_UNDEF, "Failed to create Q grp!");
+	agrp = em_atomic_group_create("CTRL_AGRP", grp);
+	test_fatal_if(agrp == EM_ATOMIC_GROUP_UNDEF, "Failed to create atomic grp!");
+	eo_ctx->agrp = agrp;
+
+	queue = em_queue_create_ag("Control Q", EM_QUEUE_PRIO_NORMAL, agrp, NULL);
+	stat = em_eo_add_queue_sync(eo, queue);
+	test_fatal_if(stat != EM_OK, "Failed to add hb queue!");
+	eo_ctx->hb_q = queue;
+
+	/* Highest priority queue for stop msg to handle tmo overload */
+	queue = em_queue_create_ag("Stop Q", EM_QUEUE_PRIO_HIGHEST, agrp, NULL);
+	stat = em_eo_add_queue_sync(eo, queue);
+	test_fatal_if(stat != EM_OK, "Failed to add stop queue!");
+	eo_ctx->stop_q = queue;
+
+	/* parallel high priority for timeout handling */
+	queue = em_queue_create("Tmo Q",
+				EM_QUEUE_TYPE_PARALLEL,
+				EM_QUEUE_PRIO_HIGH,
+				EM_QUEUE_GROUP_DEFAULT, NULL);
+	stat = em_eo_add_queue_sync(eo, queue);
+	test_fatal_if(stat != EM_OK, "Failed to add tmo queue!");
+	eo_ctx->test_q = queue;
+
+	/* another parallel low priority for background work */
+	queue = em_queue_create("BG Q",
+				EM_QUEUE_TYPE_PARALLEL,
+				EM_QUEUE_PRIO_LOWEST,
+				EM_QUEUE_GROUP_DEFAULT, NULL);
+	stat = em_eo_add_queue_sync(eo, queue);
+	test_fatal_if(stat != EM_OK, "Failed to add bg queue!");
+	eo_ctx->bg_q = queue;
+
+	/* create two timers so HB and tests can be independent */
+	em_timer_attr_init(&attr);
+	strncpy(attr.name, "HBTimer", EM_TIMER_NAME_LEN);
+	m_shm->hb_tmr = em_timer_create(&attr);
+	test_fatal_if(m_shm->hb_tmr == EM_TIMER_UNDEF,
+		      "Failed to create HB timer!");
+
+	/* test timer preparation, timer(s) created later */
+	test_fatal_if(g_options.res_ns && g_options.res_hz, "Give resolution in ns OR hz!");
+	test_fatal_if(g_options.recreate && g_options.no_del, "Can't keep tmo but delete timers!");
+	test_fatal_if(g_options.same_tick && (g_options.num_timers > 1),
+		      "Same tick currently supports only one timer");
+	test_fatal_if(g_options.same_tick && (g_options.first_ns < 0),
+		      "Same tick (-S) can't do random first timeout");
+	test_fatal_if(g_options.num_timers > MAX_TEST_TIMERS, "Too many test timers");
+
+	em_timer_attr_init(&eo_ctx->tmr_attr);
+	stat = em_timer_capability(&capa, g_options.clock_src);
+
+	APPL_PRINT("Timer capability for clksrc %d:\n", g_options.clock_src);
+	APPL_PRINT(" maximum timers: %d\n", capa.max_timers);
+	APPL_PRINT(" max_res %lu ns %lu hz min_tmo %lu max_tmo %lu\n",
+		   capa.max_res.res_ns, capa.max_res.res_hz,
+		   capa.max_res.min_tmo, capa.max_res.max_tmo);
+	APPL_PRINT(" max_tmo %lu ns %lu hz min_tmo %lu max_tmo %lu\n",
+
capa.max_tmo.res_ns, capa.max_tmo.res_hz, + capa.max_tmo.min_tmo, capa.max_tmo.max_tmo); + + test_fatal_if(stat != EM_OK, "Given clk_src is not supported\n"); + memset(&res_capa, 0, sizeof(em_timer_res_param_t)); + if (!g_options.res_hz) { + res_capa.res_ns = g_options.res_ns == 0 ? capa.max_res.res_ns : g_options.res_ns; + APPL_PRINT("Trying %lu ns resolution capability on clk %d\n", + res_capa.res_ns, g_options.clock_src); + } else { + res_capa.res_hz = g_options.res_hz; + APPL_PRINT("Trying %lu Hz resolution capability on clk %d\n", + res_capa.res_hz, g_options.clock_src); + } + + APPL_PRINT("Asking timer capability for clksrc %d:\n", g_options.clock_src); + APPL_PRINT("%lu ns %lu hz min_tmo %lu max_tmo %lu\n", + res_capa.res_ns, res_capa.res_hz, + res_capa.min_tmo, res_capa.max_tmo); + stat = em_timer_res_capability(&res_capa, g_options.clock_src); + APPL_PRINT("-> Timer res_capability:\n"); + APPL_PRINT("max_res %lu ns %lu hz min_tmo %lu max_tmo %lu\n", + res_capa.res_ns, res_capa.res_hz, + res_capa.min_tmo, res_capa.max_tmo); + test_fatal_if(stat != EM_OK, "Given resolution is not supported (ret %d)\n", stat); + + if (!g_options.max_period_ns) { + g_options.max_period_ns = DEF_MAX_PERIOD; + if (g_options.max_period_ns > res_capa.max_tmo) + g_options.max_period_ns = res_capa.max_tmo; + } + if (!g_options.min_period_ns) { + g_options.min_period_ns = res_capa.res_ns * DEF_MIN_PERIOD; + if (g_options.min_period_ns < res_capa.min_tmo) + g_options.min_period_ns = res_capa.min_tmo; + } + if (g_options.first_ns && (uint64_t)llabs(g_options.first_ns) < g_options.min_period_ns) { + if (g_options.first_ns < 0) + g_options.first_ns = 0 - g_options.min_period_ns; + else + g_options.first_ns = g_options.min_period_ns; + APPL_PRINT("NOTE: First period too short, updated to %ld ns\n", g_options.first_ns); + } + + eo_ctx->tmr_attr.resparam = res_capa; + if (g_options.res_hz) /* can only have one */ + eo_ctx->tmr_attr.resparam.res_ns = 0; + else + eo_ctx->tmr_attr.resparam.res_hz = 0; + eo_ctx->tmr_attr.num_tmo = g_options.num_periodic; + eo_ctx->tmr_attr.resparam.max_tmo = g_options.max_period_ns + + eo_ctx->tmr_attr.resparam.min_tmo; + strncpy(eo_ctx->tmr_attr.name, "TestTimer", EM_TIMER_NAME_LEN); + g_options.res_ns = eo_ctx->tmr_attr.resparam.res_ns; + + /* Start EO */ + stat = em_eo_start_sync(eo, NULL, NULL); + test_fatal_if(stat != EM_OK, "Failed to start EO!"); + + if (g_options.info_only) { /* signal stop here */ + raise(SIGINT); + } + + mlockall(MCL_FUTURE); +} + +void +create_timers(app_eo_ctx_t *eo_ctx) +{ + uint64_t t1 = 0; + + for (int i = 0; i < g_options.num_timers; i++) { + if (g_options.profile) + t1 = TIME_STAMP_FN(); + m_shm->test_tmr[i] = em_timer_create(&eo_ctx->tmr_attr); + if (g_options.profile) + add_prof(eo_ctx, t1, OP_PROF_TMR_CREATE, NULL); + test_fatal_if(m_shm->test_tmr[i] == EM_TIMER_UNDEF, + "Failed to create test timer #%d!", i + 1); + } + APPL_PRINT("%d test timers created\n", g_options.num_timers); +} + +void +delete_timers(app_eo_ctx_t *eo_ctx) +{ + uint64_t t1 = 0; + + for (int i = 0; i < g_options.num_timers; i++) { + if (m_shm->test_tmr[i] != EM_TIMER_UNDEF) { + em_status_t ret; + + if (g_options.profile) + t1 = TIME_STAMP_FN(); + ret = em_timer_delete(m_shm->test_tmr[i]); + if (g_options.profile) + add_prof(eo_ctx, t1, OP_PROF_TMR_DELETE, NULL); + test_fatal_if(ret != EM_OK, "Timer:%" PRI_TMR " delete:%" PRI_STAT "", + m_shm->test_tmr[i], ret); + m_shm->test_tmr[i] = EM_TIMER_UNDEF; + } + } + APPL_PRINT("%d test timers deleted\n", g_options.num_timers); +} + 
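+/*
+ * Illustrative usage note (not part of the original sources): the timer
+ * create/delete pair above is exercised between rounds when timer
+ * re-creation is requested on the command line, e.g.
+ *
+ *	./timer_test_periodic -c 0xe -- -x10 -R -a
+ *
+ * would run 10 test rounds (-x10), delete and re-create the test timers
+ * for every round (-R) and measure the timer API calls involved (-a).
+ * Arguments before '--' go to cm_setup(); the '-c 0xe' core mask shown
+ * here is an assumed example, see the common setup options for details.
+ */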
+void +first_timer_create(app_eo_ctx_t *eo_ctx) +{ + em_timer_t tmr[PRINT_MAX_TMRS]; + em_timer_attr_t attr; + + create_timers(eo_ctx); + + int num_timers = em_timer_get_all(tmr, PRINT_MAX_TMRS); + + test_fatal_if(num_timers < 2, "Not all timers created"); + + for (int i = 0; i < (num_timers > PRINT_MAX_TMRS ? PRINT_MAX_TMRS : num_timers); i++) { + if (em_timer_get_attr(tmr[i], &attr) != EM_OK) { + APPL_ERROR("Can't get timer info\n"); + return; + } + APPL_PRINT("Timer \"%s\" info:\n", attr.name); + APPL_PRINT(" -resolution: %" PRIu64 " ns\n", attr.resparam.res_ns); + APPL_PRINT(" -max_tmo: %" PRIu64 " ms\n", attr.resparam.max_tmo / 1000); + APPL_PRINT(" -num_tmo: %d\n", attr.num_tmo); + APPL_PRINT(" -clk_src: %d\n", attr.resparam.clk_src); + APPL_PRINT(" -tick Hz: %" PRIu64 " hz\n", + em_timer_get_freq(tmr[i])); + } + + eo_ctx->test_hz = em_timer_get_freq(m_shm->test_tmr[0]); /* use timer[0] */ + test_fatal_if(eo_ctx->test_hz == 0, + "get_freq() failed, timer:%" PRI_TMR "", m_shm->test_tmr[0]); +} + +void test_stop(const appl_conf_t *appl_conf) +{ + (void)appl_conf; + const int core = em_core_id(); + em_status_t ret; + em_eo_t eo; + + if (appl_conf->num_procs > 1) { + APPL_PRINT("%s(): skip\n", __func__); + return; + } + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + eo = em_eo_find(APP_EO_NAME); + test_fatal_if(eo == EM_EO_UNDEF, "Could not find EO:%s", APP_EO_NAME); + + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); + + if (g_options.info_only) + return; + + ret = em_timer_delete(m_shm->hb_tmr); + test_fatal_if(ret != EM_OK, "Timer:%" PRI_TMR " delete:%" PRI_STAT "", + m_shm->hb_tmr, ret); + delete_timers(&m_shm->eo_context); + free(m_shm->eo_context.tmo_data); +} + +void test_term(const appl_conf_t *appl_conf) +{ + (void)appl_conf; + int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + if (m_shm != NULL) { + em_unregister_error_handler(); + env_shared_free(m_shm); + m_shm = NULL; + } +} + +static em_status_t app_eo_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + app_msg_t *msg; + struct timespec ts; + uint64_t period; + em_event_t event; + app_eo_ctx_t *eo_ctx = (app_eo_ctx_t *)eo_context; + + (void)eo; + (void)conf; + + eo_ctx->appstart = TIME_STAMP_FN(); + + if (g_options.info_only) + return EM_OK; + + APPL_PRINT("\nActive run options:\n"); + APPL_PRINT(" num timers: %d\n", g_options.num_timers); + APPL_PRINT(" num timeouts: %d\n", g_options.num_periodic); + if (g_options.res_hz) { + APPL_PRINT(" resolution: %lu Hz (%f MHz)\n", g_options.res_hz, + (double)g_options.res_hz / 1000000); + } else { + APPL_PRINT(" resolution: %lu ns (%f s)\n", g_options.res_ns, + (double)g_options.res_ns / 1000000000); + } + if (g_options.period_ns == 0) + APPL_PRINT(" period: random\n"); + else + APPL_PRINT(" period: %lu ns (%f s%s)\n", g_options.period_ns, + (double)g_options.period_ns / 1000000000, + g_options.period_ns == 0 ? " (random)" : ""); + if (g_options.first_ns < 0) + APPL_PRINT(" first period: random up to %lld ns\n", llabs(g_options.first_ns)); + else + APPL_PRINT(" first period: %ld ns (%fs%s)\n", g_options.first_ns, + (double)g_options.first_ns / 1000000000, + g_options.first_ns == 0 ? 
" (=period)" : ""); + APPL_PRINT(" max period: %lu ns (%f s)\n", g_options.max_period_ns, + (double)g_options.max_period_ns / 1000000000); + APPL_PRINT(" min period: %lu ns (%f s)\n", g_options.min_period_ns, + (double)g_options.min_period_ns / 1000000000); + if (g_options.num_runs > 1) + APPL_PRINT(" diff err lim: %lu ns (%f s)\n", g_options.stop_limit, + (double)g_options.stop_limit / 1000000000); + APPL_PRINT(" csv: %s\n", + g_options.csv == NULL ? "(no)" : g_options.csv); + APPL_PRINT(" tracebuffer: %d events (%luKiB)\n", + g_options.tracebuf, + g_options.tracebuf * sizeof(tmo_trace) / 1024); + APPL_PRINT(" stop limit: %d events\n", g_options.trcstop); + APPL_PRINT(" use NOSKIP: %s\n", g_options.noskip ? "yes" : "no"); + APPL_PRINT(" profile API: %s\n", g_options.profile ? "yes" : "no"); + APPL_PRINT(" dispatch prof:%s\n", g_options.dispatch ? "yes" : "no"); + APPL_PRINT(" work probability:%u %%\n", g_options.work_prop); + if (g_options.work_prop) { + APPL_PRINT(" min_work: %luns\n", g_options.min_work_ns); + APPL_PRINT(" max_work: %luns\n", g_options.max_work_ns); + } + APPL_PRINT(" bg events: %u\n", g_options.bg_events); + eo_ctx->bg_data = NULL; + if (g_options.bg_events) { + APPL_PRINT(" bg work: %lu us\n", g_options.bg_time_ns / 1000); + APPL_PRINT(" bg data: %u kiB\n", g_options.bg_size / 1024); + APPL_PRINT(" bg chunk: %u kiB (%u blks)\n", + g_options.bg_chunk / 1024, + g_options.bg_size / g_options.bg_chunk); + APPL_PRINT(" bg trace: %s\n", g_options.jobs ? "yes" : "no"); + + eo_ctx->bg_data = malloc(g_options.bg_size); + test_fatal_if(eo_ctx->bg_data == NULL, + "Can't allocate bg work data (%dkiB)!\n", + g_options.bg_size / 1024); + } + APPL_PRINT(" memzero: "); + if (g_options.mz_mb) + APPL_PRINT("%u MB %severy %lu ms\n", + g_options.mz_mb, + g_options.mz_huge ? "(mmap huge) " : "", + g_options.mz_ns / 1000000UL); + else + APPL_PRINT("no\n"); + + if (g_options.abort != 0) { + APPL_PRINT(" abort after: "); + if (g_options.abort) + APPL_PRINT("%d%s\n", + g_options.abort, g_options.abort < 0 ? "(segfault)" : ""); + else + APPL_PRINT("0 (no)\n"); + } + if (g_options.num_runs != 1) { + APPL_PRINT(" delete tmos: %s\n", g_options.no_del ? "no" : "yes"); + APPL_PRINT(" recreate tmr: %s\n", g_options.recreate ? "yes" : "no"); + } + if (g_options.etype != EM_EVENT_TYPE_SW) + APPL_PRINT(" using evtype: %u (0x%X)\n", g_options.etype, g_options.etype); + APPL_PRINT(" same start tick: %s", g_options.same_tick ? 
"yes" : "no"); + + APPL_PRINT("\nTracing first %d tmo events\n", g_options.tracebuf); + + if (g_options.bg_events) + prefault(eo_ctx->bg_data, g_options.bg_size); + + /* create periodic timeout for heartbeat */ + eo_ctx->heartbeat_tmo = em_tmo_create(m_shm->hb_tmr, EM_TMO_FLAG_PERIODIC, eo_ctx->hb_q); + test_fatal_if(eo_ctx->heartbeat_tmo == EM_TMO_UNDEF, + "Can't allocate heartbeat_tmo!\n"); + + event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, m_shm->pool); + test_fatal_if(event == EM_EVENT_UNDEF, "Can't allocate event (%ldB)!\n", + sizeof(app_msg_t)); + + msg = em_event_pointer(event); + msg->command = CMD_HEARTBEAT; + msg->count = 0; + msg->id = -1; + eo_ctx->hb_hz = em_timer_get_freq(m_shm->hb_tmr); + if (eo_ctx->hb_hz < 10) + APPL_ERROR("WARNING: HB timer hz very low!\n"); + else + APPL_PRINT("HB timer frequency is %lu\n", eo_ctx->hb_hz); + + period = eo_ctx->hb_hz; /* 1s */ + test_fatal_if(period < 1, "timer resolution is too low!\n"); + + /* linux time check */ + test_fatal_if(clock_getres(CLOCK_MONOTONIC, &ts) != 0, + "clock_getres() failed!\n"); + + period = ts.tv_nsec + (ts.tv_sec * 1000000000ULL); + eo_ctx->linux_hz = 1000000000ULL / period; + APPL_PRINT("Linux reports clock running at %" PRIu64 " hz\n", eo_ctx->linux_hz); + APPL_PRINT("ODP says time_global runs at %lu Hz\n", odp_time_global_res()); + eo_ctx->time_hz = odp_time_global_res(); + + /* start heartbeat */ + __atomic_store_n(&eo_ctx->state, STATE_INIT, __ATOMIC_SEQ_CST); + + em_status_t stat = em_tmo_set_periodic(eo_ctx->heartbeat_tmo, 0, eo_ctx->hb_hz, event); + + if (EXTRA_PRINTS && stat != EM_OK) + APPL_PRINT("FAILED to set HB tmo, stat=%d: period=%lu\n", stat, eo_ctx->hb_hz); + test_fatal_if(stat != EM_OK, "Can't activate heartbeat tmo!\n"); + + stat = em_dispatch_register_enter_cb(enter_cb); + test_fatal_if(stat != EM_OK, "enter_cb() register failed!"); + stat = em_dispatch_register_exit_cb(exit_cb); + test_fatal_if(stat != EM_OK, "exit_cb() register failed!"); + + srandom(time(NULL)); + if (g_options.max_work_ns > RAND_MAX || + g_options.max_period_ns > RAND_MAX) { + double s = (double)RAND_MAX / (double)eo_ctx->test_hz; + + APPL_PRINT("WARNING: rnd number range is less than max values (up to %.4fs)\n", s); + } + if (EXTRA_PRINTS) + APPL_PRINT("WARNING: extra prints enabled, expect some jitter\n"); + + return EM_OK; +} + +/** + * @private + * + * EO per thread start function. + */ +static em_status_t app_eo_start_local(void *eo_context, em_eo_t eo) +{ + app_eo_ctx_t *const eo_ctx = eo_context; + int core = em_core_id(); + + (void)eo; + + if (EXTRA_PRINTS) + APPL_PRINT("EO local start\n"); + test_fatal_if(core >= MAX_CORES, "Too many cores!"); + eo_ctx->cdat[core].trc = allocate_tracebuf(g_options.tracebuf, sizeof(tmo_trace), + &eo_ctx->cdat[core].trc_size); + test_fatal_if(eo_ctx->cdat[core].trc == NULL, "Failed to allocate trace buffer!"); + eo_ctx->cdat[core].count = 0; + eo_ctx->cdat[core].cancelled = 0; + eo_ctx->cdat[core].jobs_deleted = 0; + eo_ctx->cdat[core].jobs = 0; + + memset(&eo_ctx->cdat[core].rng, 0, sizeof(rnd_state_t)); + initstate_r(time(NULL), eo_ctx->cdat[core].rng.rndstate, RND_STATE_BUF, + &eo_ctx->cdat[core].rng.rdata); + srandom_r(time(NULL), &eo_ctx->cdat[core].rng.rdata); + return EM_OK; +} + +/** + * @private + * + * EO stop function. 
+ */ +static em_status_t app_eo_stop(void *eo_context, em_eo_t eo) +{ + app_eo_ctx_t *const eo_ctx = eo_context; + em_event_t event = EM_EVENT_UNDEF; + em_status_t ret; + + if (EXTRA_PRINTS) + APPL_PRINT("EO stop\n"); + + if (g_options.info_only) + return EM_OK; + + if (eo_ctx->heartbeat_tmo != EM_TMO_UNDEF) { + if (em_tmo_get_state(eo_ctx->heartbeat_tmo) == EM_TMO_STATE_ACTIVE) + em_tmo_cancel(eo_ctx->heartbeat_tmo, &event); + em_tmo_delete(eo_ctx->heartbeat_tmo); + eo_ctx->heartbeat_tmo = EM_TMO_UNDEF; + if (event != EM_EVENT_UNDEF) + em_free(event); + } + + /* cancel all test timers in case test didn't complete */ + int dcount = 0; + + for (int i = 0; i < g_options.num_periodic; i++) { + if (eo_ctx->tmo_data[i].handle != EM_TMO_UNDEF) { + event = EM_EVENT_UNDEF; + if (em_tmo_get_state(eo_ctx->tmo_data[i].handle) == EM_TMO_STATE_ACTIVE) + em_tmo_cancel(eo_ctx->tmo_data[i].handle, &event); + em_tmo_delete(eo_ctx->tmo_data[i].handle); + eo_ctx->tmo_data[i].handle = EM_TMO_UNDEF; + if (event != EM_EVENT_UNDEF) + em_free(event); + dcount++; + } + } + if (dcount) + APPL_PRINT("NOTE: deleted %d still active tmos\n", dcount); + + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); /* remove and delete */ + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", ret, eo); + + ret = em_atomic_group_delete(((app_eo_ctx_t *)eo_context)->agrp); + test_fatal_if(ret != EM_OK, + "EO remove atomic grp:%" PRI_STAT " EO:%" PRI_EO "", ret, eo); + + ret = em_dispatch_unregister_enter_cb(enter_cb); + test_fatal_if(ret != EM_OK, "enter_cb() unregister:%" PRI_STAT, ret); + ret = em_dispatch_unregister_exit_cb(exit_cb); + test_fatal_if(ret != EM_OK, "exit_cb() unregister:%" PRI_STAT, ret); + + if (eo_ctx->bg_data != NULL) + free(eo_ctx->bg_data); + eo_ctx->bg_data = NULL; + if (eo_ctx->mz_data != NULL) { + if (g_options.mz_huge) + munmap(eo_ctx->mz_data, g_options.mz_mb * 1024UL * 1024UL); + else + free(eo_ctx->mz_data); + + eo_ctx->mz_data = NULL; + } + + double rt = TIME_STAMP_FN() - eo_ctx->appstart; + + APPL_PRINT("EO runtime was %.2f min\n", rt / 1e9 / 60); + return EM_OK; +} + +/** + * @private + * + * EO stop local function. 
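+ *
+ * Frees the per-core trace buffer allocated in app_eo_start_local().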
+ */ +static em_status_t app_eo_stop_local(void *eo_context, em_eo_t eo) +{ + int core = em_core_id(); + app_eo_ctx_t *const eo_ctx = eo_context; + + (void)eo; + + if (EXTRA_PRINTS) + APPL_PRINT("EO local stop\n"); + free_tracebuf(eo_ctx->cdat[core].trc, eo_ctx->cdat[core].trc_size); + eo_ctx->cdat[core].trc = NULL; + return EM_OK; +} + +/** + * @private + * + * EO receive function + */ +static void app_eo_receive(void *eo_context, em_event_t event, + em_event_type_t type, em_queue_t queue, + void *q_context) +{ + app_eo_ctx_t *const eo_ctx = eo_context; + int reuse = 0; + static int last_count; + + (void)q_context; + + if (type == EM_EVENT_TYPE_SW || type == g_options.etype) { + app_msg_t *msgin = (app_msg_t *)em_event_pointer(event); + + switch (msgin->command) { + case CMD_TMO: + reuse = handle_periodic(eo_ctx, event); + break; + + case CMD_HEARTBEAT: /* uses atomic queue */ + handle_heartbeat(eo_ctx, event); + last_count = msgin->count; + reuse = 1; + break; + + case CMD_BGWORK: + reuse = do_bg_work(event, eo_ctx); + break; + + case CMD_DONE: /* HB atomic queue */ { + e_state state = __atomic_load_n(&eo_ctx->state, __ATOMIC_ACQUIRE); + + /* only do this once */ + if (state == STATE_RUN && queue == eo_ctx->stop_q) { + __atomic_store_n(&eo_ctx->state, STATE_COOLOFF, __ATOMIC_SEQ_CST); + add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), STATE_COOLOFF, -1); + eo_ctx->last_hbcount = last_count; + eo_ctx->stopped = TIME_STAMP_FN(); + APPL_PRINT("Core %d reported DONE\n", msgin->id); + } + } + break; + + default: + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Invalid event!\n"); + } + } else { + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Invalid event type!\n"); + } + + if (!reuse) + em_free(event); +} + +int main(int argc, char *argv[]) +{ + /* pick app-specific arguments after '--' */ + int i; + + APPL_PRINT("EM periodic timer test %s\n\n", VERSION); + + for (i = 1; i < argc; i++) { + if (!strcmp(argv[i], "--")) + break; + } + if (i < argc) { + if (!parse_my_args(i, argc, argv)) { + APPL_PRINT("Invalid application arguments\n"); + return 1; + } + } + + return cm_setup(argc, argv); +} diff --git a/programs/performance/timer_test_periodic.h b/programs/performance/timer_test_periodic.h index 9b0f84a6..09fd298e 100644 --- a/programs/performance/timer_test_periodic.h +++ b/programs/performance/timer_test_periodic.h @@ -1,243 +1,243 @@ -#include - -#define APP_EO_NAME "testEO" -#define DEF_TMO_DATA 100 /* per core, MAX_TMO_DATA * sizeof(tmo_data) */ -#define MAX_TMO_BYTES 1000000000ULL /* sanity limit 1GB tracebuf per core */ -#define STOP_THRESHOLD 90 /* % of full buffer */ -#define MAX_CORES 64 -#define INIT_WAIT 5 /* startup wait, HBs (=secs) */ -#define MEAS_PERIOD 5 /* freq meas HBs */ -#define DEF_RES_NS 1000000ULL /* 1ms seems like a good generic default */ -#define DEF_PERIOD 20 /* default period, N * res */ -#define DEF_MIN_PERIOD 5 /* min period default, N * res */ -#define DEF_MAX_PERIOD (2 * 1000 * 1000 * 1000ULL) /* 2 sec */ -#define EXTRA_PRINTS 0 /* dev option, normally 0 */ -#define MAX_TEST_TIMERS 32 -#define TIME_STAMP_FN odp_time_global_ns -#define PRINT_MAX_TMRS 2 -#define MIN_COOLOFF 5 /* secs */ - -const struct option longopts[] = { - {"num-tmo", required_argument, NULL, 'n'}, - {"resolution", required_argument, NULL, 'r'}, - {"res_hz", required_argument, NULL, 'z'}, - {"period", required_argument, NULL, 'p'}, - {"first", required_argument, NULL, 'f'}, - {"max-period", required_argument, NULL, 'm'}, - {"min-period", required_argument, NULL, 'l'}, - {"clk", 
required_argument, NULL, 'c'}, - {"write", optional_argument, NULL, 'w'}, - {"num-runs", required_argument, NULL, 'x'}, - {"tracebuf", required_argument, NULL, 't'}, - {"extra-work", required_argument, NULL, 'e'}, - {"background-job", required_argument, NULL, 'j'}, - {"skip", no_argument, NULL, 's'}, - {"api-prof", no_argument, NULL, 'a'}, - {"dispatch-prof", no_argument, NULL, 'd'}, - {"job-prof", no_argument, NULL, 'b'}, - {"info", no_argument, NULL, 'i'}, - {"use-huge", no_argument, NULL, 'u'}, - {"no-delete", no_argument, NULL, 'q'}, - {"same-start", no_argument, NULL, 'S'}, - {"recreate", no_argument, NULL, 'R'}, - {"memzero", required_argument, NULL, 'o'}, - {"abort", required_argument, NULL, 'k'}, - {"num-timers", required_argument, NULL, 'y'}, - {"event-type", required_argument, NULL, 'g'}, - {"stop-limit", required_argument, NULL, 'L'}, - {"help", no_argument, NULL, 'h'}, - {NULL, 0, NULL, 0} -}; - -const char *shortopts = "n:r:p:f:m:l:c:w::x:t:e:j:sSRadbiuqhz:o:k:y:g:L:"; -/* descriptions for above options, keep in sync! */ -const char *descopts[] = { - "Number of concurrent timeouts to create", - "Resolution of test timer (ns), Use 0 for highest supported", - "Resolution of periodic test timer as frequency (Hz). Use either -r or -z", - "Period of periodic test timer (ns). 0 for random", - "First period (ns, default 0 = same as period, use negative for random up to)", - "Maximum period (ns)", - "Minimum period (ns, only used for random tmo)", - "Clock source (integer. See event_machine_timer_hw_specific.h)", - "Write raw trace data in csv format to given file e.g.-wtest.csv (default stdout)", - "Number of test runs, 0 to run forever", - "Trace buffer size (events per core). Optional stop threshold % e.g. -t100,80 to stop 80% before full", - "Extra work per tmo: -e1,20,50 e.g. min_us,max_us,propability % of work", - "Extra background job: -j2,20,500,10 e.g. num,time_us,total_kB,chunk_kB", - "Create timer without NOSKIP option", - "Measure API calls", - "Include dispatcher trace (EO enter-exit, analysis currently broken)", - "Include bg job profile (note - can fill buffer quickly)", - "Only print timer capabilities and exit", - "Use huge page for trace buffer", - "Don't delete timeouts between runs (if -x)", - "Use same starting tick value for all timeouts (EXPERIMENTAL)", - "Delete and re-create test timer (with -x)", - "Allocate and clear memory: -o50,100[,1] to clear 50MB (,1 to use huge pg) every 100ms. Special HW test, must also use -j", - "Abort application after given tmos (test abnormal exit). Use negative count to do segfault instead", - "Number of timers to use for test. Default 1", - "Use alternative event type for timeout events (dec. number). Default EM_EVENT_TYPE_SW", - "Stop rounds (-x) on timing error greater than given (ns). Default 0 = no stop", - "Print usage and exit", - NULL -}; - -const char *instructions = -"\nMain purpose of this experimental tool is to manually test periodic timer accuracy and\n" -"behaviour optionally under (over)load. Test is controlled by command line arguments.\n" -"No argument default runs a basic test and exits. Some API overheads can also be optionally measured.\n" -"\nAt least two EM timers are created. One for a heartbeat driving test states. Second\n" -"timer (or multiple) is used for testing the periodic timeouts. It can be created with\n" -"given attributes to also test limits. 
All the test timers are configured the same way.\n" -"If multiple timers are used the timeouts are randomly placed on those.\n\n" -"Test runs in states:\n" -" STATE_INIT let some time pass before starting\n" -" STATE_MEASURE measure timer tick frequency against linux clock\n" -" STATE_STABILIZE finish all prints before starting run\n" -" STATE_RUN timeouts created and measured\n" -" STATE_COOLOFF first core hitting trace buffer limit sets cooling,\n" -" i.e. coming timeout(s) are cancelled no more analyzed\n" -" STATE_ANALYZE Statistics and trace file generation, restart (if -x)\n" -"\nBy default there is no background load and the handling of incoming\n" -"timeouts (to high priority parallel queue) is minimized. Extra work can be\n" -"added in two ways:\n\n" -"1) --extra-work min us, max us, propability %\n" -" this will add random delay between min-max before calling ack().\n" -" Delay is added with the given propability (100 for all tmos)\n" -" e.g. -e10,100,50 to add random delay between 10 and 100us with 50%\n" -" propability\n" -"2) --background-job num,length us,total_kB,chunk_kB\n" -" this adds background work handled via separate low priority parallel\n" -" queue. num events are sent at start. Receiving touches given amount\n" -" of data (chunk at a time) for given amount of time and\n" -" then sends it again\n" -" e.g. -j1,10,500,20 adds one event of 10us processing over 20kB data\n" -" randomly picked from 500kB\n\n" -"Test can write a file of measured timings (-w). It is in CSV format and can\n" -"be imported e.g. to excel for plotting. -w without name prints to stdout\n" -"\nSingle time values can be postfixed with n,u,m,s to indicate nano(default),\n" -"micro, milli or seconds. e.g. -p1m for 1ms. Integer only\n"; - -typedef enum e_op { - OP_TMO, - OP_HB, - OP_STATE, - OP_CANCEL, - OP_WORK, - OP_ACK, - OP_BGWORK, - OP_MEMZERO, - OP_MEMZERO_END, - OP_PROF_ACK, /* linux time used as tick diff for each PROF */ - OP_PROF_DELETE, - OP_PROF_CANCEL, - OP_PROF_CREATE, - OP_PROF_SET, - OP_PROF_ENTER_CB, - OP_PROF_EXIT_CB, - OP_PROF_TMR_CREATE, - OP_PROF_TMR_DELETE, - - OP_LAST -} e_op; -const char *op_labels[] = { - "TMO", - "HB", - "STATE", - "CANCEL", - "WORK", - "ACK", - "BG-WORK", - "MEMZERO-TST", - "MEMZERO-END", - "PROF-ACK", - "PROF-DEL", - "PROF-CANCEL", - "PROF-CREATE", - "PROF-SET", - "PROF-ENTER_CB", - "PROF-EXIT_CB", - "PROF-TMR_CREATE", - "PROF-TMR-DELETE", - - "" -}; - -typedef struct tmo_trace { - int id; - e_op op; - uint64_t tick; - uint64_t ts; - uint64_t linuxt; - int count; - int tidx; -} tmo_trace; - -#define RND_STATE_BUF 32 -typedef struct rnd_state_t { - struct random_data rdata; - char rndstate[RND_STATE_BUF]; -} rnd_state_t; - -typedef struct core_data { - int count ODP_ALIGNED_CACHE; - tmo_trace *trc; - size_t trc_size; - int cancelled; - int jobs; - int jobs_deleted; - rnd_state_t rng; - uint64_t enter; - uint64_t acc_time; -} core_data; - -typedef enum e_cmd { - CMD_HEARTBEAT, - CMD_TMO, - CMD_DONE, - CMD_BGWORK, - - CMD_LAST -} e_cmd; - -typedef struct app_msg_t { - e_cmd command; - int count; - em_tmo_t tmo; - int tidx; - int id; - uint64_t arg; -} app_msg_t; - -typedef enum e_state { - STATE_INIT, /* before start */ - STATE_MEASURE, /* measure timer freq */ - STATE_STABILIZE,/* finish all printing before tmo setup */ - STATE_RUN, /* timers running */ - STATE_COOLOFF, /* cores cancel timers */ - STATE_ANALYZE, /* timestamps analyzed */ - - STATE_LAST -} e_state; - -const char *state_labels[] = { - "INIT", - "MEASURE", - "STABILIZE", - "RUN", - "COOLOFF", 
- "ANALYZE" -}; - -typedef struct tmo_setup { - uint64_t start_ts; - em_tmo_t handle; - uint64_t start; - uint64_t period_ns; - uint64_t first_ns; - uint64_t first; - uint64_t ticks; - uint64_t ack_late; - int tidx; -} tmo_setup; +#include + +#define APP_EO_NAME "testEO" +#define DEF_TMO_DATA 100 /* per core, MAX_TMO_DATA * sizeof(tmo_data) */ +#define MAX_TMO_BYTES 1000000000ULL /* sanity limit 1GB tracebuf per core */ +#define STOP_THRESHOLD 90 /* % of full buffer */ +#define MAX_CORES 64 +#define INIT_WAIT 5 /* startup wait, HBs (=secs) */ +#define MEAS_PERIOD 5 /* freq meas HBs */ +#define DEF_RES_NS 1000000ULL /* 1ms seems like a good generic default */ +#define DEF_PERIOD 20 /* default period, N * res */ +#define DEF_MIN_PERIOD 5 /* min period default, N * res */ +#define DEF_MAX_PERIOD (2 * 1000 * 1000 * 1000ULL) /* 2 sec */ +#define EXTRA_PRINTS 0 /* dev option, normally 0 */ +#define MAX_TEST_TIMERS 32 +#define TIME_STAMP_FN odp_time_global_ns +#define PRINT_MAX_TMRS 2 +#define MIN_COOLOFF 5 /* secs */ + +const struct option longopts[] = { + {"num-tmo", required_argument, NULL, 'n'}, + {"resolution", required_argument, NULL, 'r'}, + {"res_hz", required_argument, NULL, 'z'}, + {"period", required_argument, NULL, 'p'}, + {"first", required_argument, NULL, 'f'}, + {"max-period", required_argument, NULL, 'm'}, + {"min-period", required_argument, NULL, 'l'}, + {"clk", required_argument, NULL, 'c'}, + {"write", optional_argument, NULL, 'w'}, + {"num-runs", required_argument, NULL, 'x'}, + {"tracebuf", required_argument, NULL, 't'}, + {"extra-work", required_argument, NULL, 'e'}, + {"background-job", required_argument, NULL, 'j'}, + {"skip", no_argument, NULL, 's'}, + {"api-prof", no_argument, NULL, 'a'}, + {"dispatch-prof", no_argument, NULL, 'd'}, + {"job-prof", no_argument, NULL, 'b'}, + {"info", no_argument, NULL, 'i'}, + {"use-huge", no_argument, NULL, 'u'}, + {"no-delete", no_argument, NULL, 'q'}, + {"same-start", no_argument, NULL, 'S'}, + {"recreate", no_argument, NULL, 'R'}, + {"memzero", required_argument, NULL, 'o'}, + {"abort", required_argument, NULL, 'k'}, + {"num-timers", required_argument, NULL, 'y'}, + {"event-type", required_argument, NULL, 'g'}, + {"stop-limit", required_argument, NULL, 'L'}, + {"help", no_argument, NULL, 'h'}, + {NULL, 0, NULL, 0} +}; + +const char *shortopts = "n:r:p:f:m:l:c:w::x:t:e:j:sSRadbiuqhz:o:k:y:g:L:"; +/* descriptions for above options, keep in sync! */ +const char *descopts[] = { + "Number of concurrent timeouts to create", + "Resolution of test timer (ns), Use 0 for highest supported", + "Resolution of periodic test timer as frequency (Hz). Use either -r or -z", + "Period of periodic test timer (ns). 0 for random", + "First period (ns, default 0 = same as period, use negative for random up to)", + "Maximum period (ns)", + "Minimum period (ns, only used for random tmo)", + "Clock source (integer. See event_machine_timer_hw_specific.h)", + "Write raw trace data in csv format to given file e.g.-wtest.csv (default stdout)", + "Number of test runs, 0 to run forever", + "Trace buffer size (events per core). Optional stop threshold % e.g. -t100,80 to stop 80% before full", + "Extra work per tmo: -e1,20,50 e.g. min_us,max_us,probability % of work", + "Extra background job: -j2,20,500,10 e.g. 
num,time_us,total_kB,chunk_kB", + "Create timer without NOSKIP option", + "Measure API calls", + "Include dispatcher trace (EO enter-exit, analysis currently broken)", + "Include bg job profile (note - can fill buffer quickly)", + "Only print timer capabilities and exit", + "Use huge page for trace buffer", + "Don't delete timeouts between runs (if -x)", + "Use same starting tick value for all timeouts (EXPERIMENTAL)", + "Delete and re-create test timer (with -x)", + "Allocate and clear memory: -o50,100[,1] to clear 50MB (,1 to use huge pg) every 100ms. Special HW test, must also use -j", + "Abort application after given tmos (test abnormal exit). Use negative count to do segfault instead", + "Number of timers to use for test. Default 1", + "Use alternative event type for timeout events (dec. number). Default EM_EVENT_TYPE_SW", + "Stop rounds (-x) on timing error greater than given (ns). Default 0 = no stop", + "Print usage and exit", + NULL +}; + +const char *instructions = +"\nMain purpose of this experimental tool is to manually test periodic timer accuracy and\n" +"behaviour optionally under (over)load. Test is controlled by command line arguments.\n" +"No argument default runs a basic test and exits. Some API overheads can also be optionally measured.\n" +"\nAt least two EM timers are created. One for a heartbeat driving test states. Second\n" +"timer (or multiple) is used for testing the periodic timeouts. It can be created with\n" +"given attributes to also test limits. All the test timers are configured the same way.\n" +"If multiple timers are used the timeouts are randomly placed on those.\n\n" +"Test runs in states:\n" +" STATE_INIT let some time pass before starting\n" +" STATE_MEASURE measure timer tick frequency against linux clock\n" +" STATE_STABILIZE finish all prints before starting run\n" +" STATE_RUN timeouts created and measured\n" +" STATE_COOLOFF first core hitting trace buffer limit sets cooling,\n" +" i.e. coming timeout(s) are cancelled no more analyzed\n" +" STATE_ANALYZE Statistics and trace file generation, restart (if -x)\n" +"\nBy default there is no background load and the handling of incoming\n" +"timeouts (to high priority parallel queue) is minimized. Extra work can be\n" +"added in two ways:\n\n" +"1) --extra-work min us, max us, probability %\n" +" this will add random delay between min-max before calling ack().\n" +" Delay is added with the given probability (100 for all tmos)\n" +" e.g. -e10,100,50 to add random delay between 10 and 100us with 50%\n" +" probability\n" +"2) --background-job num,length us,total_kB,chunk_kB\n" +" this adds background work handled via separate low priority parallel\n" +" queue. num events are sent at start. Receiving touches given amount\n" +" of data (chunk at a time) for given amount of time and\n" +" then sends it again\n" +" e.g. -j1,10,500,20 adds one event of 10us processing over 20kB data\n" +" randomly picked from 500kB\n\n" +"Test can write a file of measured timings (-w). It is in CSV format and can\n" +"be imported e.g. to excel for plotting. -w without name prints to stdout\n" +"\nSingle time values can be postfixed with n,u,m,s to indicate nano(default),\n" +"micro, milli or seconds. e.g. -p1m for 1ms. 
Integer only\n"; + +typedef enum e_op { + OP_TMO, + OP_HB, + OP_STATE, + OP_CANCEL, + OP_WORK, + OP_ACK, + OP_BGWORK, + OP_MEMZERO, + OP_MEMZERO_END, + OP_PROF_ACK, /* linux time used as tick diff for each PROF */ + OP_PROF_DELETE, + OP_PROF_CANCEL, + OP_PROF_CREATE, + OP_PROF_SET, + OP_PROF_ENTER_CB, + OP_PROF_EXIT_CB, + OP_PROF_TMR_CREATE, + OP_PROF_TMR_DELETE, + + OP_LAST +} e_op; +const char *op_labels[] = { + "TMO", + "HB", + "STATE", + "CANCEL", + "WORK", + "ACK", + "BG-WORK", + "MEMZERO-TST", + "MEMZERO-END", + "PROF-ACK", + "PROF-DEL", + "PROF-CANCEL", + "PROF-CREATE", + "PROF-SET", + "PROF-ENTER_CB", + "PROF-EXIT_CB", + "PROF-TMR_CREATE", + "PROF-TMR-DELETE", + + "" +}; + +typedef struct tmo_trace { + int id; + e_op op; + uint64_t tick; + uint64_t ts; + uint64_t linuxt; + int count; + int tidx; +} tmo_trace; + +#define RND_STATE_BUF 32 +typedef struct rnd_state_t { + struct random_data rdata; + char rndstate[RND_STATE_BUF]; +} rnd_state_t; + +typedef struct core_data { + int count ODP_ALIGNED_CACHE; + tmo_trace *trc; + size_t trc_size; + int cancelled; + int jobs; + int jobs_deleted; + rnd_state_t rng; + uint64_t enter; + uint64_t acc_time; +} core_data; + +typedef enum e_cmd { + CMD_HEARTBEAT, + CMD_TMO, + CMD_DONE, + CMD_BGWORK, + + CMD_LAST +} e_cmd; + +typedef struct app_msg_t { + e_cmd command; + int count; + em_tmo_t tmo; + int tidx; + int id; + uint64_t arg; +} app_msg_t; + +typedef enum e_state { + STATE_INIT, /* before start */ + STATE_MEASURE, /* measure timer freq */ + STATE_STABILIZE,/* finish all printing before tmo setup */ + STATE_RUN, /* timers running */ + STATE_COOLOFF, /* cores cancel timers */ + STATE_ANALYZE, /* timestamps analyzed */ + + STATE_LAST +} e_state; + +const char *state_labels[] = { + "INIT", + "MEASURE", + "STABILIZE", + "RUN", + "COOLOFF", + "ANALYZE" +}; + +typedef struct tmo_setup { + uint64_t start_ts; + em_tmo_t handle; + uint64_t start; + uint64_t period_ns; + uint64_t first_ns; + uint64_t first; + uint64_t ticks; + uint64_t ack_late; + int tidx; +} tmo_setup; diff --git a/programs/performance/timer_test_ring.c b/programs/performance/timer_test_ring.c index 39568255..66fdb3e2 100644 --- a/programs/performance/timer_test_ring.c +++ b/programs/performance/timer_test_ring.c @@ -1,1480 +1,1489 @@ -/* - * Copyright (c) 2023, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine timer ring test for alternative periodic timeouts. - * - * see instruction text at timer_test_ring.h. - * - */ -#ifndef _GNU_SOURCE -#define _GNU_SOURCE -#endif -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "cm_setup.h" -#include "cm_error_handler.h" -#include "timer_test_ring.h" - -#define VERSION "WIP v0.3" -struct { - unsigned int loops; - em_fract_u64_t basehz[MAX_TEST_TIMERS]; - uint64_t multiplier[MAX_TEST_TIMERS]; - uint64_t res_ns[MAX_TEST_TIMERS]; - uint64_t max_mul[MAX_TEST_TMO]; - uint64_t start_offset[MAX_TEST_TMO]; - unsigned int num_timers; - unsigned int looptime; - bool recreate; - bool reuse_tmo; - bool reuse_ev; - bool profile; - unsigned int num_tmo; - int64_t delay_us; - em_timer_clksrc_t clksrc; - unsigned int tracelen; - char tracefile[MAX_FILENAME]; - -} g_options = { - .loops = 1, - .basehz = { {100, 0, 0 } }, /* per timer */ - .multiplier = { 1 }, /* per timerout */ - .res_ns = { 0 }, /* per timer */ - .max_mul = { 8 }, /* per timer */ - .start_offset = { 0 }, /* per timeout */ - .num_timers = 1, - .looptime = 30, - .recreate = false, - .reuse_tmo = false, - .reuse_ev = false, - .profile = false, - .num_tmo = 1, - .delay_us = 0, - .clksrc = 0, - .tracelen = 0, - .tracefile = "stdout" -}; - -static timer_app_shm_t *m_shm; -static odp_shm_t odp_shm; -static odp_shm_t odp_shm_trace; -static __thread trace_entry_t *m_tracebuf; -static __thread unsigned int m_tracecount; -odp_ticketlock_t tracelock; - -/* --------------------------------------- */ -static void usage(void); -static int parse_my_args(int first, int argc, char *argv[]); -static em_status_t app_eo_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); -static em_status_t app_eo_start_local(void *eo_context, em_eo_t eo); -static em_status_t app_eo_stop(void *eo_context, em_eo_t eo); -static em_status_t app_eo_stop_local(void *eo_context, em_eo_t eo); -static void app_eo_receive(void *eo_context, em_event_t event, - em_event_type_t type, em_queue_t queue, void *q_context); -static em_status_t my_error_handler(em_eo_t eo, em_status_t error, - em_escope_t escope, va_list args); -static bool handle_heartbeat(app_eo_ctx_t *eo_ctx, em_event_t event, - app_msg_t *msgin, uint64_t now); -static bool handle_tmo(app_eo_ctx_t *eo_ctx, em_event_t event, uint64_t now); -static void analyze_and_print(app_eo_ctx_t *eo_ctx, int loop); -static void global_summary(app_eo_ctx_t *eo_ctx); -static void print_setup(void); -static void restart(app_eo_ctx_t *eo_ctx, int count); -static void delete_test_timer(app_eo_ctx_t *eo_ctx); -static void create_test_timer(app_eo_ctx_t *eo_ctx); -static int split_list(char *str, uint64_t *list, int maxnum); -static int split_float_list(char *str, em_fract_u64_t *list, int maxnum); -static void approx_fract(double f, em_fract_u64_t *fract); -static void fix_setup(void); -static void create_test_timeouts(app_eo_ctx_t 
*eo_ctx); -static void delete_test_timeouts(app_eo_ctx_t *eo_ctx, bool force); -static void delete_test_events(app_eo_ctx_t *eo_ctx, bool force); -static void dump_trace(app_eo_ctx_t *eo_ctx); -static void enter_cb(em_eo_t eo, void **eo_ctx, em_event_t events[], int num, - em_queue_t *queue, void **q_ctx); -static void exit_cb(em_eo_t eo); -static void extra_delay(rnd_state_t *rnd, int core, unsigned int tmri, unsigned int tmoi); - -/* --------------------------------------- */ -em_status_t my_error_handler(em_eo_t eo, em_status_t error, - em_escope_t escope, va_list args) -{ - if (escope == 0xDEAD) { /* test_fatal_if */ - char *file = va_arg(args, char*); - const char *func = va_arg(args, const char*); - const int line = va_arg(args, const int); - const char *format = va_arg(args, const char*); - const char *base = basename(file); - - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wformat-nonliteral" - fprintf(stderr, "FATAL - %s:%d, %s():\n", - base, line, func); - vfprintf(stderr, format, args); - #pragma GCC diagnostic pop - } - return test_error_handler(eo, error, escope, args); -} - -static void enter_cb(em_eo_t eo, void **eo_ctx, em_event_t events[], int num, - em_queue_t *queue, void **q_ctx) -{ - app_eo_ctx_t *const my_eo_ctx = *eo_ctx; - int core = em_core_id(); - - (void)eo; - (void)queue; - (void)q_ctx; - (void)events; - (void)num; - - if (unlikely(!my_eo_ctx)) - return; - - my_eo_ctx->cdat[core].enter_ns = TEST_TIME_FN(); - if (likely(my_eo_ctx->cdat[core].exit_ns)) - my_eo_ctx->cdat[core].non_eo_ns += my_eo_ctx->cdat[core].enter_ns - - my_eo_ctx->cdat[core].exit_ns; -} - -static void exit_cb(em_eo_t eo) -{ - app_eo_ctx_t *const my_eo_ctx = em_eo_get_context(eo); - int core = em_core_id(); - - if (unlikely(!my_eo_ctx)) - return; - - my_eo_ctx->cdat[core].exit_ns = TEST_TIME_FN(); - my_eo_ctx->cdat[core].eo_ns += my_eo_ctx->cdat[core].exit_ns - - my_eo_ctx->cdat[core].enter_ns; -} - -static inline double frac2float(em_fract_u64_t frac) -{ - double f = frac.integer; - - if (frac.numer) - f += (double)frac.numer / (double)frac.denom; - return f; -} - -static inline void *tmo2ptr(unsigned int tmr, unsigned int tmo) -{ - return (void *)(((uint64_t)tmr << 16) + tmo); -} - -static inline void ptr2tmo(void *ptr, unsigned int *tmr, unsigned int *tmo) -{ - uint64_t x = (uint64_t)ptr; - - *tmo = x & 0xFFFF; - *tmr = x >> 16; -} - -static inline void profile_add(uint64_t t1, uint64_t t2, prof_apis api, - app_eo_ctx_t *eo_ctx, int core) -{ - uint64_t diff = t2 - t1; - - if (eo_ctx->cdat[core].prof[api].min > diff) - eo_ctx->cdat[core].prof[api].min = diff; - if (eo_ctx->cdat[core].prof[api].max < diff) - eo_ctx->cdat[core].prof[api].max = diff; - eo_ctx->cdat[core].prof[api].acc += diff; - eo_ctx->cdat[core].prof[api].num++; -} - -static inline void trace_add(uint64_t ts, trace_op_t op, uint32_t val, - int64_t arg1, int64_t arg2, void *arg3, void *arg4) -{ - trace_entry_t *tp; - - if (g_options.tracelen == 0) - return; /* disabled */ - - if (unlikely(m_tracecount >= g_options.tracelen)) { /* overflow marker */ - tp = &m_tracebuf[g_options.tracelen - 1]; - tp->ns = TEST_TIME_FN(); - tp->op = TRACE_OP_LAST; - tp->val = val; - tp->arg1 = arg1; - tp->arg2 = arg2; - tp->arg3 = arg3; - tp->arg4 = arg4; - return; - } - - tp = &m_tracebuf[m_tracecount]; - tp->ns = ts; - tp->op = op; - tp->val = val; - tp->arg1 = arg1; - tp->arg2 = arg2; - tp->arg3 = arg3; - tp->arg4 = arg4; - - m_tracecount++; -} - -static void extra_delay(rnd_state_t *rnd, int core, unsigned int tmri, unsigned int 
tmoi) -{ - uint64_t t1 = TEST_TIME_FN(); - uint64_t ns; - - if (g_options.delay_us < 0) { /* random */ - int32_t r1; - - random_r(&rnd->rdata, &r1); - ns = (uint64_t)r1 % (1000 * (labs(g_options.delay_us) + 1)); - } else { - ns = g_options.delay_us * 1000UL; - } - - trace_add(TEST_TIME_FN(), TRACE_OP_DELAY, core, tmri, tmoi, (void *)ns, NULL); - while (TEST_TIME_FN() < (ns + t1)) { - /* delay */ - }; -} - -static void dump_trace(app_eo_ctx_t *eo_ctx) -{ - static bool title = true; /* header once */ - - if (g_options.tracelen == 0) - return; - - FILE *df = stdout; - - if (strcmp(g_options.tracefile, "stdout")) - df = fopen(g_options.tracefile, title ? "w" : "a"); - - if (!df) { - APPL_PRINT("Failed to open dump file!\n"); - return; - } - - if (title) { - fprintf(df, "#BEGIN RING TRACE FORMAT 1\n"); - /* dump setup */ - fprintf(df, "cores,loops,num_timer,num_tmo,recreate_tmr,reuse_tmo,reuse_ev,delay_us,tracelen,ver\n"); - fprintf(df, "%u,%u,%u,%u,%u,%u,%u,%ld,%u,%s\n", m_shm->core_count, - g_options.loops, g_options.num_timers, g_options.num_tmo, - g_options.recreate, g_options.reuse_tmo, g_options.reuse_ev, - g_options.delay_us, g_options.tracelen, VERSION); - /* dump timeouts */ - fprintf(df, "#TMO:\ntmr,tmo,tick_hz,res_ns,base_hz,mul,startrel\n"); - for (unsigned int tmr = 0; tmr < g_options.num_timers; tmr++) - for (unsigned int tmo = 0; tmo < g_options.num_tmo; tmo++) { - fprintf(df, "%u,%u,%lu,%lu,%f,%lu,%lu\n", - tmr, tmo, eo_ctx->tick_hz[tmr], g_options.res_ns[tmr], - frac2float(g_options.basehz[tmr]), - g_options.multiplier[tmo], g_options.start_offset[tmo]); - } - - /* and then the trace events */ - fprintf(df, "#EVENTS:\n"); - fprintf(df, "core,ns,op,arg1,arg2,arg3,arg4\n"); - } - - for (uint32_t count = 0; count < m_tracecount; count++) { - trace_entry_t *tp = &m_tracebuf[count]; - - test_fatal_if(tp->op > TRACE_OP_LAST, "Invalid trace op %u!", tp->op); - - fprintf(df, "%u,%lu,%s,%ld,%ld,%p,%p\n", - tp->val, tp->ns, trace_op_labels[tp->op], - tp->arg1, tp->arg2, tp->arg3, tp->arg4); - } - - if (df != stdout) - fclose(df); - title = false; -} - -static void usage(void) -{ - printf("%s\n", instructions); - - printf("Options:\n"); - for (int i = 0; ; i++) { - if (longopts[i].name == NULL || descopts[i] == NULL) - break; - printf("--%s or -%c: %s\n", longopts[i].name, longopts[i].val, descopts[i]); - } -} - -static void print_timers(void) -{ - em_timer_attr_t attr; - em_timer_t tmr[PRINT_MAX_TMRS]; - - int num_timers = em_timer_get_all(tmr, PRINT_MAX_TMRS); - - for (int i = 0; i < (num_timers > PRINT_MAX_TMRS ? 
PRINT_MAX_TMRS : num_timers); i++) { - test_fatal_if(em_timer_get_attr(tmr[i], &attr) != EM_OK, "Can't get timer info\n"); - - APPL_PRINT("Timer \"%s\" info:\n", attr.name); - APPL_PRINT(" -is ring: "); - if (attr.flags & EM_TIMER_FLAG_RING) { - double hz = frac2float(attr.ringparam.base_hz); - - APPL_PRINT(" yes (base_hz %.3f, max_mul %lu)\n", - hz, attr.ringparam.max_mul); - APPL_PRINT(" -resolution: %" PRIu64 " ns\n", attr.ringparam.res_ns); - APPL_PRINT(" -clk_src: %d\n", attr.ringparam.clk_src); - } else { - APPL_PRINT("no\n"); - APPL_PRINT(" -resolution: %" PRIu64 " ns\n", attr.resparam.res_ns); - APPL_PRINT(" -max_tmo: %" PRIu64 " ms\n", attr.resparam.max_tmo / 1000); - APPL_PRINT(" -clk_src: %d\n", attr.resparam.clk_src); - } - APPL_PRINT(" -num_tmo: %d\n", attr.num_tmo); - APPL_PRINT(" -tick Hz: %" PRIu64 " hz\n", em_timer_get_freq(tmr[i])); - } -} - -/* adjust timer setup basehz,mul to same lengths */ -static void fix_setup(void) -{ - for (unsigned int i = 1; i < g_options.num_timers; i++) { - if (g_options.basehz[i].integer == 0) { - g_options.basehz[i].integer = 100; - g_options.basehz[i].numer = 0; - } - if (g_options.max_mul[i] == 0) - g_options.max_mul[i] = 8; - /* res 0 is ok = default */ - } - for (unsigned int i = 1; i < g_options.num_tmo; i++) { - if (g_options.multiplier[i] == 0) - g_options.multiplier[i] = 1; - } - - if (g_options.recreate && g_options.reuse_tmo) { - APPL_PRINT("\nWARNING: Can't recreate timers AND re-use tmo, re-use disabled\n"); - g_options.reuse_tmo = false; - } -} - -/* separate comma limited integer argument */ -static int split_list(char *str, uint64_t *list, int maxnum) -{ - int num = 0; - char *p = strtok(str, ","); - - while (p) { - list[num] = (uint64_t)atoll(p); - num++; - if (num >= maxnum) - break; - p = strtok(NULL, ","); - } - - return num; -} - -/* this could be better, but for now just use fixed point to 100th */ -static void approx_fract(double val, em_fract_u64_t *fract) -{ - double intp; - double p = modf(val, &intp); - - fract->numer = round(100 * p); - fract->denom = 100; -} - -/* separate comma limited float argument */ -static int split_float_list(char *str, em_fract_u64_t *list, int maxnum) -{ - int num = 0; - char *p = strtok(str, ","); - - while (p) { - list[num].integer = (uint64_t)atoll(p); - approx_fract(atof(p), &list[num]); - num++; - if (num >= maxnum) - break; - p = strtok(NULL, ","); - } - - return num; -} - -static int parse_my_args(int first, int argc, char *argv[]) -{ - optind = first + 1; /* skip '--' */ - while (1) { - int opt; - int long_index; - char *endptr; - long num; - - opt = getopt_long(argc, argv, shortopts, longopts, &long_index); - - if (opt == -1) - break; /* No more options */ - - switch (opt) { - case 'b': { - uint64_t hz[MAX_TEST_TIMERS]; - - num = split_list(optarg, hz, MAX_TEST_TIMERS); - if (num < 1) - return 0; - for (int i = 0; i < num; i++) { - g_options.basehz[i].integer = hz[i]; - g_options.basehz[i].numer = 0; - } - if (num > g_options.num_timers) - g_options.num_timers = num; - } - break; - - case 'f': - num = split_float_list(optarg, &g_options.basehz[0], MAX_TEST_TIMERS); - if (num < 1) - return 0; - if (num > g_options.num_timers) - g_options.num_timers = num; - break; - - case 'l': - num = strtol(optarg, &endptr, 0); - if (*endptr != '\0' || num < 1) - return 0; - g_options.loops = (unsigned int)num; - break; - - case 't': - num = strtol(optarg, &endptr, 0); - if (*endptr != '\0' || num < 1) - return 0; - g_options.looptime = (int)num; - break; - - case 'c': - num = strtol(optarg, 
&endptr, 0); - if (*endptr != '\0' || num < 0) - return 0; - g_options.clksrc = (em_timer_clksrc_t)num; - break; - - case 'n': - num = strtol(optarg, &endptr, 0); - if (*endptr != '\0' || num < 1) - return 0; - g_options.num_tmo = (unsigned int)num; - break; - - case 'T': - num = strtol(optarg, &endptr, 0); - if (*endptr != '\0' || num < 0) - return 0; - g_options.tracelen = (unsigned int)num; - break; - - case 'd': - num = strtol(optarg, &endptr, 0); - if (*endptr != '\0') - return 0; - g_options.delay_us = (int64_t)num; - break; - - case 'w': { /* optional arg */ - if (optarg != NULL) { - if (strlen(optarg) >= MAX_FILENAME) - return 0; - strncpy(g_options.tracefile, optarg, MAX_FILENAME); - } - } - break; - - case 'r': - num = split_list(optarg, &g_options.res_ns[0], MAX_TEST_TIMERS); - if (num < 1) - return 0; - if (num > g_options.num_timers) - g_options.num_timers = num; - break; - - case 'm': - num = split_list(optarg, &g_options.multiplier[0], MAX_TEST_TMO); - if (num < 1) - return 0; - break; - - case 'o': - num = split_list(optarg, &g_options.start_offset[0], MAX_TEST_TMO); - if (num < 1) - return 0; - break; - - case 'M': - num = split_list(optarg, &g_options.max_mul[0], MAX_TEST_TIMERS); - if (num < 1) - return 0; - if (num > g_options.num_timers) - g_options.num_timers = num; - break; - - case 'R': - g_options.recreate = true; - break; - - case 'a': - g_options.profile = true; - break; - - case 'N': - g_options.reuse_tmo = true; - break; - - case 'E': - g_options.reuse_ev = true; - break; - - case 'h': - default: - opterr = 0; - usage(); - return 0; - } - } - - optind = 1; /* cm_setup() to parse again */ - return 1; -} - -void print_setup(void) -{ - APPL_PRINT("\nActive run options:\n"); - APPL_PRINT(" - loops: %d\n", g_options.loops); - APPL_PRINT(" - looptime: %d s\n", g_options.looptime); - APPL_PRINT(" - num timers: %d\n", g_options.num_timers); - APPL_PRINT(" - tmo per timer: %d\n", g_options.num_tmo); - APPL_PRINT(" - recreate timer: %s\n", g_options.recreate ? "yes" : "no"); - APPL_PRINT(" - reuse tmo: %s\n", g_options.reuse_tmo ? "yes" : "no"); - APPL_PRINT(" - reuse event: %s\n", g_options.reuse_ev ? "yes" : "no"); - if (g_options.tracelen) { - APPL_PRINT(" - tracebuf: %u\n", g_options.tracelen); - APPL_PRINT(" - tracefile: %s\n", g_options.tracefile); - } - APPL_PRINT(" - profile APIs: %s\n", g_options.profile ? "yes" : "no"); - APPL_PRINT(" - extra delay: %ld us %s\n", labs(g_options.delay_us), - g_options.delay_us < 0 ? 
"(rnd)" : ""); - - APPL_PRINT("\nTimer tmo basehz max_mul res_ns startrel mul ->hz\n"); - - for (unsigned int i = 0; i < g_options.num_timers; i++) { - double hz = frac2float(g_options.basehz[i]); - - for (unsigned int t = 0; t < g_options.num_tmo; t++) { - APPL_PRINT("%-5u %-4u %-15.3f %-9lu %-14lu %-12lu %-8lu %.3f\n", - i, t, hz, g_options.max_mul[i], - g_options.res_ns[i], g_options.start_offset[t], - g_options.multiplier[t], hz * (double)g_options.multiplier[t]); - } - } - APPL_PRINT("\n"); -} - -static void delete_test_timer(app_eo_ctx_t *eo_ctx) -{ - for (unsigned int i = 0; i < g_options.num_timers; i++) { - if (eo_ctx->test_tmr[i] != EM_TIMER_UNDEF) { - trace_add(TEST_TIME_FN(), TRACE_OP_TMR_DELETE, em_core_id(), - i, -1, NULL, eo_ctx->test_tmr[i]); - em_status_t rv = em_timer_delete(eo_ctx->test_tmr[i]); - - test_fatal_if(rv != EM_OK, "Ring timer[%d] delete fail, rv %d!", i, rv); - APPL_PRINT("Deleted test timer[%d]: %p\n", i, eo_ctx->test_tmr[i]); - eo_ctx->test_tmr[i] = EM_TIMER_UNDEF; - } - } -} - -static void create_test_timer(app_eo_ctx_t *eo_ctx) -{ - em_timer_attr_t rattr; - - for (unsigned int i = 0; i < g_options.num_timers; i++) { - em_status_t stat = em_timer_ring_attr_init(&rattr, - g_options.clksrc, - g_options.basehz[i].integer, - g_options.max_mul[i], - g_options.res_ns[i]); - - if (g_options.basehz[i].numer) { - rattr.ringparam.base_hz.numer = g_options.basehz[i].numer; - rattr.ringparam.base_hz.denom = g_options.basehz[i].denom; - } - if (EXTRA_PRINTS) { - APPL_PRINT("\nInitialized ring attr:\n"); - APPL_PRINT(" -clksrc: %u\n", g_options.clksrc); - APPL_PRINT(" -num_tmo: %u\n", rattr.num_tmo); - APPL_PRINT(" -base_hz: %" PRIu64 "\n", rattr.ringparam.base_hz.integer); - APPL_PRINT(" -base_hz n/d: %lu/%lu\n", rattr.ringparam.base_hz.numer, - rattr.ringparam.base_hz.denom); - APPL_PRINT(" -max_mul: %lu\n", rattr.ringparam.max_mul); - APPL_PRINT(" -res_ns: %lu\n", rattr.ringparam.res_ns); - } - - if (stat != EM_OK) { - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "Ring parameters not supported, ret %u!", stat); - } - - if (g_options.basehz[i].numer) { /* re-check values */ - em_timer_ring_param_t ring = rattr.ringparam; - - if (em_timer_ring_capability(&rattr.ringparam) == EM_ERR_NOT_SUPPORTED) { - APPL_PRINT("WARN: Arguments not exactly supported:\n"); - - APPL_PRINT("base_hz: %lu %lu/%lu -> %lu %lu/%lu\n", - ring.base_hz.integer, ring.base_hz.numer, - ring.base_hz.denom, rattr.ringparam.base_hz.integer, - rattr.ringparam.base_hz.numer, - rattr.ringparam.base_hz.denom); - APPL_PRINT("max_mul: %lu -> %lu\n", - ring.max_mul, rattr.ringparam.max_mul); - APPL_PRINT("res_ns: %lu -> %lu\n", - ring.res_ns, rattr.ringparam.res_ns); - } - } - strncpy(rattr.name, "RingTmr", EM_TIMER_NAME_LEN); - em_timer_t rtmr = em_timer_ring_create(&rattr); - - trace_add(TEST_TIME_FN(), TRACE_OP_TMR_CREATE, em_core_id(), i, -1, NULL, rtmr); - test_fatal_if(rtmr == EM_TIMER_UNDEF, "Ring timer create fail!"); - eo_ctx->test_tmr[i] = rtmr; - eo_ctx->tick_hz[i] = em_timer_get_freq(rtmr); - if (EXTRA_PRINTS) - APPL_PRINT("Created test timer[%d]: %p\n", i, rtmr); - } /* next timer */ -} - -static void create_test_timeouts(app_eo_ctx_t *eo_ctx) -{ - /* create test timeout(s) */ - for (unsigned int t = 0; t < g_options.num_timers; t++) { - for (unsigned int to = 0; to < g_options.num_tmo; to++) { - em_tmo_args_t args = { .userptr = tmo2ptr(t, to) }; - - if (eo_ctx->test_tmo[t][to] == EM_TMO_UNDEF) { - uint64_t t1 = TEST_TIME_FN(); - - eo_ctx->test_tmo[t][to] = 
em_tmo_create_arg(eo_ctx->test_tmr[t], - EM_TMO_FLAG_PERIODIC, - eo_ctx->test_q, - &args); - profile_add(t1, TEST_TIME_FN(), PROF_TMO_CREATE, - eo_ctx, em_core_id()); - trace_add(t1, TRACE_OP_TMO_CREATE, em_core_id(), - t, to, eo_ctx->test_tmr[t], eo_ctx->test_tmo[t][to]); - test_fatal_if(eo_ctx->test_tmo[t][to] == EM_TMO_UNDEF, - "Can't allocate test_tmo!\n"); - } - - uint64_t tick_now = em_timer_current_tick(eo_ctx->test_tmr[t]); - uint64_t t1 = TEST_TIME_FN(); - uint64_t startabs = 0; - - if (g_options.start_offset[to]) - startabs = tick_now + g_options.start_offset[to]; - trace_add(t1, TRACE_OP_TMO_SET, em_core_id(), - t, to, (void *)tick_now, eo_ctx->test_ev[t][to]); - t1 = TEST_TIME_FN(); - em_status_t stat = em_tmo_set_periodic_ring(eo_ctx->test_tmo[t][to], - startabs, - g_options.multiplier[to], - eo_ctx->test_ev[t][to]); - - profile_add(t1, TEST_TIME_FN(), PROF_TMO_SET, eo_ctx, em_core_id()); - test_fatal_if(stat != EM_OK, "Can't activate test tmo[%d][%d], ret %u!\n", - t, to, stat); - eo_ctx->first_time[t][to] = t1; - eo_ctx->test_ev[t][to] = EM_EVENT_UNDEF; /* now given to timer */ - } - } -} - -static void delete_test_timeouts(app_eo_ctx_t *eo_ctx, bool force) -{ - int core = em_core_id(); - - /* force == true means final cleanup, otherwise may skip if re-use option is active */ - - for (unsigned int ti = 0; ti < g_options.num_timers; ti++) { - for (unsigned int tmoi = 0; tmoi < g_options.num_tmo; tmoi++) { - if (eo_ctx->test_tmo[ti][tmoi] == EM_TMO_UNDEF) - continue; - - em_tmo_state_t s = em_tmo_get_state(eo_ctx->test_tmo[ti][tmoi]); - - test_fatal_if(s == EM_TMO_STATE_ACTIVE, - "Unexpected tmo state ACTIVE after cancel\n"); - - if (!g_options.reuse_tmo || force) { - em_event_t ev = EM_EVENT_UNDEF; - uint64_t t1 = TEST_TIME_FN(); - - trace_add(t1, TRACE_OP_TMO_DELETE, core, - ti, tmoi, NULL, eo_ctx->test_tmo[ti][tmoi]); - - em_status_t rv = em_tmo_delete(eo_ctx->test_tmo[ti][tmoi], &ev); - - profile_add(t1, TEST_TIME_FN(), PROF_TMO_DELETE, eo_ctx, core); - test_fatal_if(rv != EM_OK, "tmo_delete fail, tmo = %p!", - eo_ctx->test_tmo[ti][tmoi]); - test_fatal_if(ev != EM_EVENT_UNDEF, - "Unexpected - tmo delete returned event %p", ev); - eo_ctx->test_tmo[ti][tmoi] = EM_TMO_UNDEF; - } - } - } -} - -static void delete_test_events(app_eo_ctx_t *eo_ctx, bool force) -{ - int core = em_core_id(); - - for (unsigned int ti = 0; ti < g_options.num_timers; ti++) { - for (unsigned int tmoi = 0; tmoi < g_options.num_tmo; tmoi++) { - if (eo_ctx->test_ev[ti][tmoi] == EM_EVENT_UNDEF) - continue; - if (!g_options.reuse_ev || force) { - trace_add(TEST_TIME_FN(), TRACE_OP_TMO_EV_FREE, - core, ti, tmoi, eo_ctx->test_tmo[ti][tmoi], - eo_ctx->test_ev[ti][tmoi]); - em_free(eo_ctx->test_ev[ti][tmoi]); - eo_ctx->test_ev[ti][tmoi] = EM_EVENT_UNDEF; - } - } - } -} - -static void restart(app_eo_ctx_t *eo_ctx, int count) -{ - if (g_options.recreate) - create_test_timer(eo_ctx); - - /* clear event counts, leave profiles */ - for (unsigned int c = 0; c < m_shm->core_count; c++) - for (unsigned int t = 0; t < g_options.num_timers; t++) - for (unsigned int to = 0; to < g_options.num_tmo; to++) - eo_ctx->cdat[c].count[t][to] = 0; - - eo_ctx->state = STATE_START; - eo_ctx->next_change = count + 2; -} - -static bool handle_heartbeat(app_eo_ctx_t *eo_ctx, em_event_t event, app_msg_t *msgin, uint64_t now) -{ - static unsigned int loops; - em_event_t ev = EM_EVENT_UNDEF; - - (void)eo_ctx; - (void)now; - - trace_add(now, TRACE_OP_HB_RX, em_core_id(), msgin->count, eo_ctx->state, NULL, event); - - if (EXTRA_PRINTS) - 
APPL_PRINT("."); - - msgin->count++; - if (msgin->count >= eo_ctx->next_change) { /* time to do something */ - /* State machine for test cycle (loop). Runs on heartbeat timeout every second. - * Some time is added between states so startup, printing etc is not causing jitter - * to time stamping - */ - int state = eo_ctx->state; - - switch (state) { - case STATE_START: - if (loops == 0) { - create_test_timer(eo_ctx); - print_timers(); - } - if (EXTRA_PRINTS) - APPL_PRINT("START\n"); - - /* start */ - eo_ctx->start_time = TEST_TIME_FN(); - eo_ctx->state++; /* atomic, go to RUN */ - create_test_timeouts(eo_ctx); - eo_ctx->next_change = msgin->count + g_options.looptime; - break; - - case STATE_RUN: - eo_ctx->state++; /* go to STOP */ - for (unsigned int ti = 0; ti < g_options.num_timers; ti++) { - for (unsigned int tmoi = 0; tmoi < g_options.num_tmo; tmoi++) { - em_status_t rv; - uint64_t t1 = TEST_TIME_FN(); - - rv = em_tmo_cancel(eo_ctx->test_tmo[ti][tmoi], &ev); - profile_add(t1, TEST_TIME_FN(), PROF_TMO_CANCEL, - eo_ctx, em_core_id()); - trace_add(t1, TRACE_OP_TMO_CANCEL, em_core_id(), - ti, tmoi, ev, eo_ctx->test_tmo[ti][tmoi]); - test_fatal_if(rv != EM_ERR_TOONEAR, - "cancel did not return expected TOONEAR!"); - } - } - eo_ctx->next_change = msgin->count + 3; /* enough to get all remaining */ - eo_ctx->stop_time = TEST_TIME_FN(); - break; - - case STATE_STOP: - if (EXTRA_PRINTS) - APPL_PRINT("\nSTOP\n"); - delete_test_timeouts(eo_ctx, false); - delete_test_events(eo_ctx, false); - eo_ctx->state++; /* go to ANALYZE */ - break; - - case STATE_ANALYZE: - loops++; - APPL_PRINT("\n\nLoop completed\n"); - analyze_and_print(eo_ctx, loops); - - if (loops >= g_options.loops) { /* all done, cleanup and summary */ - em_status_t rv = em_tmo_cancel(eo_ctx->heartbeat_tmo, &ev); - - test_fatal_if(rv != EM_OK && rv != EM_ERR_TOONEAR, "HB cncl fail"); - test_fatal_if(ev != EM_EVENT_UNDEF, - "not expecting event on cancel (at receive)"); - eo_ctx->state++; /* go to EXIT next */ - delete_test_timeouts(eo_ctx, true); - delete_test_events(eo_ctx, true); - - global_summary(eo_ctx); - - APPL_PRINT("Done, raising SIGINT!\n"); - trace_add(TEST_TIME_FN(), TRACE_OP_SIGINT, em_core_id(), - loops, -1, NULL, NULL); - raise(SIGINT); - return false; - } - /* next loop, re-start */ - if (g_options.recreate) - delete_test_timer(eo_ctx); - restart(eo_ctx, msgin->count); - break; - - case STATE_EXIT: - if (EXTRA_PRINTS) - APPL_PRINT("EXIT\n"); - return false; /* don't ack anymore */ - - default: - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "State invalid! 
%d\n", eo_ctx->state); - } - } - - trace_add(TEST_TIME_FN(), TRACE_OP_TMO_ACK, em_core_id(), -1, -1, NULL, event); - - em_status_t stat = em_tmo_ack(msgin->tmo, event); - - if (stat == EM_ERR_CANCELED) - return false; /* free event */ - if (stat != EM_OK) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "HB ack failed, ret %u, count %ld", stat, msgin->count); - return true; -} - -static bool handle_tmo(app_eo_ctx_t *eo_ctx, em_event_t event, uint64_t now) -{ - (void)eo_ctx; - (void)now; - - em_tmo_t tmo; - unsigned int tmri, tmoi; - int core = em_core_id(); - - test_fatal_if(em_tmo_get_type(event, &tmo, false) != EM_TMO_TYPE_PERIODIC, "not a TMO?!"); - - /* event has no user specific content, userptr holds encoded indexes */ - ptr2tmo(em_tmo_get_userptr(event, NULL), &tmri, &tmoi); - test_fatal_if(tmri >= MAX_TEST_TIMERS || tmoi >= MAX_TEST_TMO, - "Too large index, event corrupted?"); - test_fatal_if(tmo != eo_ctx->test_tmo[tmri][tmoi], - "tmo handle [%u][%u] does not match expected %p->%p\n", - tmri, tmoi, eo_ctx->test_tmo[tmri][tmoi], tmo); - - /* use passed rx timestamp for better accuracy. Could still improve by debug timestamps */ - uint64_t tick = em_timer_current_tick(eo_ctx->test_tmr[tmri]); - - trace_add(now, TRACE_OP_TMO_RX, core, tmri, tmoi, (void *)tick, event); - eo_ctx->cdat[core].count[tmri][tmoi]++; - trace_add(TEST_TIME_FN(), TRACE_OP_TMO_ACK, core, tmri, tmoi, tmo, event); - - em_status_t stat; - uint64_t t1 = TEST_TIME_FN(); - - stat = em_tmo_ack(tmo, event); - profile_add(t1, TEST_TIME_FN(), PROF_TMO_ACK, eo_ctx, core); - if (stat == EM_ERR_CANCELED) { /* last event */ - trace_add(TEST_TIME_FN(), TRACE_OP_TMO_ACK_LAST, core, tmri, tmoi, tmo, event); - eo_ctx->last_time[tmri][tmoi] = now; - if (EXTRA_PRINTS) - APPL_PRINT("last timeout[%u][%u]\n", tmri, tmoi); - if (g_options.reuse_ev) { - eo_ctx->test_ev[tmri][tmoi] = event; /* event for tmo re-start */ - return true; /* don't free in receive */ - } - return false; /* now allowed to free */ - } - - test_fatal_if(stat != EM_OK, "Test tmo[%u][%u] ack returned %u!\n", tmri, tmoi, stat); - - if (g_options.delay_us != 0) - extra_delay(&eo_ctx->cdat[core].rnd, core, tmri, tmoi); - - return true; -} - -static void global_summary(app_eo_ctx_t *eo_ctx) -{ - int cores = m_shm->core_count; - - APPL_PRINT("\nGLOBAL SUMMARY:\n"); - - if (g_options.profile) { - APPL_PRINT("\nTiming profiles:\n"); - APPL_PRINT("api count min max avg (ns)\n"); - APPL_PRINT("------------------------------------------------------------------------\n"); - for (int p = 0; p < PROF_TMO_LAST; p++) { - prof_t pdat = { 0 }; - - pdat.min = UINT64_MAX; - for (int c = 0; c < cores; c++) { - if (eo_ctx->cdat[c].prof[p].min < pdat.min) - pdat.min = eo_ctx->cdat[c].prof[p].min; - if (eo_ctx->cdat[c].prof[p].max > pdat.max) - pdat.max = eo_ctx->cdat[c].prof[p].max; - pdat.num += eo_ctx->cdat[c].prof[p].num; - pdat.acc += eo_ctx->cdat[c].prof[p].acc; - } - if (pdat.num == 0) - continue; - APPL_PRINT("%-15s %-15lu %-15lu %-15lu %-15lu\n", - prof_names[p], pdat.num, pdat.min, - pdat.max, pdat.acc / pdat.num); - } - } - - APPL_PRINT("\ncore EO utilization\n"); - APPL_PRINT("---------------------\n"); - for (int c = 0; c < cores; c++) { - double load = (double)eo_ctx->cdat[c].eo_ns / - (double)(eo_ctx->cdat[c].non_eo_ns + eo_ctx->cdat[c].eo_ns); - - APPL_PRINT("%-7d%.2f %%\n", c, load * 100); - } - - /* more analysis from e.g. 
trace data could be implemented here */ - - APPL_PRINT("\n"); -} - -static void analyze_and_print(app_eo_ctx_t *eo_ctx, int loop) -{ - APPL_PRINT("Analysis for loop %u :\n", loop); - - int cores = m_shm->core_count; - uint64_t counts[MAX_TEST_TIMERS][MAX_TEST_TMO]; - - memset(counts, 0, sizeof(counts)); - for (int i = 0; i < cores ; i++) - for (unsigned int t = 0; t < g_options.num_timers; t++) - for (unsigned int to = 0; to < g_options.num_tmo; to++) - counts[t][to] += eo_ctx->cdat[i].count[t][to]; - - uint64_t total = 0; - - APPL_PRINT("tmr tmo secs tmos ->hz setup_hz error %%\n"); - APPL_PRINT("---------------------------------------------------------------------\n"); - for (unsigned int t = 0; t < g_options.num_timers; t++) { - for (unsigned int to = 0; to < g_options.num_tmo; to++) { - int64_t ttime = (int64_t)eo_ctx->last_time[t][to] - - (int64_t)eo_ctx->first_time[t][to]; - double secs = (double)ttime / 1000000000; - double tested_hz = ((double)(counts[t][to] - 1)) / fabs(secs); - double setup_hz = frac2float(g_options.basehz[t]); - - setup_hz *= g_options.multiplier[to]; - double errorp = ((tested_hz - setup_hz) / setup_hz) * 100; - - APPL_PRINT("%-5u %-5u %-10.4f %-12lu %-12.4f %-12.4f %-12.3f\n", - t, to, secs, counts[t][to], tested_hz, setup_hz, errorp); - total += counts[t][to]; - - /* calculations are invalid if last event was not received */ - if ((int64_t)eo_ctx->last_time[t][to] - - (int64_t)eo_ctx->first_time[t][to] < 1) - APPL_PRINT("WARN: last event for tmo[%u][%u] not received?\n", - t, to); - } - } - - double runsecs = (double)(eo_ctx->stop_time - eo_ctx->start_time) / 1000000000; - - APPL_PRINT("\n%lu total timeouts received in %.3f s -> %.4f M tmo / sec\n\n", - total, runsecs, ((double)total / runsecs) / 1000000); -} - -void test_init(const appl_conf_t *appl_conf) -{ - (void)appl_conf; - int core = em_core_id(); - - /* first core creates shared memory */ - if (core == 0) { - odp_shm = odp_shm_reserve(SHM_NAME, sizeof(timer_app_shm_t), 64, 0); - if (odp_shm == ODP_SHM_INVALID) { - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "shm init failed on EM-core: %u", core); - } - m_shm = odp_shm_addr(odp_shm); - - /* initialize it */ - if (m_shm) - memset(m_shm, 0, sizeof(timer_app_shm_t)); - else - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "ShMem init failed on EM-core: %u", core); - - if (EXTRA_PRINTS) - APPL_PRINT("%luk shared memory for app context\n", - sizeof(timer_app_shm_t) / 1024); - - /* Store the number of EM-cores running the application */ - m_shm->core_count = appl_conf->core_count; - - if (g_options.tracelen) { - size_t tlen = m_shm->core_count * - g_options.tracelen * sizeof(trace_entry_t); - - odp_shm_trace = odp_shm_reserve(SHM_TRACE_NAME, tlen, 64, 0); - if (odp_shm_trace == ODP_SHM_INVALID) { - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "trace shm init failed on EM-core: %u", core); - } - m_tracebuf = odp_shm_addr(odp_shm_trace); - if (m_tracebuf) - memset(m_tracebuf, 0, tlen); - if (EXTRA_PRINTS) - APPL_PRINT("%luk shared memory for trace\n", tlen / 1024); - } else { - odp_shm_trace = ODP_SHM_INVALID; - } - } else { - /* lookup memory from core 0 init */ - odp_shm = odp_shm_lookup(SHM_NAME); - test_fatal_if(odp_shm == ODP_SHM_INVALID, "shared mem lookup fail"); - - if (g_options.tracelen) { - odp_shm_trace = odp_shm_lookup(SHM_TRACE_NAME); - test_fatal_if(odp_shm_trace == ODP_SHM_INVALID, - "trace shared mem lookup fail"); - } - - m_shm = odp_shm_addr(odp_shm); - if (!m_shm) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "ShMem init 
failed on EM-core: %u", core); - } - - if (EXTRA_PRINTS) - APPL_PRINT("Shared mem at %p on core %d\n", m_shm, core); - - if (g_options.tracelen) { - m_tracebuf = odp_shm_addr(odp_shm_trace); - if (m_tracebuf == NULL) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "Trace ShMem adj failed on EM-core: %u", core); - m_tracebuf += g_options.tracelen * core; - if (EXTRA_PRINTS) - APPL_PRINT("Trace buffer at %p on core %d\n", m_tracebuf, core); - } - - mlockall(MCL_FUTURE); - if (EXTRA_PRINTS) - APPL_PRINT("core %d: %s done, shm @%p\n", core, __func__, m_shm); -} - -/** - * Startup of the timer ring test EM application - */ -void test_start(const appl_conf_t *appl_conf) -{ - em_eo_t eo; - em_timer_attr_t attr; - em_queue_t queue; - em_status_t stat; - app_eo_ctx_t *eo_ctx; - - if (appl_conf->num_procs > 1) { - APPL_PRINT("\nPROCESS MODE is not yet supported!\n"); - abort(); - } - - fix_setup(); - - eo_ctx = &m_shm->eo_context; - memset(eo_ctx, 0, sizeof(app_eo_ctx_t)); - - eo = em_eo_create(APP_EO_NAME, app_eo_start, app_eo_start_local, - app_eo_stop, app_eo_stop_local, app_eo_receive, - eo_ctx); - test_fatal_if(eo == EM_EO_UNDEF, "Failed to create EO!"); - - stat = em_register_error_handler(my_error_handler); - test_fatal_if(stat != EM_OK, "Failed to register error handler"); - - /* parallel high priority for timeout handling*/ - queue = em_queue_create("Tmo Q", - EM_QUEUE_TYPE_PARALLEL, - EM_QUEUE_PRIO_HIGHEST, - EM_QUEUE_GROUP_DEFAULT, NULL); - stat = em_eo_add_queue_sync(eo, queue); - test_fatal_if(stat != EM_OK, "Failed to create test queue!"); - eo_ctx->test_q = queue; - - /* another normal priority for heartbeat */ - queue = em_queue_create("HB Q", - EM_QUEUE_TYPE_ATOMIC, - EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_DEFAULT, NULL); - stat = em_eo_add_queue_sync(eo, queue); - test_fatal_if(stat != EM_OK, "Failed to add HB queue!"); - eo_ctx->hb_q = queue; - - /* create HB timer */ - em_timer_attr_init(&attr); - strncpy(attr.name, "HBTimer", EM_TIMER_NAME_LEN); - m_shm->hb_tmr = em_timer_create(&attr); - test_fatal_if(m_shm->hb_tmr == EM_TIMER_UNDEF, - "Failed to create HB timer!"); - - trace_add(TEST_TIME_FN(), TRACE_OP_TMR_CREATE, em_core_id(), -1, -1, NULL, m_shm->hb_tmr); - - em_timer_capability_t capa = { 0 }; - - stat = em_timer_capability(&capa, g_options.clksrc); - test_fatal_if(stat != EM_OK, "em_timer_capability returned error for clk %u\n", - g_options.clksrc); - test_fatal_if(capa.ring.max_rings == 0, "Ring timers not supported!"); - - APPL_PRINT("Timer ring capability for clksrc %d:\n", g_options.clksrc); - APPL_PRINT(" maximum timers: %d\n", capa.ring.max_rings); - - double hz = frac2float(capa.ring.min_base_hz); - - APPL_PRINT(" minimum base_hz: %.3f\n", hz); - hz = frac2float(capa.ring.max_base_hz); - APPL_PRINT(" maximum base_hz: %.3f\n", hz); - - /* Start EO */ - stat = em_eo_start_sync(eo, NULL, NULL); - test_fatal_if(stat != EM_OK, "Failed to start EO!"); -} - -void test_stop(const appl_conf_t *appl_conf) -{ - if (appl_conf->num_procs > 1) { - APPL_PRINT("%s(): skip\n", __func__); - return; - } - - em_eo_t eo = em_eo_find(APP_EO_NAME); - - test_fatal_if(eo == EM_EO_UNDEF, "Could not find EO:%s", APP_EO_NAME); - - em_status_t ret = em_eo_stop_sync(eo); - - test_fatal_if(ret != EM_OK, "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); - - ret = em_timer_delete(m_shm->hb_tmr); - test_fatal_if(ret != EM_OK, "Timer:%" PRI_TMR " delete:%" PRI_STAT "", - m_shm->hb_tmr, ret); - m_shm->hb_tmr = EM_TIMER_UNDEF; - - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(ret != 
EM_OK, "EO:%" PRI_EO " delete Qs:%" PRI_STAT "", eo, ret); - - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); - - em_unregister_error_handler(); - APPL_PRINT("test_stopped\n"); -} - -void test_term(const appl_conf_t *appl_conf) -{ - (void)appl_conf; - - if (m_shm != NULL) { - odp_shm_free(odp_shm); - m_shm = NULL; - odp_shm = ODP_SHM_INVALID; - } - if (odp_shm_trace != ODP_SHM_INVALID) { - odp_shm_free(odp_shm_trace); - odp_shm_trace = ODP_SHM_INVALID; - } -} - -static em_status_t app_eo_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - app_msg_t *msg; - app_eo_ctx_t *eo_ctx = (app_eo_ctx_t *)eo_context; - - (void)eo; - (void)conf; - - odp_ticketlock_init(&tracelock); - print_setup(); - - for (unsigned int t = 0; t < g_options.num_timers; t++) - for (unsigned int to = 0; to < g_options.num_tmo; to++) { - eo_ctx->test_tmo[t][to] = EM_TMO_UNDEF; - eo_ctx->test_ev[t][to] = EM_EVENT_UNDEF; - } - - /* create periodic timeout for heartbeat */ - eo_ctx->heartbeat_tmo = em_tmo_create(m_shm->hb_tmr, EM_TMO_FLAG_PERIODIC, eo_ctx->hb_q); - test_fatal_if(eo_ctx->heartbeat_tmo == EM_TMO_UNDEF, - "Can't allocate heartbeat_tmo!\n"); - - em_event_t event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, EM_POOL_DEFAULT); - - test_fatal_if(event == EM_EVENT_UNDEF, "Can't allocate event (%ldB)!\n", - sizeof(app_msg_t)); - - msg = em_event_pointer(event); - msg->count = 0; - msg->type = MSGTYPE_HB; - msg->tmo = eo_ctx->heartbeat_tmo; - uint64_t hb_hz = em_timer_get_freq(m_shm->hb_tmr); - - if (hb_hz < 10) - APPL_ERROR("WARNING: HB timer hz very low!?\n"); - - em_timer_tick_t period = hb_hz; /* 1s HB */ - - test_fatal_if(period < 1, "HB timer resolution is too low!\n"); - - eo_ctx->state = STATE_START; - eo_ctx->next_change = 2; - - if (g_options.profile) { - for (unsigned int c = 0; c < m_shm->core_count; c++) - for (int p = 0; p < NUM_PROFILES; p++) - eo_ctx->cdat[c].prof[p].min = UINT64_MAX; - } - - /* start heartbeat */ - em_status_t stat = em_tmo_set_periodic(eo_ctx->heartbeat_tmo, 0, period, event); - - test_fatal_if(stat != EM_OK, "Can't activate heartbeat tmo!\n"); - trace_add(TEST_TIME_FN(), TRACE_OP_TMO_SET, em_core_id(), - -1, -1, NULL, eo_ctx->heartbeat_tmo); - - if (EXTRA_PRINTS) - APPL_PRINT("WARNING: extra prints enabled, expect some timing jitter\n"); - - stat = em_dispatch_register_enter_cb(enter_cb); - test_fatal_if(stat != EM_OK, "enter_cb() register failed!"); - stat = em_dispatch_register_exit_cb(exit_cb); - test_fatal_if(stat != EM_OK, "exit_cb() register failed!"); - - return EM_OK; -} - -/** - * @private - * - * EO per thread start function. - */ -static em_status_t app_eo_start_local(void *eo_context, em_eo_t eo) -{ - app_eo_ctx_t *const eo_ctx = eo_context; - int core = em_core_id(); - - (void)eo; - (void)eo_ctx; - - test_fatal_if(core >= MAX_CORES, "Too many cores!"); - - memset(&eo_ctx->cdat[core].rnd, 0, sizeof(rnd_state_t)); - initstate_r(time(NULL), eo_ctx->cdat[core].rnd.rndstate, RND_STATE_BUF, - &eo_ctx->cdat[core].rnd.rdata); - srandom(time(NULL)); - m_tracecount = 0; - trace_add(TEST_TIME_FN(), TRACE_OP_START, core, -1, -1, NULL, NULL); - return EM_OK; -} - -/** - * @private - * - * EO stop function. 
- */ -static em_status_t app_eo_stop(void *eo_context, em_eo_t eo) -{ - app_eo_ctx_t *const eo_ctx = eo_context; - em_event_t event = EM_EVENT_UNDEF; - - (void)eo; - - if (EXTRA_PRINTS) - APPL_PRINT("EO stop\n"); - - if (eo_ctx->heartbeat_tmo != EM_TMO_UNDEF) { - em_tmo_delete(eo_ctx->heartbeat_tmo, &event); - eo_ctx->heartbeat_tmo = EM_TMO_UNDEF; - if (event != EM_EVENT_UNDEF) - em_free(event); - } - - delete_test_timer(eo_context); - - em_status_t ret = em_dispatch_unregister_enter_cb(enter_cb); - - test_fatal_if(ret != EM_OK, "enter_cb() unregister:%" PRI_STAT, ret); - ret = em_dispatch_unregister_exit_cb(exit_cb); - test_fatal_if(ret != EM_OK, "exit_cb() unregister:%" PRI_STAT, ret); - - if (EXTRA_PRINTS) - APPL_PRINT("EO stop done\n"); - return EM_OK; -} - -static em_status_t app_eo_stop_local(void *eo_context, em_eo_t eo) -{ - (void)eo; - - trace_add(TEST_TIME_FN(), TRACE_OP_END, em_core_id(), -1, -1, NULL, NULL); - - /* dump trace */ - if (g_options.tracelen) { - odp_ticketlock_lock(&tracelock); /* serialize printing */ - dump_trace(eo_context); - odp_ticketlock_unlock(&tracelock); - } - - return EM_OK; -} - -/* EO receive function */ -static void app_eo_receive(void *eo_context, em_event_t event, - em_event_type_t type, em_queue_t queue, - void *q_context) -{ - uint64_t now = TEST_TIME_FN(); - app_eo_ctx_t *const eo_ctx = eo_context; - bool reuse = false; - - (void)q_context; - (void)queue; - - /* heartbeat */ - if (type == EM_EVENT_TYPE_SW) { - app_msg_t *msgin = (app_msg_t *)em_event_pointer(event); - - switch (msgin->type) { - case MSGTYPE_HB: /* uses atomic queue */ - reuse = handle_heartbeat(eo_ctx, event, msgin, now); - break; - - default: - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Invalid msg received!\n"); - } - } else if (type == EM_EVENT_TYPE_TIMER_IND) { /* test timeout */ - reuse = handle_tmo(eo_ctx, event, now); /* uses parallel queue */ - } else { - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Invalid event type %u!\n", type); - } - - if (!reuse) { - if (type == EM_EVENT_TYPE_TIMER_IND) { /* extra trace */ - unsigned int tmri, tmoi; - - ptr2tmo(em_tmo_get_userptr(event, NULL), &tmri, &tmoi); - trace_add(TEST_TIME_FN(), TRACE_OP_TMO_EV_FREE, em_core_id(), - tmri, tmoi, eo_ctx->test_tmo[tmri][tmoi], event); - eo_ctx->test_ev[tmri][tmoi] = EM_EVENT_UNDEF; - } - em_free(event); - } -} - -int main(int argc, char *argv[]) -{ - /* pick app-specific arguments after '--' */ - int i; - - APPL_PRINT("EM periodic ring timer test %s\n\n", VERSION); - - for (i = 1; i < argc; i++) { - if (!strcmp(argv[i], "--")) - break; - } - if (i < argc) { - if (!parse_my_args(i, argc, argv)) { - APPL_PRINT("Invalid application arguments\n"); - return 1; - } - } - - return cm_setup(argc, argv); -} +/* + * Copyright (c) 2023, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * Event Machine timer ring test for alternative periodic timeouts.
+ *
+ * See the instruction text in timer_test_ring.h.
+ *
+ */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <inttypes.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <signal.h>
+#include <time.h>
+#include <getopt.h>
+#include <libgen.h>
+#include <sys/mman.h>
+
+#include <event_machine.h>
+#include <event_machine/add-ons/event_machine_timer.h>
+#include <odp_api.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+#include "timer_test_ring.h"
+
+#define VERSION "WIP v0.3"
+struct {
+ unsigned int loops;
+ em_fract_u64_t basehz[MAX_TEST_TIMERS];
+ uint64_t multiplier[MAX_TEST_TMO];
+ uint64_t res_ns[MAX_TEST_TIMERS];
+ uint64_t max_mul[MAX_TEST_TIMERS];
+ uint64_t start_offset[MAX_TEST_TMO];
+ unsigned int num_timers;
+ unsigned int looptime;
+ bool recreate;
+ bool reuse_tmo;
+ bool reuse_ev;
+ bool profile;
+ unsigned int num_tmo;
+ int64_t delay_us;
+ em_timer_clksrc_t clksrc;
+ unsigned int tracelen;
+ char tracefile[MAX_FILENAME];
+
+} g_options = {
+ .loops = 1,
+ .basehz = { {100, 0, 0 } }, /* per timer */
+ .multiplier = { 1 }, /* per timeout */
+ .res_ns = { 0 }, /* per timer */
+ .max_mul = { 8 }, /* per timer */
+ .start_offset = { 0 }, /* per timeout */
+ .num_timers = 1,
+ .looptime = 30,
+ .recreate = false,
+ .reuse_tmo = false,
+ .reuse_ev = false,
+ .profile = false,
+ .num_tmo = 1,
+ .delay_us = 0,
+ .clksrc = 0,
+ .tracelen = 0,
+ .tracefile = "stdout"
+};
+
+static timer_app_shm_t *m_shm;
+static odp_shm_t odp_shm;
+static odp_shm_t odp_shm_trace;
+static __thread trace_entry_t *m_tracebuf;
+static __thread unsigned int m_tracecount;
+odp_ticketlock_t tracelock;
+
+/* --------------------------------------- */
+static void usage(void);
+static int parse_my_args(int first, int argc, char *argv[]);
+static em_status_t app_eo_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf);
+static em_status_t app_eo_start_local(void *eo_context, em_eo_t eo);
+static em_status_t app_eo_stop(void *eo_context, em_eo_t eo);
+static em_status_t app_eo_stop_local(void *eo_context, em_eo_t eo);
+static void app_eo_receive(void *eo_context, em_event_t event,
+ em_event_type_t type, em_queue_t queue, void *q_context);
+static em_status_t my_error_handler(em_eo_t eo, em_status_t error,
+ em_escope_t escope, va_list args);
+static bool handle_heartbeat(app_eo_ctx_t *eo_ctx, em_event_t event,
+ app_msg_t *msgin, uint64_t now);
+static bool handle_tmo(app_eo_ctx_t *eo_ctx, em_event_t event, uint64_t now);
+static void analyze_and_print(app_eo_ctx_t *eo_ctx, int loop);
+static void global_summary(app_eo_ctx_t *eo_ctx);
+static void print_setup(void);
+static void restart(app_eo_ctx_t *eo_ctx, int count);
+static void delete_test_timer(app_eo_ctx_t *eo_ctx);
+static void create_test_timer(app_eo_ctx_t *eo_ctx);
+static int
split_list(char *str, uint64_t *list, int maxnum); +static int split_float_list(char *str, em_fract_u64_t *list, int maxnum); +static void approx_fract(double f, em_fract_u64_t *fract); +static void fix_setup(void); +static void create_test_timeouts(app_eo_ctx_t *eo_ctx); +static void delete_test_timeouts(app_eo_ctx_t *eo_ctx, bool force); +static void delete_test_events(app_eo_ctx_t *eo_ctx, bool force); +static void dump_trace(app_eo_ctx_t *eo_ctx); +static void enter_cb(em_eo_t eo, void **eo_ctx, em_event_t events[], int num, + em_queue_t *queue, void **q_ctx); +static void exit_cb(em_eo_t eo); +static void extra_delay(rnd_state_t *rnd, int core, unsigned int tmri, unsigned int tmoi); + +/* --------------------------------------- */ +em_status_t my_error_handler(em_eo_t eo, em_status_t error, + em_escope_t escope, va_list args) +{ + if (escope == 0xDEAD) { /* test_fatal_if */ + char *file = va_arg(args, char*); + const char *func = va_arg(args, const char*); + const int line = va_arg(args, const int); + const char *format = va_arg(args, const char*); + const char *base = basename(file); + + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wformat-nonliteral" + fprintf(stderr, "FATAL - %s:%d, %s():\n", + base, line, func); + vfprintf(stderr, format, args); + #pragma GCC diagnostic pop + } + return test_error_handler(eo, error, escope, args); +} + +static void enter_cb(em_eo_t eo, void **eo_ctx, em_event_t events[], int num, + em_queue_t *queue, void **q_ctx) +{ + app_eo_ctx_t *const my_eo_ctx = *eo_ctx; + int core = em_core_id(); + + (void)eo; + (void)queue; + (void)q_ctx; + (void)events; + (void)num; + + if (unlikely(!my_eo_ctx)) + return; + + my_eo_ctx->cdat[core].enter_ns = TEST_TIME_FN(); + if (likely(my_eo_ctx->cdat[core].exit_ns)) + my_eo_ctx->cdat[core].non_eo_ns += my_eo_ctx->cdat[core].enter_ns - + my_eo_ctx->cdat[core].exit_ns; +} + +static void exit_cb(em_eo_t eo) +{ + app_eo_ctx_t *const my_eo_ctx = em_eo_get_context(eo); + int core = em_core_id(); + + if (unlikely(!my_eo_ctx)) + return; + + my_eo_ctx->cdat[core].exit_ns = TEST_TIME_FN(); + my_eo_ctx->cdat[core].eo_ns += my_eo_ctx->cdat[core].exit_ns - + my_eo_ctx->cdat[core].enter_ns; +} + +static inline double frac2float(em_fract_u64_t frac) +{ + double f = frac.integer; + + if (frac.numer) + f += (double)frac.numer / (double)frac.denom; + return f; +} + +static inline void *tmo2ptr(unsigned int tmr, unsigned int tmo) +{ + return (void *)(((uint64_t)tmr << 16) + tmo); +} + +static inline void ptr2tmo(void *ptr, unsigned int *tmr, unsigned int *tmo) +{ + uint64_t x = (uint64_t)ptr; + + *tmo = x & 0xFFFF; + *tmr = x >> 16; +} + +static inline void profile_add(uint64_t t1, uint64_t t2, prof_apis api, + app_eo_ctx_t *eo_ctx, int core) +{ + uint64_t diff = t2 - t1; + + if (eo_ctx->cdat[core].prof[api].min > diff) + eo_ctx->cdat[core].prof[api].min = diff; + if (eo_ctx->cdat[core].prof[api].max < diff) + eo_ctx->cdat[core].prof[api].max = diff; + eo_ctx->cdat[core].prof[api].acc += diff; + eo_ctx->cdat[core].prof[api].num++; +} + +static inline void trace_add(uint64_t ts, trace_op_t op, uint32_t val, + int64_t arg1, int64_t arg2, void *arg3, void *arg4) +{ + trace_entry_t *tp; + + if (g_options.tracelen == 0) + return; /* disabled */ + + if (unlikely(m_tracecount >= g_options.tracelen)) { /* overflow marker */ + tp = &m_tracebuf[g_options.tracelen - 1]; + tp->ns = TEST_TIME_FN(); + tp->op = TRACE_OP_LAST; + tp->val = val; + tp->arg1 = arg1; + tp->arg2 = arg2; + tp->arg3 = arg3; + tp->arg4 = arg4; + return; + } + + 
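+ /* normal path: append the entry to this core's private slice of the
+ * trace buffer; m_tracebuf is __thread and offset per core at init,
+ * so this fast path needs no locking (only the dump is serialized)
+ */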
tp = &m_tracebuf[m_tracecount]; + tp->ns = ts; + tp->op = op; + tp->val = val; + tp->arg1 = arg1; + tp->arg2 = arg2; + tp->arg3 = arg3; + tp->arg4 = arg4; + + m_tracecount++; +} + +static void extra_delay(rnd_state_t *rnd, int core, unsigned int tmri, unsigned int tmoi) +{ + uint64_t t1 = TEST_TIME_FN(); + uint64_t ns; + + if (g_options.delay_us < 0) { /* random */ + int32_t r1; + + random_r(&rnd->rdata, &r1); + ns = (uint64_t)r1 % (1000 * (labs(g_options.delay_us) + 1)); + } else { + ns = g_options.delay_us * 1000UL; + } + + trace_add(TEST_TIME_FN(), TRACE_OP_DELAY, core, tmri, tmoi, (void *)ns, NULL); + while (TEST_TIME_FN() < (ns + t1)) { + /* delay */ + }; +} + +static void dump_trace(app_eo_ctx_t *eo_ctx) +{ + static bool title = true; /* header once */ + + if (g_options.tracelen == 0) + return; + + FILE *df = stdout; + + if (strcmp(g_options.tracefile, "stdout")) + df = fopen(g_options.tracefile, title ? "w" : "a"); + + if (!df) { + APPL_PRINT("Failed to open dump file!\n"); + return; + } + + if (title) { + fprintf(df, "#BEGIN RING TRACE FORMAT 1\n"); + /* dump setup */ + fprintf(df, "cores,loops,num_timer,num_tmo,recreate_tmr,reuse_tmo,reuse_ev,delay_us,tracelen,ver\n"); + fprintf(df, "%u,%u,%u,%u,%u,%u,%u,%ld,%u,%s\n", m_shm->core_count, + g_options.loops, g_options.num_timers, g_options.num_tmo, + g_options.recreate, g_options.reuse_tmo, g_options.reuse_ev, + g_options.delay_us, g_options.tracelen, VERSION); + /* dump timeouts */ + fprintf(df, "#TMO:\ntmr,tmo,tick_hz,res_ns,base_hz,mul,startrel\n"); + for (unsigned int tmr = 0; tmr < g_options.num_timers; tmr++) + for (unsigned int tmo = 0; tmo < g_options.num_tmo; tmo++) { + fprintf(df, "%u,%u,%lu,%lu,%f,%lu,%lu\n", + tmr, tmo, eo_ctx->tick_hz[tmr], g_options.res_ns[tmr], + frac2float(g_options.basehz[tmr]), + g_options.multiplier[tmo], g_options.start_offset[tmo]); + } + + /* and then the trace events */ + fprintf(df, "#EVENTS:\n"); + fprintf(df, "core,ns,op,arg1,arg2,arg3,arg4\n"); + } + + for (uint32_t count = 0; count < m_tracecount; count++) { + trace_entry_t *tp = &m_tracebuf[count]; + + test_fatal_if(tp->op > TRACE_OP_LAST, "Invalid trace op %u!", tp->op); + + fprintf(df, "%u,%lu,%s,%ld,%ld,%p,%p\n", + tp->val, tp->ns, trace_op_labels[tp->op], + tp->arg1, tp->arg2, tp->arg3, tp->arg4); + } + + if (df != stdout) + fclose(df); + title = false; +} + +static void usage(void) +{ + printf("%s\n", instructions); + + printf("Options:\n"); + for (int i = 0; ; i++) { + if (longopts[i].name == NULL || descopts[i] == NULL) + break; + printf("--%s or -%c: %s\n", longopts[i].name, longopts[i].val, descopts[i]); + } +} + +static void print_timers(void) +{ + em_timer_attr_t attr; + em_timer_t tmr[PRINT_MAX_TMRS]; + + int num_timers = em_timer_get_all(tmr, PRINT_MAX_TMRS); + + for (int i = 0; i < (num_timers > PRINT_MAX_TMRS ? 
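+ /* em_timer_get_all() returns the total count; print at most PRINT_MAX_TMRS */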
PRINT_MAX_TMRS : num_timers); i++) { + test_fatal_if(em_timer_get_attr(tmr[i], &attr) != EM_OK, "Can't get timer info\n"); + + APPL_PRINT("Timer \"%s\" info:\n", attr.name); + APPL_PRINT(" -is ring: "); + if (attr.flags & EM_TIMER_FLAG_RING) { + double hz = frac2float(attr.ringparam.base_hz); + + APPL_PRINT(" yes (base_hz %.3f, max_mul %lu)\n", + hz, attr.ringparam.max_mul); + APPL_PRINT(" -resolution: %" PRIu64 " ns\n", attr.ringparam.res_ns); + APPL_PRINT(" -clk_src: %d\n", attr.ringparam.clk_src); + } else { + APPL_PRINT("no\n"); + APPL_PRINT(" -resolution: %" PRIu64 " ns\n", attr.resparam.res_ns); + APPL_PRINT(" -max_tmo: %" PRIu64 " ms\n", attr.resparam.max_tmo / 1000); + APPL_PRINT(" -clk_src: %d\n", attr.resparam.clk_src); + } + APPL_PRINT(" -num_tmo: %d\n", attr.num_tmo); + APPL_PRINT(" -tick Hz: %" PRIu64 " hz\n", em_timer_get_freq(tmr[i])); + } +} + +/* adjust timer setup basehz,mul to same lengths */ +static void fix_setup(void) +{ + for (unsigned int i = 1; i < g_options.num_timers; i++) { + if (g_options.basehz[i].integer == 0) { + g_options.basehz[i].integer = 100; + g_options.basehz[i].numer = 0; + } + if (g_options.max_mul[i] == 0) + g_options.max_mul[i] = 8; + /* res 0 is ok = default */ + } + for (unsigned int i = 1; i < g_options.num_tmo; i++) { + if (g_options.multiplier[i] == 0) + g_options.multiplier[i] = 1; + } + + if (g_options.recreate && g_options.reuse_tmo) { + APPL_PRINT("\nWARNING: Can't recreate timers AND reuse tmo, reuse disabled\n"); + g_options.reuse_tmo = false; + } +} + +/* separate comma limited integer argument */ +static int split_list(char *str, uint64_t *list, int maxnum) +{ + int num = 0; + char *p = strtok(str, ","); + + while (p) { + list[num] = (uint64_t)atoll(p); + num++; + if (num >= maxnum) + break; + p = strtok(NULL, ","); + } + + return num; +} + +/* this could be better, but for now just use fixed point to 100th */ +static void approx_fract(double val, em_fract_u64_t *fract) +{ + double intp; + double p = modf(val, &intp); + + fract->numer = round(100 * p); + fract->denom = 100; +} + +/* separate comma limited float argument */ +static int split_float_list(char *str, em_fract_u64_t *list, int maxnum) +{ + int num = 0; + char *p = strtok(str, ","); + + while (p) { + list[num].integer = (uint64_t)atoll(p); + approx_fract(atof(p), &list[num]); + num++; + if (num >= maxnum) + break; + p = strtok(NULL, ","); + } + + return num; +} + +static int parse_my_args(int first, int argc, char *argv[]) +{ + optind = first + 1; /* skip '--' */ + while (1) { + int opt; + int long_index; + char *endptr; + long num; + + opt = getopt_long(argc, argv, shortopts, longopts, &long_index); + + if (opt == -1) + break; /* No more options */ + + switch (opt) { + case 'b': { + uint64_t hz[MAX_TEST_TIMERS]; + + num = split_list(optarg, hz, MAX_TEST_TIMERS); + if (num < 1) + return 0; + for (int i = 0; i < num; i++) { + g_options.basehz[i].integer = hz[i]; + g_options.basehz[i].numer = 0; + } + if (num > g_options.num_timers) + g_options.num_timers = num; + } + break; + + case 'f': + num = split_float_list(optarg, &g_options.basehz[0], MAX_TEST_TIMERS); + if (num < 1) + return 0; + if (num > g_options.num_timers) + g_options.num_timers = num; + break; + + case 'l': + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 1) + return 0; + g_options.loops = (unsigned int)num; + break; + + case 't': + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 1) + return 0; + g_options.looptime = (int)num; + break; + + case 'c': + num = strtol(optarg, 
&endptr, 0); + if (*endptr != '\0' || num < 0) + return 0; + g_options.clksrc = (em_timer_clksrc_t)num; + break; + + case 'n': + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 1) + return 0; + g_options.num_tmo = (unsigned int)num; + break; + + case 'T': + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 0) + return 0; + g_options.tracelen = (unsigned int)num; + break; + + case 'd': + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0') + return 0; + g_options.delay_us = (int64_t)num; + break; + + case 'w': { /* optional arg */ + if (optarg != NULL) { + if (strlen(optarg) >= MAX_FILENAME) + return 0; + strncpy(g_options.tracefile, optarg, MAX_FILENAME); + } + } + break; + + case 'r': + num = split_list(optarg, &g_options.res_ns[0], MAX_TEST_TIMERS); + if (num < 1) + return 0; + if (num > g_options.num_timers) + g_options.num_timers = num; + break; + + case 'm': + num = split_list(optarg, &g_options.multiplier[0], MAX_TEST_TMO); + if (num < 1) + return 0; + break; + + case 'o': + num = split_list(optarg, &g_options.start_offset[0], MAX_TEST_TMO); + if (num < 1) + return 0; + break; + + case 'M': + num = split_list(optarg, &g_options.max_mul[0], MAX_TEST_TIMERS); + if (num < 1) + return 0; + if (num > g_options.num_timers) + g_options.num_timers = num; + break; + + case 'R': + g_options.recreate = true; + break; + + case 'a': + g_options.profile = true; + break; + + case 'N': + g_options.reuse_tmo = true; + break; + + case 'E': + g_options.reuse_ev = true; + break; + + case 'h': + default: + opterr = 0; + usage(); + return 0; + } + } + + optind = 1; /* cm_setup() to parse again */ + return 1; +} + +void print_setup(void) +{ + APPL_PRINT("\nActive run options:\n"); + APPL_PRINT(" - loops: %d\n", g_options.loops); + APPL_PRINT(" - looptime: %d s\n", g_options.looptime); + APPL_PRINT(" - num timers: %d\n", g_options.num_timers); + APPL_PRINT(" - tmo per timer: %d\n", g_options.num_tmo); + APPL_PRINT(" - recreate timer: %s\n", g_options.recreate ? "yes" : "no"); + APPL_PRINT(" - reuse tmo: %s\n", g_options.reuse_tmo ? "yes" : "no"); + APPL_PRINT(" - reuse event: %s\n", g_options.reuse_ev ? "yes" : "no"); + if (g_options.tracelen) { + APPL_PRINT(" - tracebuf: %u\n", g_options.tracelen); + APPL_PRINT(" - tracefile: %s\n", g_options.tracefile); + } + APPL_PRINT(" - profile APIs: %s\n", g_options.profile ? "yes" : "no"); + APPL_PRINT(" - extra delay: %ld us %s\n", labs(g_options.delay_us), + g_options.delay_us < 0 ? 
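+ /* negative delay_us means a randomized delay, see extra_delay() */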
"(rnd)" : ""); + + APPL_PRINT("\nTimer tmo basehz max_mul res_ns startrel mul ->hz\n"); + + for (unsigned int i = 0; i < g_options.num_timers; i++) { + double hz = frac2float(g_options.basehz[i]); + + for (unsigned int t = 0; t < g_options.num_tmo; t++) { + APPL_PRINT("%-5u %-4u %-15.3f %-9lu %-14lu %-12lu %-8lu %.3f\n", + i, t, hz, g_options.max_mul[i], + g_options.res_ns[i], g_options.start_offset[t], + g_options.multiplier[t], hz * (double)g_options.multiplier[t]); + } + } + APPL_PRINT("\n"); +} + +static void delete_test_timer(app_eo_ctx_t *eo_ctx) +{ + for (unsigned int i = 0; i < g_options.num_timers; i++) { + if (eo_ctx->test_tmr[i] != EM_TIMER_UNDEF) { + trace_add(TEST_TIME_FN(), TRACE_OP_TMR_DELETE, em_core_id(), + i, -1, NULL, eo_ctx->test_tmr[i]); + em_status_t rv = em_timer_delete(eo_ctx->test_tmr[i]); + + test_fatal_if(rv != EM_OK, "Ring timer[%d] delete fail, rv %d!", i, rv); + APPL_PRINT("Deleted test timer[%d]: %p\n", i, eo_ctx->test_tmr[i]); + eo_ctx->test_tmr[i] = EM_TIMER_UNDEF; + } + } +} + +static void create_test_timer(app_eo_ctx_t *eo_ctx) +{ + em_timer_attr_t rattr; + + for (unsigned int i = 0; i < g_options.num_timers; i++) { + em_status_t stat = em_timer_ring_attr_init(&rattr, + g_options.clksrc, + g_options.basehz[i].integer, + g_options.max_mul[i], + g_options.res_ns[i]); + + if (g_options.basehz[i].numer) { + rattr.ringparam.base_hz.numer = g_options.basehz[i].numer; + rattr.ringparam.base_hz.denom = g_options.basehz[i].denom; + } + if (EXTRA_PRINTS) { + APPL_PRINT("\nInitialized ring attr:\n"); + APPL_PRINT(" -clksrc: %u\n", g_options.clksrc); + APPL_PRINT(" -num_tmo: %u\n", rattr.num_tmo); + APPL_PRINT(" -base_hz: %" PRIu64 "\n", rattr.ringparam.base_hz.integer); + APPL_PRINT(" -base_hz n/d: %lu/%lu\n", rattr.ringparam.base_hz.numer, + rattr.ringparam.base_hz.denom); + APPL_PRINT(" -max_mul: %lu\n", rattr.ringparam.max_mul); + APPL_PRINT(" -res_ns: %lu\n", rattr.ringparam.res_ns); + } + + if (stat != EM_OK) { + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "Ring parameters not supported, ret %u!", stat); + } + + if (g_options.basehz[i].numer) { /* re-check values */ + em_timer_ring_param_t ring = rattr.ringparam; + + if (em_timer_ring_capability(&rattr.ringparam) == EM_ERR_NOT_SUPPORTED) { + APPL_PRINT("WARN: Arguments not exactly supported:\n"); + + APPL_PRINT("base_hz: %lu %lu/%lu -> %lu %lu/%lu\n", + ring.base_hz.integer, ring.base_hz.numer, + ring.base_hz.denom, rattr.ringparam.base_hz.integer, + rattr.ringparam.base_hz.numer, + rattr.ringparam.base_hz.denom); + APPL_PRINT("max_mul: %lu -> %lu\n", + ring.max_mul, rattr.ringparam.max_mul); + APPL_PRINT("res_ns: %lu -> %lu\n", + ring.res_ns, rattr.ringparam.res_ns); + } + } + strncpy(rattr.name, "RingTmr", EM_TIMER_NAME_LEN); + em_timer_t rtmr = em_timer_ring_create(&rattr); + + trace_add(TEST_TIME_FN(), TRACE_OP_TMR_CREATE, em_core_id(), i, -1, NULL, rtmr); + test_fatal_if(rtmr == EM_TIMER_UNDEF, "Ring timer create fail!"); + eo_ctx->test_tmr[i] = rtmr; + eo_ctx->tick_hz[i] = em_timer_get_freq(rtmr); + if (EXTRA_PRINTS) + APPL_PRINT("Created test timer[%d]: %p\n", i, rtmr); + } /* next timer */ +} + +static void create_test_timeouts(app_eo_ctx_t *eo_ctx) +{ + /* create test timeout(s) */ + for (unsigned int t = 0; t < g_options.num_timers; t++) { + for (unsigned int to = 0; to < g_options.num_tmo; to++) { + em_tmo_args_t args = { .userptr = tmo2ptr(t, to) }; + + if (eo_ctx->test_tmo[t][to] == EM_TMO_UNDEF) { + uint64_t t1 = TEST_TIME_FN(); + + eo_ctx->test_tmo[t][to] = 
em_tmo_create_arg(eo_ctx->test_tmr[t], + EM_TMO_FLAG_PERIODIC, + eo_ctx->test_q, + &args); + profile_add(t1, TEST_TIME_FN(), PROF_TMO_CREATE, + eo_ctx, em_core_id()); + trace_add(t1, TRACE_OP_TMO_CREATE, em_core_id(), + t, to, eo_ctx->test_tmr[t], eo_ctx->test_tmo[t][to]); + test_fatal_if(eo_ctx->test_tmo[t][to] == EM_TMO_UNDEF, + "Can't allocate test_tmo!\n"); + } + + uint64_t tick_now = em_timer_current_tick(eo_ctx->test_tmr[t]); + uint64_t t1 = TEST_TIME_FN(); + uint64_t startabs = 0; + + if (g_options.start_offset[to]) + startabs = tick_now + g_options.start_offset[to]; + trace_add(t1, TRACE_OP_TMO_SET, em_core_id(), + t, to, (void *)tick_now, eo_ctx->test_ev[t][to]); + t1 = TEST_TIME_FN(); + em_status_t stat = em_tmo_set_periodic_ring(eo_ctx->test_tmo[t][to], + startabs, + g_options.multiplier[to], + eo_ctx->test_ev[t][to]); + + profile_add(t1, TEST_TIME_FN(), PROF_TMO_SET, eo_ctx, em_core_id()); + test_fatal_if(stat != EM_OK, "Can't activate test tmo[%d][%d], ret %u!\n", + t, to, stat); + eo_ctx->first_time[t][to] = t1; + eo_ctx->test_ev[t][to] = EM_EVENT_UNDEF; /* now given to timer */ + } + } +} + +static void delete_test_timeouts(app_eo_ctx_t *eo_ctx, bool force) +{ + int core = em_core_id(); + + /* force == true means final cleanup, otherwise may skip if reuse option is active */ + + for (unsigned int ti = 0; ti < g_options.num_timers; ti++) { + for (unsigned int tmoi = 0; tmoi < g_options.num_tmo; tmoi++) { + if (eo_ctx->test_tmo[ti][tmoi] == EM_TMO_UNDEF) + continue; + + em_tmo_state_t s = em_tmo_get_state(eo_ctx->test_tmo[ti][tmoi]); + + test_fatal_if(s == EM_TMO_STATE_ACTIVE, + "Unexpected tmo state ACTIVE after cancel\n"); + + if (!g_options.reuse_tmo || force) { + em_event_t ev = EM_EVENT_UNDEF; + uint64_t t1 = TEST_TIME_FN(); + + trace_add(t1, TRACE_OP_TMO_DELETE, core, + ti, tmoi, NULL, eo_ctx->test_tmo[ti][tmoi]); + + em_tmo_state_t tmo_state; + + tmo_state = em_tmo_get_state(eo_ctx->test_tmo[ti][tmoi]); + if (tmo_state == EM_TMO_STATE_ACTIVE) + em_tmo_cancel(eo_ctx->test_tmo[ti][tmoi], &ev); + + em_status_t rv = em_tmo_delete(eo_ctx->test_tmo[ti][tmoi]); + + profile_add(t1, TEST_TIME_FN(), PROF_TMO_DELETE, eo_ctx, core); + test_fatal_if(rv != EM_OK, "tmo_delete fail, tmo = %p!", + eo_ctx->test_tmo[ti][tmoi]); + test_fatal_if(ev != EM_EVENT_UNDEF, + "Unexpected - tmo delete returned event %p", ev); + eo_ctx->test_tmo[ti][tmoi] = EM_TMO_UNDEF; + } + } + } +} + +static void delete_test_events(app_eo_ctx_t *eo_ctx, bool force) +{ + int core = em_core_id(); + + for (unsigned int ti = 0; ti < g_options.num_timers; ti++) { + for (unsigned int tmoi = 0; tmoi < g_options.num_tmo; tmoi++) { + if (eo_ctx->test_ev[ti][tmoi] == EM_EVENT_UNDEF) + continue; + if (!g_options.reuse_ev || force) { + trace_add(TEST_TIME_FN(), TRACE_OP_TMO_EV_FREE, + core, ti, tmoi, eo_ctx->test_tmo[ti][tmoi], + eo_ctx->test_ev[ti][tmoi]); + em_free(eo_ctx->test_ev[ti][tmoi]); + eo_ctx->test_ev[ti][tmoi] = EM_EVENT_UNDEF; + } + } + } +} + +static void restart(app_eo_ctx_t *eo_ctx, int count) +{ + if (g_options.recreate) + create_test_timer(eo_ctx); + + /* clear event counts, leave profiles */ + for (unsigned int c = 0; c < m_shm->core_count; c++) + for (unsigned int t = 0; t < g_options.num_timers; t++) + for (unsigned int to = 0; to < g_options.num_tmo; to++) + eo_ctx->cdat[c].count[t][to] = 0; + + eo_ctx->state = STATE_START; + eo_ctx->next_change = count + 2; +} + +static bool handle_heartbeat(app_eo_ctx_t *eo_ctx, em_event_t event, app_msg_t *msgin, uint64_t now) +{ + static unsigned int loops; + 
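+ /* 'loops' can be a plain function-local static: heartbeat events
+ * arrive via an atomic queue, so this handler never runs on two
+ * cores concurrently
+ */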
em_event_t ev = EM_EVENT_UNDEF; + + (void)eo_ctx; + (void)now; + + trace_add(now, TRACE_OP_HB_RX, em_core_id(), msgin->count, eo_ctx->state, NULL, event); + + if (EXTRA_PRINTS) + APPL_PRINT("."); + + msgin->count++; + if (msgin->count >= eo_ctx->next_change) { /* time to do something */ + /* State machine for test cycle (loop). Runs on heartbeat timeout every second. + * Some time is added between states so startup, printing etc is not causing jitter + * to time stamping + */ + int state = eo_ctx->state; + + switch (state) { + case STATE_START: + if (loops == 0) { + create_test_timer(eo_ctx); + print_timers(); + } + if (EXTRA_PRINTS) + APPL_PRINT("START\n"); + + /* start */ + eo_ctx->start_time = TEST_TIME_FN(); + eo_ctx->state++; /* atomic, go to RUN */ + create_test_timeouts(eo_ctx); + eo_ctx->next_change = msgin->count + g_options.looptime; + break; + + case STATE_RUN: + eo_ctx->state++; /* go to STOP */ + for (unsigned int ti = 0; ti < g_options.num_timers; ti++) { + for (unsigned int tmoi = 0; tmoi < g_options.num_tmo; tmoi++) { + em_status_t rv; + uint64_t t1 = TEST_TIME_FN(); + + rv = em_tmo_cancel(eo_ctx->test_tmo[ti][tmoi], &ev); + profile_add(t1, TEST_TIME_FN(), PROF_TMO_CANCEL, + eo_ctx, em_core_id()); + trace_add(t1, TRACE_OP_TMO_CANCEL, em_core_id(), + ti, tmoi, ev, eo_ctx->test_tmo[ti][tmoi]); + test_fatal_if(rv != EM_ERR_TOONEAR, + "cancel did not return expected TOONEAR!"); + } + } + eo_ctx->next_change = msgin->count + 3; /* enough to get all remaining */ + eo_ctx->stop_time = TEST_TIME_FN(); + break; + + case STATE_STOP: + if (EXTRA_PRINTS) + APPL_PRINT("\nSTOP\n"); + delete_test_timeouts(eo_ctx, false); + delete_test_events(eo_ctx, false); + eo_ctx->state++; /* go to ANALYZE */ + break; + + case STATE_ANALYZE: + loops++; + APPL_PRINT("\n\nLoop completed\n"); + analyze_and_print(eo_ctx, loops); + + if (loops >= g_options.loops) { /* all done, cleanup and summary */ + em_status_t rv = em_tmo_cancel(eo_ctx->heartbeat_tmo, &ev); + + test_fatal_if(rv != EM_OK && rv != EM_ERR_TOONEAR, "HB cncl fail"); + test_fatal_if(ev != EM_EVENT_UNDEF, + "not expecting event on cancel (at receive)"); + eo_ctx->state++; /* go to EXIT next */ + delete_test_timeouts(eo_ctx, true); + delete_test_events(eo_ctx, true); + + global_summary(eo_ctx); + + APPL_PRINT("Done, raising SIGINT!\n"); + trace_add(TEST_TIME_FN(), TRACE_OP_SIGINT, em_core_id(), + loops, -1, NULL, NULL); + raise(SIGINT); + return false; + } + /* next loop, re-start */ + if (g_options.recreate) + delete_test_timer(eo_ctx); + restart(eo_ctx, msgin->count); + break; + + case STATE_EXIT: + if (EXTRA_PRINTS) + APPL_PRINT("EXIT\n"); + return false; /* don't ack anymore */ + + default: + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "State invalid! 
%d\n", eo_ctx->state); + } + } + + trace_add(TEST_TIME_FN(), TRACE_OP_TMO_ACK, em_core_id(), -1, -1, NULL, event); + + em_status_t stat = em_tmo_ack(msgin->tmo, event); + + if (stat == EM_ERR_CANCELED) + return false; /* free event */ + if (stat != EM_OK) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "HB ack failed, ret %u, count %ld", stat, msgin->count); + return true; +} + +static bool handle_tmo(app_eo_ctx_t *eo_ctx, em_event_t event, uint64_t now) +{ + (void)eo_ctx; + (void)now; + + em_tmo_t tmo; + unsigned int tmri, tmoi; + int core = em_core_id(); + + test_fatal_if(em_tmo_get_type(event, &tmo, false) != EM_TMO_TYPE_PERIODIC, "not a TMO?!"); + + /* event has no user specific content, userptr holds encoded indexes */ + ptr2tmo(em_tmo_get_userptr(event, NULL), &tmri, &tmoi); + test_fatal_if(tmri >= MAX_TEST_TIMERS || tmoi >= MAX_TEST_TMO, + "Too large index, event corrupted?"); + test_fatal_if(tmo != eo_ctx->test_tmo[tmri][tmoi], + "tmo handle [%u][%u] does not match expected %p->%p\n", + tmri, tmoi, eo_ctx->test_tmo[tmri][tmoi], tmo); + + /* use passed rx timestamp for better accuracy. Could still improve by debug timestamps */ + uint64_t tick = em_timer_current_tick(eo_ctx->test_tmr[tmri]); + + trace_add(now, TRACE_OP_TMO_RX, core, tmri, tmoi, (void *)tick, event); + eo_ctx->cdat[core].count[tmri][tmoi]++; + trace_add(TEST_TIME_FN(), TRACE_OP_TMO_ACK, core, tmri, tmoi, tmo, event); + + em_status_t stat; + uint64_t t1 = TEST_TIME_FN(); + + stat = em_tmo_ack(tmo, event); + profile_add(t1, TEST_TIME_FN(), PROF_TMO_ACK, eo_ctx, core); + if (stat == EM_ERR_CANCELED) { /* last event */ + trace_add(TEST_TIME_FN(), TRACE_OP_TMO_ACK_LAST, core, tmri, tmoi, tmo, event); + eo_ctx->last_time[tmri][tmoi] = now; + if (EXTRA_PRINTS) + APPL_PRINT("last timeout[%u][%u]\n", tmri, tmoi); + if (g_options.reuse_ev) { + eo_ctx->test_ev[tmri][tmoi] = event; /* event for tmo re-start */ + return true; /* don't free in receive */ + } + return false; /* now allowed to free */ + } + + test_fatal_if(stat != EM_OK, "Test tmo[%u][%u] ack returned %u!\n", tmri, tmoi, stat); + + if (g_options.delay_us != 0) + extra_delay(&eo_ctx->cdat[core].rnd, core, tmri, tmoi); + + return true; +} + +static void global_summary(app_eo_ctx_t *eo_ctx) +{ + int cores = m_shm->core_count; + + APPL_PRINT("\nGLOBAL SUMMARY:\n"); + + if (g_options.profile) { + APPL_PRINT("\nTiming profiles:\n"); + APPL_PRINT("api count min max avg (ns)\n"); + APPL_PRINT("------------------------------------------------------------------------\n"); + for (int p = 0; p < PROF_TMO_LAST; p++) { + prof_t pdat = { 0 }; + + pdat.min = UINT64_MAX; + for (int c = 0; c < cores; c++) { + if (eo_ctx->cdat[c].prof[p].min < pdat.min) + pdat.min = eo_ctx->cdat[c].prof[p].min; + if (eo_ctx->cdat[c].prof[p].max > pdat.max) + pdat.max = eo_ctx->cdat[c].prof[p].max; + pdat.num += eo_ctx->cdat[c].prof[p].num; + pdat.acc += eo_ctx->cdat[c].prof[p].acc; + } + if (pdat.num == 0) + continue; + APPL_PRINT("%-15s %-15lu %-15lu %-15lu %-15lu\n", + prof_names[p], pdat.num, pdat.min, + pdat.max, pdat.acc / pdat.num); + } + } + + APPL_PRINT("\ncore EO utilization\n"); + APPL_PRINT("---------------------\n"); + for (int c = 0; c < cores; c++) { + double load = (double)eo_ctx->cdat[c].eo_ns / + (double)(eo_ctx->cdat[c].non_eo_ns + eo_ctx->cdat[c].eo_ns); + + APPL_PRINT("%-7d%.2f %%\n", c, load * 100); + } + + /* more analysis from e.g. 
trace data could be implemented here */ + + APPL_PRINT("\n"); +} + +static void analyze_and_print(app_eo_ctx_t *eo_ctx, int loop) +{ + APPL_PRINT("Analysis for loop %u :\n", loop); + + int cores = m_shm->core_count; + uint64_t counts[MAX_TEST_TIMERS][MAX_TEST_TMO]; + + memset(counts, 0, sizeof(counts)); + for (int i = 0; i < cores ; i++) + for (unsigned int t = 0; t < g_options.num_timers; t++) + for (unsigned int to = 0; to < g_options.num_tmo; to++) + counts[t][to] += eo_ctx->cdat[i].count[t][to]; + + uint64_t total = 0; + + APPL_PRINT("tmr tmo secs tmos ->hz setup_hz error %%\n"); + APPL_PRINT("---------------------------------------------------------------------\n"); + for (unsigned int t = 0; t < g_options.num_timers; t++) { + for (unsigned int to = 0; to < g_options.num_tmo; to++) { + int64_t ttime = (int64_t)eo_ctx->last_time[t][to] - + (int64_t)eo_ctx->first_time[t][to]; + double secs = (double)ttime / 1000000000; + double tested_hz = ((double)(counts[t][to] - 1)) / fabs(secs); + double setup_hz = frac2float(g_options.basehz[t]); + + setup_hz *= g_options.multiplier[to]; + double errorp = ((tested_hz - setup_hz) / setup_hz) * 100; + + APPL_PRINT("%-5u %-5u %-10.4f %-12lu %-12.4f %-12.4f %-12.3f\n", + t, to, secs, counts[t][to], tested_hz, setup_hz, errorp); + total += counts[t][to]; + + /* calculations are invalid if last event was not received */ + if ((int64_t)eo_ctx->last_time[t][to] - + (int64_t)eo_ctx->first_time[t][to] < 1) + APPL_PRINT("WARN: last event for tmo[%u][%u] not received?\n", + t, to); + } + } + + double runsecs = (double)(eo_ctx->stop_time - eo_ctx->start_time) / 1000000000; + + APPL_PRINT("\n%lu total timeouts received in %.3f s -> %.4f M tmo / sec\n\n", + total, runsecs, ((double)total / runsecs) / 1000000); +} + +void test_init(const appl_conf_t *appl_conf) +{ + (void)appl_conf; + int core = em_core_id(); + + /* first core creates shared memory */ + if (core == 0) { + odp_shm = odp_shm_reserve(SHM_NAME, sizeof(timer_app_shm_t), 64, 0); + if (odp_shm == ODP_SHM_INVALID) { + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "shm init failed on EM-core: %u", core); + } + m_shm = odp_shm_addr(odp_shm); + + /* initialize it */ + if (m_shm) + memset(m_shm, 0, sizeof(timer_app_shm_t)); + else + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "ShMem init failed on EM-core: %u", core); + + if (EXTRA_PRINTS) + APPL_PRINT("%luk shared memory for app context\n", + sizeof(timer_app_shm_t) / 1024); + + /* Store the number of EM-cores running the application */ + m_shm->core_count = appl_conf->core_count; + + if (g_options.tracelen) { + size_t tlen = m_shm->core_count * + g_options.tracelen * sizeof(trace_entry_t); + + odp_shm_trace = odp_shm_reserve(SHM_TRACE_NAME, tlen, 64, 0); + if (odp_shm_trace == ODP_SHM_INVALID) { + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "trace shm init failed on EM-core: %u", core); + } + m_tracebuf = odp_shm_addr(odp_shm_trace); + if (m_tracebuf) + memset(m_tracebuf, 0, tlen); + if (EXTRA_PRINTS) + APPL_PRINT("%luk shared memory for trace\n", tlen / 1024); + } else { + odp_shm_trace = ODP_SHM_INVALID; + } + } else { + /* lookup memory from core 0 init */ + odp_shm = odp_shm_lookup(SHM_NAME); + test_fatal_if(odp_shm == ODP_SHM_INVALID, "shared mem lookup fail"); + + if (g_options.tracelen) { + odp_shm_trace = odp_shm_lookup(SHM_TRACE_NAME); + test_fatal_if(odp_shm_trace == ODP_SHM_INVALID, + "trace shared mem lookup fail"); + } + + m_shm = odp_shm_addr(odp_shm); + if (!m_shm) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "ShMem init 
failed on EM-core: %u", core);
+	}
+
+	if (EXTRA_PRINTS)
+		APPL_PRINT("Shared mem at %p on core %d\n", m_shm, core);
+
+	if (g_options.tracelen) {
+		m_tracebuf = odp_shm_addr(odp_shm_trace);
+		if (m_tracebuf == NULL)
+			test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF,
+				   "Trace ShMem addr failed on EM-core: %u", core);
+		m_tracebuf += g_options.tracelen * core;
+		if (EXTRA_PRINTS)
+			APPL_PRINT("Trace buffer at %p on core %d\n", m_tracebuf, core);
+	}
+
+	mlockall(MCL_FUTURE);
+	if (EXTRA_PRINTS)
+		APPL_PRINT("core %d: %s done, shm @%p\n", core, __func__, m_shm);
+}
+
+/**
+ * Startup of the timer ring test EM application
+ */
+void test_start(const appl_conf_t *appl_conf)
+{
+	em_eo_t eo;
+	em_timer_attr_t attr;
+	em_queue_t queue;
+	em_status_t stat;
+	app_eo_ctx_t *eo_ctx;
+
+	if (appl_conf->num_procs > 1) {
+		APPL_PRINT("\nPROCESS MODE is not yet supported!\n");
+		abort();
+	}
+
+	fix_setup();
+
+	eo_ctx = &m_shm->eo_context;
+	memset(eo_ctx, 0, sizeof(app_eo_ctx_t));
+
+	eo = em_eo_create(APP_EO_NAME, app_eo_start, app_eo_start_local,
+			  app_eo_stop, app_eo_stop_local, app_eo_receive,
+			  eo_ctx);
+	test_fatal_if(eo == EM_EO_UNDEF, "Failed to create EO!");
+
+	stat = em_register_error_handler(my_error_handler);
+	test_fatal_if(stat != EM_OK, "Failed to register error handler");
+
+	/* parallel high priority queue for timeout handling */
+	queue = em_queue_create("Tmo Q",
+				EM_QUEUE_TYPE_PARALLEL,
+				EM_QUEUE_PRIO_HIGHEST,
+				EM_QUEUE_GROUP_DEFAULT, NULL);
+	stat = em_eo_add_queue_sync(eo, queue);
+	test_fatal_if(stat != EM_OK, "Failed to add test queue!");
+	eo_ctx->test_q = queue;
+
+	/* another, normal priority queue for heartbeat */
+	queue = em_queue_create("HB Q",
+				EM_QUEUE_TYPE_ATOMIC,
+				EM_QUEUE_PRIO_NORMAL,
+				EM_QUEUE_GROUP_DEFAULT, NULL);
+	stat = em_eo_add_queue_sync(eo, queue);
+	test_fatal_if(stat != EM_OK, "Failed to add HB queue!");
+	eo_ctx->hb_q = queue;
+
+	/* create HB timer */
+	em_timer_attr_init(&attr);
+	strncpy(attr.name, "HBTimer", EM_TIMER_NAME_LEN);
+	m_shm->hb_tmr = em_timer_create(&attr);
+	test_fatal_if(m_shm->hb_tmr == EM_TIMER_UNDEF,
+		      "Failed to create HB timer!");
+
+	trace_add(TEST_TIME_FN(), TRACE_OP_TMR_CREATE, em_core_id(), -1, -1, NULL, m_shm->hb_tmr);
+
+	em_timer_capability_t capa = { 0 };
+
+	stat = em_timer_capability(&capa, g_options.clksrc);
+	test_fatal_if(stat != EM_OK, "em_timer_capability returned error for clk %u\n",
+		      g_options.clksrc);
+	test_fatal_if(capa.ring.max_rings == 0, "Ring timers not supported!");
+
+	APPL_PRINT("Timer ring capability for clksrc %d:\n", g_options.clksrc);
+	APPL_PRINT(" maximum timers: %d\n", capa.ring.max_rings);
+
+	double hz = frac2float(capa.ring.min_base_hz);
+
+	APPL_PRINT(" minimum base_hz: %.3f\n", hz);
+	hz = frac2float(capa.ring.max_base_hz);
+	APPL_PRINT(" maximum base_hz: %.3f\n", hz);
+
+	/* Start EO */
+	stat = em_eo_start_sync(eo, NULL, NULL);
+	test_fatal_if(stat != EM_OK, "Failed to start EO!");
+}
+
+void test_stop(const appl_conf_t *appl_conf)
+{
+	if (appl_conf->num_procs > 1) {
+		APPL_PRINT("%s(): skip\n", __func__);
+		return;
+	}
+
+	em_eo_t eo = em_eo_find(APP_EO_NAME);
+
+	test_fatal_if(eo == EM_EO_UNDEF, "Could not find EO:%s", APP_EO_NAME);
+
+	em_status_t ret = em_eo_stop_sync(eo);
+
+	test_fatal_if(ret != EM_OK, "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret);
+
+	ret = em_timer_delete(m_shm->hb_tmr);
+	test_fatal_if(ret != EM_OK, "Timer:%" PRI_TMR " delete:%" PRI_STAT "",
+		      m_shm->hb_tmr, ret);
+	m_shm->hb_tmr = EM_TIMER_UNDEF;
+
+	ret = em_eo_remove_queue_all_sync(eo, EM_TRUE);
+	test_fatal_if(ret != EM_OK, "EO:%" PRI_EO " delete Qs:%" PRI_STAT "", eo, ret);
+
+	ret = em_eo_delete(eo);
+	test_fatal_if(ret != EM_OK, "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret);
+
+	em_unregister_error_handler();
+	APPL_PRINT("test_stopped\n");
+}
+
+void test_term(const appl_conf_t *appl_conf)
+{
+	(void)appl_conf;
+
+	if (m_shm != NULL) {
+		odp_shm_free(odp_shm);
+		m_shm = NULL;
+		odp_shm = ODP_SHM_INVALID;
+	}
+	if (odp_shm_trace != ODP_SHM_INVALID) {
+		odp_shm_free(odp_shm_trace);
+		odp_shm_trace = ODP_SHM_INVALID;
+	}
+}
+
+static em_status_t app_eo_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf)
+{
+	app_msg_t *msg;
+	app_eo_ctx_t *eo_ctx = (app_eo_ctx_t *)eo_context;
+
+	(void)eo;
+	(void)conf;
+
+	odp_ticketlock_init(&tracelock);
+	print_setup();
+
+	for (unsigned int t = 0; t < g_options.num_timers; t++)
+		for (unsigned int to = 0; to < g_options.num_tmo; to++) {
+			eo_ctx->test_tmo[t][to] = EM_TMO_UNDEF;
+			eo_ctx->test_ev[t][to] = EM_EVENT_UNDEF;
+		}
+
+	/* create periodic timeout for heartbeat */
+	eo_ctx->heartbeat_tmo = em_tmo_create(m_shm->hb_tmr, EM_TMO_FLAG_PERIODIC, eo_ctx->hb_q);
+	test_fatal_if(eo_ctx->heartbeat_tmo == EM_TMO_UNDEF,
+		      "Can't allocate heartbeat_tmo!\n");
+
+	em_event_t event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, EM_POOL_DEFAULT);
+
+	test_fatal_if(event == EM_EVENT_UNDEF, "Can't allocate event (%zuB)!\n",
+		      sizeof(app_msg_t));
+
+	msg = em_event_pointer(event);
+	msg->count = 0;
+	msg->type = MSGTYPE_HB;
+	msg->tmo = eo_ctx->heartbeat_tmo;
+	uint64_t hb_hz = em_timer_get_freq(m_shm->hb_tmr);
+
+	if (hb_hz < 10)
+		APPL_ERROR("WARNING: HB timer hz very low!?\n");
+
+	em_timer_tick_t period = hb_hz; /* 1s HB */
+
+	test_fatal_if(period < 1, "HB timer resolution is too low!\n");
+
+	eo_ctx->state = STATE_START;
+	eo_ctx->next_change = 2;
+
+	if (g_options.profile) {
+		for (unsigned int c = 0; c < m_shm->core_count; c++)
+			for (int p = 0; p < NUM_PROFILES; p++)
+				eo_ctx->cdat[c].prof[p].min = UINT64_MAX;
+	}
+
+	/* start heartbeat */
+	em_status_t stat = em_tmo_set_periodic(eo_ctx->heartbeat_tmo, 0, period, event);
+
+	test_fatal_if(stat != EM_OK, "Can't activate heartbeat tmo!\n");
+	trace_add(TEST_TIME_FN(), TRACE_OP_TMO_SET, em_core_id(),
+		  -1, -1, NULL, eo_ctx->heartbeat_tmo);
+
+	if (EXTRA_PRINTS)
+		APPL_PRINT("WARNING: extra prints enabled, expect some timing jitter\n");
+
+	stat = em_dispatch_register_enter_cb(enter_cb);
+	test_fatal_if(stat != EM_OK, "enter_cb() register failed!");
+	stat = em_dispatch_register_exit_cb(exit_cb);
+	test_fatal_if(stat != EM_OK, "exit_cb() register failed!");
+
+	return EM_OK;
+}
+
+/**
+ * @private
+ *
+ * EO per thread start function.
+ */
+static em_status_t app_eo_start_local(void *eo_context, em_eo_t eo)
+{
+	app_eo_ctx_t *const eo_ctx = eo_context;
+	int core = em_core_id();
+
+	(void)eo;
+	(void)eo_ctx;
+
+	test_fatal_if(core >= MAX_CORES, "Too many cores!");
+
+	memset(&eo_ctx->cdat[core].rnd, 0, sizeof(rnd_state_t));
+	initstate_r(time(NULL), eo_ctx->cdat[core].rnd.rndstate, RND_STATE_BUF,
+		    &eo_ctx->cdat[core].rnd.rdata);
+	srandom(time(NULL));
+	m_tracecount = 0;
+	trace_add(TEST_TIME_FN(), TRACE_OP_START, core, -1, -1, NULL, NULL);
+	return EM_OK;
+}
+
+/**
+ * @private
+ *
+ * EO stop function.
+ */ +static em_status_t app_eo_stop(void *eo_context, em_eo_t eo) +{ + app_eo_ctx_t *const eo_ctx = eo_context; + em_event_t event = EM_EVENT_UNDEF; + + (void)eo; + + if (EXTRA_PRINTS) + APPL_PRINT("EO stop\n"); + + if (eo_ctx->heartbeat_tmo != EM_TMO_UNDEF) { + if (em_tmo_get_state(eo_ctx->heartbeat_tmo) == EM_TMO_STATE_ACTIVE) + em_tmo_cancel(eo_ctx->heartbeat_tmo, &event); + + em_tmo_delete(eo_ctx->heartbeat_tmo); + eo_ctx->heartbeat_tmo = EM_TMO_UNDEF; + if (event != EM_EVENT_UNDEF) + em_free(event); + } + + delete_test_timer(eo_context); + + em_status_t ret = em_dispatch_unregister_enter_cb(enter_cb); + + test_fatal_if(ret != EM_OK, "enter_cb() unregister:%" PRI_STAT, ret); + ret = em_dispatch_unregister_exit_cb(exit_cb); + test_fatal_if(ret != EM_OK, "exit_cb() unregister:%" PRI_STAT, ret); + + if (EXTRA_PRINTS) + APPL_PRINT("EO stop done\n"); + return EM_OK; +} + +static em_status_t app_eo_stop_local(void *eo_context, em_eo_t eo) +{ + (void)eo; + + trace_add(TEST_TIME_FN(), TRACE_OP_END, em_core_id(), -1, -1, NULL, NULL); + + /* dump trace */ + if (g_options.tracelen) { + odp_ticketlock_lock(&tracelock); /* serialize printing */ + dump_trace(eo_context); + odp_ticketlock_unlock(&tracelock); + } + + return EM_OK; +} + +/* EO receive function */ +static void app_eo_receive(void *eo_context, em_event_t event, + em_event_type_t type, em_queue_t queue, + void *q_context) +{ + uint64_t now = TEST_TIME_FN(); + app_eo_ctx_t *const eo_ctx = eo_context; + bool reuse = false; + + (void)q_context; + (void)queue; + + /* heartbeat */ + if (type == EM_EVENT_TYPE_SW) { + app_msg_t *msgin = (app_msg_t *)em_event_pointer(event); + + switch (msgin->type) { + case MSGTYPE_HB: /* uses atomic queue */ + reuse = handle_heartbeat(eo_ctx, event, msgin, now); + break; + + default: + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Invalid msg received!\n"); + } + } else if (type == EM_EVENT_TYPE_TIMER_IND) { /* test timeout */ + reuse = handle_tmo(eo_ctx, event, now); /* uses parallel queue */ + } else { + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Invalid event type %u!\n", type); + } + + if (!reuse) { + if (type == EM_EVENT_TYPE_TIMER_IND) { /* extra trace */ + unsigned int tmri, tmoi; + + ptr2tmo(em_tmo_get_userptr(event, NULL), &tmri, &tmoi); + trace_add(TEST_TIME_FN(), TRACE_OP_TMO_EV_FREE, em_core_id(), + tmri, tmoi, eo_ctx->test_tmo[tmri][tmoi], event); + eo_ctx->test_ev[tmri][tmoi] = EM_EVENT_UNDEF; + } + em_free(event); + } +} + +int main(int argc, char *argv[]) +{ + /* pick app-specific arguments after '--' */ + int i; + + APPL_PRINT("EM periodic ring timer test %s\n\n", VERSION); + + for (i = 1; i < argc; i++) { + if (!strcmp(argv[i], "--")) + break; + } + if (i < argc) { + if (!parse_my_args(i, argc, argv)) { + APPL_PRINT("Invalid application arguments\n"); + return 1; + } + } + + return cm_setup(argc, argv); +} diff --git a/programs/performance/timer_test_ring.h b/programs/performance/timer_test_ring.h index a525da18..c8faf273 100644 --- a/programs/performance/timer_test_ring.h +++ b/programs/performance/timer_test_ring.h @@ -1,212 +1,212 @@ -#include -#include - -#define APP_EO_NAME "ringEO" -#define SHM_NAME "TimerRing-test" -#define SHM_TRACE_NAME "TimerRing-trace" -#define MAX_CORES 64 -#define EXTRA_PRINTS 0 /* dev option, normally 0 */ -#define MAX_TEST_TIMERS 15 -#define MAX_TEST_TMO 100 -#define TEST_TIME_FN odp_time_global_ns /* OR odp_time_global_strict_ns */ -#define MAX_FILENAME 100 -#define PRINT_MAX_TMRS 10 - -const struct option longopts[] = { - {"loops", required_argument, 
NULL, 'l'}, - {"basehz", required_argument, NULL, 'b'}, - {"basehz-float", required_argument, NULL, 'f'}, - {"multiplier", required_argument, NULL, 'm'}, - {"max-multiplier", required_argument, NULL, 'M'}, - {"resolution", required_argument, NULL, 'r'}, - {"clksrc", required_argument, NULL, 'c'}, - {"looptime", required_argument, NULL, 't'}, - {"tracebuf", required_argument, NULL, 'T'}, - {"writefile", optional_argument, NULL, 'w'}, - {"num-tmo", required_argument, NULL, 'n'}, - {"start_offset", required_argument, NULL, 'o'}, - {"delay", required_argument, NULL, 'd'}, - {"recreate-timer", no_argument, NULL, 'R'}, - {"reuse-tmo", no_argument, NULL, 'N'}, - {"reuse-event", no_argument, NULL, 'E'}, - {"api-profile", no_argument, NULL, 'a'}, - {"help", no_argument, NULL, 'h'}, - {NULL, 0, NULL, 0} -}; - -const char *shortopts = "hRNEal:m:t:b:r:M:n:c:f:T:w::o:d:"; -/* descriptions for above options, keep in sync! */ -const char *descopts[] = { - "Number of test loops, default 1", - "Base hz as integer, default 100. Can be comma separated list.", - "Base hz as float, default 100. Approximated to fractions. Can be comma separated list.", - "Base rate multiplier, default 1. Can be comma separated list", - "Maximum multiplier, default 8. Can be comma separated list", - "Resolution, ns. Default 0 (system default). Can be comma separated list", - "Clock source for timer", - "Loop time, i.e. how long to run per loop. Seconds, default 30", - "Trace buffer size (number of traces per core). Default 0", - "Write trace dump to csv file at exit. Argument is file name. Default stdout", - "Number of timeouts per timer, default 1.", - "Relative start offset in timer ticks. Default 0. Can be comma separated list.", - "Add extra processing delay per tmo receive, ns. Default 0. Negative is random up to", - "Delete and re-create timers every loop, default no", - "Re-use tmo handles (new loop), default no", - "Re-use tmo event (new loop), default no", - "Profile API call times, default no", - "Print usage and exit", - NULL -}; - -const char *instructions = -"\nMain purpose of this tool is to test periodic ring timer functionality.\n" -"At least two EM timers are created. One standard timer for a heartbeat\n" -"driving test states. 
Second timer(s) is periodic ring for testing.\n" -"Multiple test timers are created if -b,M or r specify comma separated list.\n" -"Arguments to control EM setup and app itself are separated with -- e.g.:\n" -"./timer_test_ring -c0x30 -t -- -a -l2 -N\n\n" -"By default (no app options) this runs a simple test once.\n"; - -#define RND_STATE_BUF 32 -typedef struct rnd_state_t { - struct random_data rdata; - char rndstate[RND_STATE_BUF]; -} rnd_state_t; - -typedef struct prof_t { - uint64_t min; - uint64_t max; - uint64_t acc; - uint64_t num; -} prof_t; - -#define NUM_PROFILES 5 - -typedef enum prof_apis { - PROF_TMO_CREATE, - PROF_TMO_SET, - PROF_TMO_ACK, - PROF_TMO_CANCEL, - PROF_TMO_DELETE, - - PROF_TMO_LAST -} prof_apis; - -const char *prof_names[] = { - "TMO_CREATE", - "TMO_SET", - "TMO_ACK", - "TMO_CANCEL", - "TMO_DELETE" -}; - -typedef struct core_data { - unsigned int count[MAX_TEST_TIMERS][MAX_TEST_TMO]; - prof_t prof[NUM_PROFILES]; - uint64_t enter_ns; - uint64_t exit_ns; - uint64_t non_eo_ns; - uint64_t eo_ns; - rnd_state_t rnd; -} core_data; - -typedef enum emsgtype { - MSGTYPE_HB, - MSGTYPE_TMO -} emsgtype; - -typedef struct app_msg_t { - emsgtype type; - unsigned int count; - em_tmo_t tmo; - int tidx; -} app_msg_t; - -typedef struct tmo_setup { - em_tmo_t handle; -} tmo_setup; - -typedef enum app_state_t { - STATE_UNDEF, - STATE_START, - STATE_RUN, - STATE_STOP, - STATE_ANALYZE, - STATE_EXIT -} app_state_t; - -typedef struct app_eo_ctx_t { - atomic_int state; - - em_tmo_t heartbeat_tmo; - em_timer_t test_tmr[MAX_TEST_TIMERS]; - uint64_t tick_hz[MAX_TEST_TIMERS]; - em_queue_t hb_q; - em_queue_t test_q; - unsigned int next_change; - uint64_t start_time; - uint64_t stop_time; - uint64_t first_time[MAX_TEST_TIMERS][MAX_TEST_TMO]; - uint64_t last_time[MAX_TEST_TIMERS][MAX_TEST_TMO]; - em_tmo_t test_tmo[MAX_TEST_TIMERS][MAX_TEST_TMO]; - em_event_t test_ev[MAX_TEST_TIMERS][MAX_TEST_TMO]; - core_data cdat[MAX_CORES]; -} app_eo_ctx_t; - -typedef struct timer_app_shm_t { - /* Number of EM cores running the application */ - unsigned int core_count; - em_pool_t pool; - app_eo_ctx_t eo_context; - em_timer_t hb_tmr; -} timer_app_shm_t; - -typedef struct trace_entry_t { - uint64_t ns; - uint32_t op; - uint32_t val; - int64_t arg1; - int64_t arg2; - void *arg3; - void *arg4; -} trace_entry_t; - -typedef enum trace_op_t { - TRACE_OP_START, - TRACE_OP_TMR_CREATE, - TRACE_OP_TMO_CREATE, - TRACE_OP_TMO_SET, - TRACE_OP_TMO_RX, - TRACE_OP_HB_RX, - TRACE_OP_TMO_CANCEL, - TRACE_OP_TMO_DELETE, - TRACE_OP_TMR_DELETE, - TRACE_OP_TMO_ACK, - TRACE_OP_TMO_ACK_LAST, - TRACE_OP_TMO_EV_FREE, - TRACE_OP_DELAY, - TRACE_OP_SIGINT, - TRACE_OP_END, - - TRACE_OP_LAST - -} trace_op_t; - -const char *trace_op_labels[] = { - "START", - "TMR_CREATE", - "TMO_CREATE", - "TMO_SET", - "TMO_RX", - "HB_RX", - "TMO_CANCEL", - "TMO_DELETE", - "TMR_DELETE", - "TMO_ACK", - "TMO_LAST", - "TMO_EV_FREE", - "DELAY", - "SIGINT", - "END", - "OVERFLOW" -}; +#include +#include + +#define APP_EO_NAME "ringEO" +#define SHM_NAME "TimerRing-test" +#define SHM_TRACE_NAME "TimerRing-trace" +#define MAX_CORES 64 +#define EXTRA_PRINTS 0 /* dev option, normally 0 */ +#define MAX_TEST_TIMERS 15 +#define MAX_TEST_TMO 100 +#define TEST_TIME_FN odp_time_global_ns /* OR odp_time_global_strict_ns */ +#define MAX_FILENAME 100 +#define PRINT_MAX_TMRS 10 + +const struct option longopts[] = { + {"loops", required_argument, NULL, 'l'}, + {"basehz", required_argument, NULL, 'b'}, + {"basehz-float", required_argument, NULL, 'f'}, + {"multiplier", 
required_argument, NULL, 'm'}, + {"max-multiplier", required_argument, NULL, 'M'}, + {"resolution", required_argument, NULL, 'r'}, + {"clksrc", required_argument, NULL, 'c'}, + {"looptime", required_argument, NULL, 't'}, + {"tracebuf", required_argument, NULL, 'T'}, + {"writefile", optional_argument, NULL, 'w'}, + {"num-tmo", required_argument, NULL, 'n'}, + {"start_offset", required_argument, NULL, 'o'}, + {"delay", required_argument, NULL, 'd'}, + {"recreate-timer", no_argument, NULL, 'R'}, + {"reuse-tmo", no_argument, NULL, 'N'}, + {"reuse-event", no_argument, NULL, 'E'}, + {"api-profile", no_argument, NULL, 'a'}, + {"help", no_argument, NULL, 'h'}, + {NULL, 0, NULL, 0} +}; + +const char *shortopts = "hRNEal:m:t:b:r:M:n:c:f:T:w::o:d:"; +/* descriptions for above options, keep in sync! */ +const char *descopts[] = { + "Number of test loops, default 1", + "Base hz as integer, default 100. Can be comma separated list.", + "Base hz as float, default 100. Approximated to fractions. Can be comma separated list.", + "Base rate multiplier, default 1. Can be comma separated list", + "Maximum multiplier, default 8. Can be comma separated list", + "Resolution, ns. Default 0 (system default). Can be comma separated list", + "Clock source for timer", + "Loop time, i.e. how long to run per loop. Seconds, default 30", + "Trace buffer size (number of traces per core). Default 0", + "Write trace dump to csv file at exit. Argument is file name. Default stdout", + "Number of timeouts per timer, default 1.", + "Relative start offset in timer ticks. Default 0. Can be comma separated list.", + "Add extra processing delay per tmo receive, ns. Default 0. Negative is random up to", + "Delete and re-create timers every loop, default no", + "Reuse tmo handles (new loop), default no", + "Reuse tmo event (new loop), default no", + "Profile API call times, default no", + "Print usage and exit", + NULL +}; + +const char *instructions = +"\nMain purpose of this tool is to test periodic ring timer functionality.\n" +"At least two EM timers are created. One standard timer for a heartbeat\n" +"driving test states. 
Second timer(s) is periodic ring for testing.\n" +"Multiple test timers are created if -b,M or r specify comma separated list.\n" +"Arguments to control EM setup and app itself are separated with -- e.g.:\n" +"./timer_test_ring -c0x30 -t -- -a -l2 -N\n\n" +"By default (no app options) this runs a simple test once.\n"; + +#define RND_STATE_BUF 32 +typedef struct rnd_state_t { + struct random_data rdata; + char rndstate[RND_STATE_BUF]; +} rnd_state_t; + +typedef struct prof_t { + uint64_t min; + uint64_t max; + uint64_t acc; + uint64_t num; +} prof_t; + +#define NUM_PROFILES 5 + +typedef enum prof_apis { + PROF_TMO_CREATE, + PROF_TMO_SET, + PROF_TMO_ACK, + PROF_TMO_CANCEL, + PROF_TMO_DELETE, + + PROF_TMO_LAST +} prof_apis; + +const char *prof_names[] = { + "TMO_CREATE", + "TMO_SET", + "TMO_ACK", + "TMO_CANCEL", + "TMO_DELETE" +}; + +typedef struct core_data { + unsigned int count[MAX_TEST_TIMERS][MAX_TEST_TMO]; + prof_t prof[NUM_PROFILES]; + uint64_t enter_ns; + uint64_t exit_ns; + uint64_t non_eo_ns; + uint64_t eo_ns; + rnd_state_t rnd; +} core_data; + +typedef enum emsgtype { + MSGTYPE_HB, + MSGTYPE_TMO +} emsgtype; + +typedef struct app_msg_t { + emsgtype type; + unsigned int count; + em_tmo_t tmo; + int tidx; +} app_msg_t; + +typedef struct tmo_setup { + em_tmo_t handle; +} tmo_setup; + +typedef enum app_state_t { + STATE_UNDEF, + STATE_START, + STATE_RUN, + STATE_STOP, + STATE_ANALYZE, + STATE_EXIT +} app_state_t; + +typedef struct app_eo_ctx_t { + atomic_int state; + + em_tmo_t heartbeat_tmo; + em_timer_t test_tmr[MAX_TEST_TIMERS]; + uint64_t tick_hz[MAX_TEST_TIMERS]; + em_queue_t hb_q; + em_queue_t test_q; + unsigned int next_change; + uint64_t start_time; + uint64_t stop_time; + uint64_t first_time[MAX_TEST_TIMERS][MAX_TEST_TMO]; + uint64_t last_time[MAX_TEST_TIMERS][MAX_TEST_TMO]; + em_tmo_t test_tmo[MAX_TEST_TIMERS][MAX_TEST_TMO]; + em_event_t test_ev[MAX_TEST_TIMERS][MAX_TEST_TMO]; + core_data cdat[MAX_CORES]; +} app_eo_ctx_t; + +typedef struct timer_app_shm_t { + /* Number of EM cores running the application */ + unsigned int core_count; + em_pool_t pool; + app_eo_ctx_t eo_context; + em_timer_t hb_tmr; +} timer_app_shm_t; + +typedef struct trace_entry_t { + uint64_t ns; + uint32_t op; + uint32_t val; + int64_t arg1; + int64_t arg2; + void *arg3; + void *arg4; +} trace_entry_t; + +typedef enum trace_op_t { + TRACE_OP_START, + TRACE_OP_TMR_CREATE, + TRACE_OP_TMO_CREATE, + TRACE_OP_TMO_SET, + TRACE_OP_TMO_RX, + TRACE_OP_HB_RX, + TRACE_OP_TMO_CANCEL, + TRACE_OP_TMO_DELETE, + TRACE_OP_TMR_DELETE, + TRACE_OP_TMO_ACK, + TRACE_OP_TMO_ACK_LAST, + TRACE_OP_TMO_EV_FREE, + TRACE_OP_DELAY, + TRACE_OP_SIGINT, + TRACE_OP_END, + + TRACE_OP_LAST + +} trace_op_t; + +const char *trace_op_labels[] = { + "START", + "TMR_CREATE", + "TMO_CREATE", + "TMO_SET", + "TMO_RX", + "HB_RX", + "TMO_CANCEL", + "TMO_DELETE", + "TMR_DELETE", + "TMO_ACK", + "TMO_LAST", + "TMO_EV_FREE", + "DELAY", + "SIGINT", + "END", + "OVERFLOW" +}; diff --git a/robot-tests/bench/bench_event.robot b/robot-tests/bench/bench_event.robot index ada6a9f5..dac907df 100644 --- a/robot-tests/bench/bench_event.robot +++ b/robot-tests/bench/bench_event.robot @@ -16,4 +16,4 @@ Run bench_event [Documentation] Run bench_event @{args} = Create List -w - Run Bench args=${args} time_out=1m30s + Run Bench args=${args} time_out=5m30s diff --git a/robot-tests/bench/bench_pool.robot b/robot-tests/bench/bench_pool.robot index f8ae39f0..326ccc88 100644 --- a/robot-tests/bench/bench_pool.robot +++ b/robot-tests/bench/bench_pool.robot @@ -16,4 +16,4 @@ 
Run bench_pool [Documentation] Run bench_pool @{args} = Create List -w - Run Bench args=${args} time_out=10s + Run Bench args=${args} time_out=5m10s diff --git a/robot-tests/common.resource b/robot-tests/common.resource index fe922f1b..3b23bf69 100644 --- a/robot-tests/common.resource +++ b/robot-tests/common.resource @@ -72,7 +72,7 @@ Run EM-ODP Test # sleep_time: sleep time (in seconds) before sending SIGINT to the em-odp application # regex_match: regex pattern that must be matched - [Arguments] ${sleep_time} ${regex_match} + [Arguments] ${sleep_time} ${regex_match} ${args}=@{EMPTY} # In order to start application log from a new line Log To Console \n @@ -80,6 +80,7 @@ Run EM-ODP Test # Run application with given arguments ${app_handle} = Process.Start Process taskset -c ${TASKSET_CORES} ${APPLICATION} ... @{CM_ARGS} + ... @{args} ... stderr=STDOUT ... shell=True ... stdout=${TEMPDIR}/stdout.txt diff --git a/robot-tests/performance/loop_united.robot b/robot-tests/performance/loop_united.robot new file mode 100755 index 00000000..7fc9d654 --- /dev/null +++ b/robot-tests/performance/loop_united.robot @@ -0,0 +1,32 @@ +*** Comments *** +Copyright (c) 2020-2022, Nokia Solutions and Networks +All rights reserved. +SPDX-License-Identifier: BSD-3-Clause + + +*** Settings *** +Documentation Test Loop United -c ${CORE_MASK} -${APPLICATION_MODE} -- -l ${LOOP_TYPE} +Resource ../common.resource +Test Setup Set Log Level TRACE +Test Teardown Kill Any Hanging Applications + + +*** Variables *** +${FIRST_REGEX} = SEPARATOR= +... cycles/event:\\s*[0-9]+\\.[0-9]+\\s*Mevents/s/core:\\s*[0-9]+\\.[0-9]+ +... \\s*[0-9]+\\s*MHz\\s*core[0-9]+\\s*[0-9]+ + +@{REGEX_MATCH} = +... ${FIRST_REGEX} +... Done\\s*-\\s*exit + + +*** Test Cases *** +Test Loop United Loop + [Documentation] loop_united -c ${CORE_MASK} -${APPLICATION_MODE} -- -l ${LOOP_TYPE} + [TAGS] ${CORE_MASK} ${APPLICATION_MODE} ${LOOP_TYPE} + + @{app_args} = Create List + ... -- -l ${LOOP_TYPE} + + Run EM-ODP Test sleep_time=60 regex_match=${REGEX_MATCH} args=${app_args} diff --git a/scripts/em_odp_check b/scripts/em_odp_check.sh similarity index 83% rename from scripts/em_odp_check rename to scripts/em_odp_check.sh index 68f83fc7..5e2b13aa 100755 --- a/scripts/em_odp_check +++ b/scripts/em_odp_check.sh @@ -29,17 +29,17 @@ IGNORE+='STRNCPY', IGNORE+='TRACING_LOGGING' # Run cleanfile -$DIR/cleanfile $1 2> /dev/null +"$DIR"/cleanfile "$1" 2> /dev/null # Run checkpatch -$DIR/checkpatch.pl --file --no-tree --ignore $IGNORE --mailback --strict --terse \ - --no-summary --show-types $1 2> /dev/null +"$DIR"/checkpatch.pl --file --no-tree --ignore $IGNORE --mailback --strict --terse \ + --no-summary --show-types "$1" 2> /dev/null ret_checkpatch=$? # Run codespell (if installed) with default dictionary-file from installation ret_codespell=0 if which codespell > /dev/null; then - codespell $1 --ignore-words-list=ptd,numer,stdio,endcode + codespell "$1" --ignore-words-list=ptd,numer,stdio,endcode ret_codespell=$? fi diff --git a/scripts/style_check.py b/scripts/style_check.py index 63234695..b3a18e9a 100755 --- a/scripts/style_check.py +++ b/scripts/style_check.py @@ -1,67 +1,67 @@ -#!/usr/bin/env python - -import os -import sys -import threading - -# Conjure repo root dir. Presume that parent of script dir is repo root folder. 
-ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) - -# Style check script command -C_CHECK = ROOT_DIR + "/scripts/em_odp_check " - -# File extensions to check -EXT = ('.c', '.h') - -# Start checking from these folders -CHECK_DIRS = ["include", "src", "programs"] - -# Filter out these directories -IGNORE_DIRS = [] - -# Set absolute paths to check dirs -CHECK_DIRS = [os.path.join(ROOT_DIR, dir) for dir in CHECK_DIRS] - -# Multithread safe function to run the check script for file in file_list -def run_checks(): - global rc - - while file_list: - file = file_list.pop() - - # Option to run different check script for different files - # if file.endswith(('.c', '.h')): - cmd = C_CHECK + file - - if os.system(cmd) != 0: - rc = 1 - -rc = 0 -file_list = [] -threads = [] - -# Collect and add all files to be checked to file_list -for check_dir in CHECK_DIRS: - for root, dirs, files in os.walk(check_dir): - if not any(path in root for path in IGNORE_DIRS): - for file in files: - if file.endswith(EXT): - file_list.append(os.path.join(root, file)) - - -# Run checks on all files in file_list with multiple threads -for i in range(15): - t = threading.Thread(target=run_checks) - threads.append(t) - t.start() - -# Wait for threads -for t in threads: - t.join() - -if rc == 1: - print("Style errors found.") -else: - print("Style check OK!") - -sys.exit(rc) +#!/usr/bin/env python + +import os +import sys +import threading + +# Conjure repo root dir. Presume that parent of script dir is repo root folder. +ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) + +# Style check script command +C_CHECK = ROOT_DIR + "/scripts/em_odp_check.sh " + +# File extensions to check +EXT = ('.c', '.h') + +# Start checking from these folders +CHECK_DIRS = ["include", "src", "programs"] + +# Filter out these directories +IGNORE_DIRS = [] + +# Set absolute paths to check dirs +CHECK_DIRS = [os.path.join(ROOT_DIR, dir) for dir in CHECK_DIRS] + +# Multithread safe function to run the check script for file in file_list +def run_checks(): + global rc + + while file_list: + file = file_list.pop() + + # Option to run different check script for different files + # if file.endswith(('.c', '.h')): + cmd = C_CHECK + file + + if os.system(cmd) != 0: + rc = 1 + +rc = 0 +file_list = [] +threads = [] + +# Collect and add all files to be checked to file_list +for check_dir in CHECK_DIRS: + for root, dirs, files in os.walk(check_dir): + if not any(path in root for path in IGNORE_DIRS): + for file in files: + if file.endswith(EXT): + file_list.append(os.path.join(root, file)) + + +# Run checks on all files in file_list with multiple threads +for i in range(15): + t = threading.Thread(target=run_checks) + threads.append(t) + t.start() + +# Wait for threads +for t in threads: + t.join() + +if rc == 1: + print("Style errors found.") +else: + print("Style check OK!") + +sys.exit(rc) diff --git a/src/em_atomic_group_types.h b/src/em_atomic_group_types.h index e7c6da6b..4d2925c8 100644 --- a/src/em_atomic_group_types.h +++ b/src/em_atomic_group_types.h @@ -1,93 +1,93 @@ -/* - * Copyright (c) 2014, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * EM internal atomic group types & definitions - * - */ - -#ifndef EM_ATOMIC_GROUP_TYPES_H_ -#define EM_ATOMIC_GROUP_TYPES_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#define EVENT_CACHE_FLUSH 32 - -typedef struct { - /** The atomic group ID (handle) */ - em_atomic_group_t atomic_group; - /** Queue group that the atomic group belongs to */ - em_queue_group_t queue_group; - /** AG pool elem for linking free AGs for AG-alloc */ - objpool_elem_t atomic_group_pool_elem; - /** Internal stashes for events belonging to this group */ - struct { - /** for high priority events */ - odp_stash_t hi_prio; - /** for events of all other priority levels */ - odp_stash_t lo_prio; - } stashes; - - /** Atomic group element lock */ - env_spinlock_t lock ENV_CACHE_LINE_ALIGNED; - /** Number of queues that belong to this atomic group */ - env_atomic32_t num_queues; - - /** List of queues (q_elems) that belong to this atomic group */ - list_node_t qlist_head; - /** Atomic group name */ - char name[EM_ATOMIC_GROUP_NAME_LEN]; - - char end[0] ENV_CACHE_LINE_ALIGNED; -} atomic_group_elem_t ENV_CACHE_LINE_ALIGNED; - -/** - * Atomic group table - */ -typedef struct { - /** Atomic group element table */ - atomic_group_elem_t ag_elem[EM_MAX_ATOMIC_GROUPS]; -} atomic_group_tbl_t; - -/** - * Pool of free atomic groups - */ -typedef struct { - objpool_t objpool; -} atomic_group_pool_t; - -#ifdef __cplusplus -} -#endif - -#endif /* EM_ATOMIC_GROUP_TYPES_H_ */ +/* + * Copyright (c) 2014, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * EM internal atomic group types & definitions + * + */ + +#ifndef EM_ATOMIC_GROUP_TYPES_H_ +#define EM_ATOMIC_GROUP_TYPES_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define EVENT_CACHE_FLUSH 32 + +typedef struct { + /** The atomic group ID (handle) */ + em_atomic_group_t atomic_group; + /** Queue group that the atomic group belongs to */ + em_queue_group_t queue_group; + /** AG pool elem for linking free AG:s for AG-alloc */ + objpool_elem_t atomic_group_pool_elem; + /** Internal stashes for events belonging to this group */ + struct { + /** for high priority events */ + odp_stash_t hi_prio; + /** for events of all other priority levels */ + odp_stash_t lo_prio; + } stashes; + + /** Atomic group element lock */ + env_spinlock_t lock ENV_CACHE_LINE_ALIGNED; + /** Number of queues that belong to this atomic group */ + env_atomic32_t num_queues; + + /** List of queues (q_elems) that belong to this atomic group */ + list_node_t qlist_head; + /** Atomic group name */ + char name[EM_ATOMIC_GROUP_NAME_LEN]; + + char end[0] ENV_CACHE_LINE_ALIGNED; +} atomic_group_elem_t ENV_CACHE_LINE_ALIGNED; + +/** + * Atomic group table + */ +typedef struct { + /** Atomic group element table */ + atomic_group_elem_t ag_elem[EM_MAX_ATOMIC_GROUPS]; +} atomic_group_tbl_t; + +/** + * Pool of free atomic groups + */ +typedef struct { + objpool_t objpool; +} atomic_group_pool_t; + +#ifdef __cplusplus +} +#endif + +#endif /* EM_ATOMIC_GROUP_TYPES_H_ */ diff --git a/src/em_chaining.c b/src/em_chaining.c index 79551d0f..8a7589df 100644 --- a/src/em_chaining.c +++ b/src/em_chaining.c @@ -1,256 +1,258 @@ -/* - * Copyright (c) 2020, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "em_include.h" - -/* em_output_func_t for event-chaining output*/ -int chaining_output(const em_event_t events[], const unsigned int num, - const em_queue_t output_queue, void *output_fn_args); - -/** - * This function is declared as a weak symbol in em_chaining.h, meaning that the - * user can override it during linking with another implementation. - */ -em_status_t -event_send_device(em_event_t event, em_queue_t queue) -{ - internal_queue_t iq = {.queue = queue}; - - (void)event; - return INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, - EM_ESCOPE_EVENT_SEND_DEVICE, - "No %s() function given!\t" - "device:0x%" PRIx16 " Q-id:0x%" PRIx16 "\n", - __func__, iq.device_id, iq.queue_id); -} - -/** - * This function is declared as a weak symbol in em_chaining.h, meaning that the - * user can override it during linking with another implementation. - */ -int -event_send_device_multi(const em_event_t events[], int num, em_queue_t queue) -{ - internal_queue_t iq = {.queue = queue}; - - (void)events; - (void)num; - INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, - EM_ESCOPE_EVENT_SEND_DEVICE_MULTI, - "No %s() function given!\t" - "device:0x%" PRIx16 " Q-id:0x%" PRIx16 "\n", - __func__, iq.device_id, iq.queue_id); - return 0; -} - -static int -read_config_file(void) -{ - const char *conf_str; - int val = 0; - bool val_bool = false; - int ret; - - /* Zero all options first */ - memset(&em_shm->opt.event_chaining, 0, sizeof(em_shm->opt.event_chaining)); - - EM_PRINT("EM Event-Chaining config:\n"); - /* - * Option: event_chaining.order_keep - runtime enable/disable - */ - conf_str = "event_chaining.order_keep"; - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - /* store & print the value */ - em_shm->opt.event_chaining.order_keep = val_bool; - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", - val_bool); - - /* Read no more options if ordering is disabled */ - if (!em_shm->opt.event_chaining.order_keep) - return 0; /* Note! */ - - /* Temporary: Event chaining re-ordering not yet supported */ - if (unlikely(em_shm->opt.event_chaining.order_keep)) { - EM_LOG(EM_LOG_ERR, - "Config option %s: %s(%d) currently not supported\n", - conf_str, val_bool ? 
"true" : "false", val_bool); - return -1; - } - - /* - * Option: event_chaining.num_order_queues - * (only read if .order_keep == true above) - */ - conf_str = "event_chaining.num_order_queues"; - ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); - return -1; - } - if (val < 0 || val > MAX_CHAINING_OUTPUT_QUEUES) { - EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d' (max: %d)\n", - conf_str, val, MAX_CHAINING_OUTPUT_QUEUES); - return -1; - } - /* store & print the value */ - em_shm->opt.event_chaining.num_order_queues = val; - EM_PRINT(" %s: %d (max: %d)\n", conf_str, val, - MAX_CHAINING_OUTPUT_QUEUES); - - return 0; -} - -em_status_t -chaining_init(event_chaining_t *event_chaining) -{ - if (read_config_file()) - return EM_ERR_LIB_FAILED; - - /* Remains '0' if 'event_chaining.order_keep = false' in config file */ - event_chaining->num_output_queues = 0; - - for (unsigned int i = 0; i < MAX_CHAINING_OUTPUT_QUEUES; i++) - event_chaining->output_queues[i] = EM_QUEUE_UNDEF; - - if (!em_shm->opt.event_chaining.order_keep) - return EM_OK; /* don't create output queues for event chaining */ - - /* - * Create EM output queues for event chaining, needed to maintain event - * order during an ordered context - */ - em_queue_conf_t queue_conf; - em_output_queue_conf_t output_conf; - - memset(&queue_conf, 0, sizeof(queue_conf)); - memset(&output_conf, 0, sizeof(output_conf)); - - queue_conf.flags = EM_QUEUE_FLAG_DEFAULT; - queue_conf.min_events = 0; /* system default */ - queue_conf.conf_len = sizeof(output_conf); - queue_conf.conf = &output_conf; - /* Set output-queue callback function, no args needed */ - output_conf.output_fn = chaining_output; - output_conf.output_fn_args = NULL; - output_conf.args_len = 0; - - const unsigned int num = em_shm->opt.event_chaining.num_order_queues; - unsigned char idx = 0; - - for (unsigned int i = 0; i < num; i++) { - char name[EM_QUEUE_NAME_LEN]; - - snprintf(name, sizeof(name), "Event-Chaining-Output-%02u", idx); - idx++; - name[sizeof(name) - 1] = '\0'; - - em_queue_t output_queue = em_queue_create(name, - EM_QUEUE_TYPE_OUTPUT, - EM_QUEUE_PRIO_UNDEF, - EM_QUEUE_GROUP_UNDEF, - &queue_conf); - if (unlikely(output_queue == EM_QUEUE_UNDEF)) - return EM_ERR_ALLOC_FAILED; - - event_chaining->num_output_queues++; - event_chaining->output_queues[i] = output_queue; - } - - return EM_OK; -} - -em_status_t -chaining_term(const event_chaining_t *event_chaining) -{ - /* num = 0 if 'event_chaining.order_keep = false' in config file */ - const unsigned int num = event_chaining->num_output_queues; - - for (unsigned int i = 0; i < num; i++) { - em_queue_t output_queue = event_chaining->output_queues[i]; - /* delete the output queues associated with event chaining */ - em_status_t stat = em_queue_delete(output_queue); - - if (unlikely(stat != EM_OK)) - return stat; - } - - return EM_OK; -} - -/** - * Output-queue callback function of type 'em_output_func_t' for Event-Chaining. - * Only needed when sending during an ordered-context when the EM config file - * option is set to 'event_chaining.order_keep = true'. - */ -int -chaining_output(const em_event_t events[], const unsigned int num, - const em_queue_t output_queue, void *output_fn_args) -{ - /* - * NOTE! - * Temporary: Not supporting the EM config file option - * 'event_chaining.order_keep = true' at the moment, checked during - * chaining_init() -> read_config_file(). 
- * This function will thus not be called until support added. - */ - em_queue_t chaining_queue = EM_QUEUE_UNDEF; - - (void)output_queue; - (void)output_fn_args; - - if (unlikely(num <= 0)) - return 0; - - if (num == 1) { - em_status_t stat = event_send_device(events[0], chaining_queue); - - if (unlikely(stat != EM_OK)) - return 0; - return 1; - } - - /* - * num > 1: - */ - int ret = event_send_device_multi(events, num, chaining_queue); - - if (unlikely((unsigned int)ret != num)) { - if (ret < 0) - return 0; - else - return ret; - } - - return num; -} +/* + * Copyright (c) 2020, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "em_include.h" + +/* em_output_func_t for event-chaining output*/ +int chaining_output(const em_event_t events[], const unsigned int num, + const em_queue_t output_queue, void *output_fn_args); + +/** + * Default implementation is declared as a weak symbol, meaning that the + * user can override the function during linking with another implementation. + */ +__attribute__((weak)) +em_status_t event_send_device(em_event_t event, em_queue_t queue) +{ + internal_queue_t iq = {.queue = queue}; + + (void)event; + + return INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_EVENT_SEND_DEVICE, + "No %s() function given!\t" + "device:0x%" PRIx16 " Q-id:0x%" PRIx16 "\n", + __func__, iq.device_id, iq.queue_id); +} + +/** + * Default implementation is declared as a weak symbol, meaning that the + * user can override the function during linking with another implementation. 
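+ *
+ * For illustration only, an application override could follow this sketch,
+ * where 'my_transport_send()' is a hypothetical user function returning 0
+ * on success (not part of EM or ODP):
+ * @code
+ *	int event_send_device_multi(const em_event_t events[], int num,
+ *				    em_queue_t queue)
+ *	{
+ *		int sent = 0;
+ *
+ *		while (sent < num &&
+ *		       my_transport_send(events[sent], queue) == 0)
+ *			sent++;
+ *
+ *		return sent;
+ *	}
+ * @endcode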
+ */ +__attribute__((weak)) +int event_send_device_multi(const em_event_t events[], int num, em_queue_t queue) +{ + internal_queue_t iq = {.queue = queue}; + + (void)events; + (void)num; + + INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_EVENT_SEND_DEVICE_MULTI, + "No %s() function given!\t" + "device:0x%" PRIx16 " Q-id:0x%" PRIx16 "\n", + __func__, iq.device_id, iq.queue_id); + return 0; +} + +static int +read_config_file(void) +{ + const char *conf_str; + int val = 0; + bool val_bool = false; + int ret; + + /* Zero all options first */ + memset(&em_shm->opt.event_chaining, 0, sizeof(em_shm->opt.event_chaining)); + + EM_PRINT("EM Event-Chaining config:\n"); + /* + * Option: event_chaining.order_keep - runtime enable/disable + */ + conf_str = "event_chaining.order_keep"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + /* store & print the value */ + em_shm->opt.event_chaining.order_keep = val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", + val_bool); + + /* Read no more options if ordering is disabled */ + if (!em_shm->opt.event_chaining.order_keep) + return 0; /* Note! */ + + /* Temporary: Event chaining re-ordering not yet supported */ + if (unlikely(em_shm->opt.event_chaining.order_keep)) { + EM_LOG(EM_LOG_ERR, + "Config option %s: %s(%d) currently not supported\n", + conf_str, val_bool ? "true" : "false", val_bool); + return -1; + } + + /* + * Option: event_chaining.num_order_queues + * (only read if .order_keep == true above) + */ + conf_str = "event_chaining.num_order_queues"; + ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); + return -1; + } + if (val < 0 || val > MAX_CHAINING_OUTPUT_QUEUES) { + EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d' (max: %d)\n", + conf_str, val, MAX_CHAINING_OUTPUT_QUEUES); + return -1; + } + /* store & print the value */ + em_shm->opt.event_chaining.num_order_queues = val; + EM_PRINT(" %s: %d (max: %d)\n", conf_str, val, + MAX_CHAINING_OUTPUT_QUEUES); + + return 0; +} + +em_status_t +chaining_init(event_chaining_t *event_chaining) +{ + if (read_config_file()) + return EM_ERR_LIB_FAILED; + + /* Remains '0' if 'event_chaining.order_keep = false' in config file */ + event_chaining->num_output_queues = 0; + + for (unsigned int i = 0; i < MAX_CHAINING_OUTPUT_QUEUES; i++) + event_chaining->output_queues[i] = EM_QUEUE_UNDEF; + + if (!em_shm->opt.event_chaining.order_keep) + return EM_OK; /* don't create output queues for event chaining */ + + /* + * Create EM output queues for event chaining, needed to maintain event + * order during an ordered context + */ + em_queue_conf_t queue_conf; + em_output_queue_conf_t output_conf; + + memset(&queue_conf, 0, sizeof(queue_conf)); + memset(&output_conf, 0, sizeof(output_conf)); + + queue_conf.flags = EM_QUEUE_FLAG_DEFAULT; + queue_conf.min_events = 0; /* system default */ + queue_conf.conf_len = sizeof(output_conf); + queue_conf.conf = &output_conf; + /* Set output-queue callback function, no args needed */ + output_conf.output_fn = chaining_output; + output_conf.output_fn_args = NULL; + output_conf.args_len = 0; + + const unsigned int num = em_shm->opt.event_chaining.num_order_queues; + unsigned char idx = 0; + + for (unsigned int i = 0; i < num; i++) { + char name[EM_QUEUE_NAME_LEN]; + + snprintf(name, sizeof(name), "Event-Chaining-Output-%02u", 
idx); + idx++; + name[sizeof(name) - 1] = '\0'; + + em_queue_t output_queue = em_queue_create(name, + EM_QUEUE_TYPE_OUTPUT, + EM_QUEUE_PRIO_UNDEF, + EM_QUEUE_GROUP_UNDEF, + &queue_conf); + if (unlikely(output_queue == EM_QUEUE_UNDEF)) + return EM_ERR_ALLOC_FAILED; + + event_chaining->num_output_queues++; + event_chaining->output_queues[i] = output_queue; + } + + return EM_OK; +} + +em_status_t +chaining_term(const event_chaining_t *event_chaining) +{ + /* num = 0 if 'event_chaining.order_keep = false' in config file */ + const unsigned int num = event_chaining->num_output_queues; + + for (unsigned int i = 0; i < num; i++) { + em_queue_t output_queue = event_chaining->output_queues[i]; + /* delete the output queues associated with event chaining */ + em_status_t stat = em_queue_delete(output_queue); + + if (unlikely(stat != EM_OK)) + return stat; + } + + return EM_OK; +} + +/** + * Output-queue callback function of type 'em_output_func_t' for Event-Chaining. + * Only needed when sending during an ordered-context when the EM config file + * option is set to 'event_chaining.order_keep = true'. + */ +int +chaining_output(const em_event_t events[], const unsigned int num, + const em_queue_t output_queue, void *output_fn_args) +{ + /* + * NOTE! + * Temporary: Not supporting the EM config file option + * 'event_chaining.order_keep = true' at the moment, checked during + * chaining_init() -> read_config_file(). + * This function will thus not be called until support added. + */ + em_queue_t chaining_queue = EM_QUEUE_UNDEF; + + (void)output_queue; + (void)output_fn_args; + + if (unlikely(num <= 0)) + return 0; + + if (num == 1) { + em_status_t stat = event_send_device(events[0], chaining_queue); + + if (unlikely(stat != EM_OK)) + return 0; + return 1; + } + + /* + * num > 1: + */ + int ret = event_send_device_multi(events, num, chaining_queue); + + if (unlikely((unsigned int)ret != num)) { + if (ret < 0) + return 0; + else + return ret; + } + + return num; +} diff --git a/src/em_chaining.h b/src/em_chaining.h index 4587ea2e..bfb59d3b 100644 --- a/src/em_chaining.h +++ b/src/em_chaining.h @@ -1,213 +1,213 @@ -/* - * Copyright (c) 2020, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - - /** - * @file - * EM event chaining support - */ - -#ifndef EM_CHAINING_H_ -#define EM_CHAINING_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#pragma GCC visibility push(default) -/** - * This function is declared as a weak symbol, indicating that the user should - * override it during linking with another implementation if event chaining is - * used. - */ -__attribute__((weak)) -em_status_t event_send_device(em_event_t event, em_queue_t queue); -/** - * This function is declared as a weak symbol, indicating that the user should - * override it during linking with another implementation if event chaining is - * used. - */ -__attribute__((weak)) -int event_send_device_multi(const em_event_t events[], int num, - em_queue_t queue); -#pragma GCC visibility pop - -/** - * Initialize event chaining during start-up - */ -em_status_t -chaining_init(event_chaining_t *event_chaining); - -/** - * Terminate event chaining during shut-down - */ -em_status_t -chaining_term(const event_chaining_t *event_chaining); - -/** - * Send an event to out of EM (e.g. to another device) via event-chaining and a - * user-provided function 'event_send_device()'. - * @see event_send_device() - */ -static inline em_status_t -send_chaining(em_event_t event, em_queue_t chaining_queue) -{ - const unsigned int num_outq = em_shm->event_chaining.num_output_queues; - const em_sched_context_type_t sched_ctx_type = - em_locm.current.sched_context_type; - - if (num_outq == 0 || sched_ctx_type != EM_SCHED_CONTEXT_TYPE_ORDERED) - return event_send_device(event, chaining_queue); - - /* always use the same output queue for each chaining queue */ - const internal_queue_t iq = {.queue = chaining_queue}; - em_queue_t output_queue; - queue_elem_t *output_q_elem; - uint32_t idx; - - idx = ((uint32_t)iq.device_id + (uint32_t)iq.queue_id) % num_outq; - output_queue = em_shm->event_chaining.output_queues[idx]; - output_q_elem = queue_elem_get(output_queue); - - RETURN_ERROR_IF(EM_CHECK_LEVEL >= 3 && !output_q_elem, - EM_ERR_BAD_ID, EM_ESCOPE_EVENT_SEND_DEVICE, - "Invalid output queue:%" PRI_QUEUE "", output_queue); - - return send_output(event, output_q_elem); -} - -/** - * Send 'num' events out of EM (e.g. to another device) via event-chaining and a - * user-provided function 'event_send_device_multi()'. 
- * @see event_send_device_multi() - */ -static inline int -send_chaining_multi(const em_event_t events[], const int num, - em_queue_t chaining_queue) -{ - const unsigned int num_outq = em_shm->event_chaining.num_output_queues; - const em_sched_context_type_t sched_ctx_type = - em_locm.current.sched_context_type; - - if (num_outq == 0 || sched_ctx_type != EM_SCHED_CONTEXT_TYPE_ORDERED) - return event_send_device_multi(events, num, chaining_queue); - - /* always use the same output queue for each chaining queue */ - const internal_queue_t iq = {.queue = chaining_queue}; - em_queue_t output_queue; - queue_elem_t *output_q_elem; - uint32_t idx; - - idx = ((uint32_t)iq.device_id + (uint32_t)iq.queue_id) % num_outq; - output_queue = em_shm->event_chaining.output_queues[idx]; - output_q_elem = queue_elem_get(output_queue); - - if (unlikely(EM_CHECK_LEVEL >= 3 && !output_q_elem)) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EVENT_SEND_DEVICE_MULTI, - "Invalid output queue:%" PRI_QUEUE "", output_queue); - return 0; - } - - return send_output_multi(events, num, output_q_elem); -} - -/** - * Send an event tagged with an event group out of EM (e.g. to another device) - * via event-chaining and a user-provided function 'event_send_device()'. - * @see event_send_device() - */ -static inline em_status_t -send_chaining_egrp(em_event_t event, event_hdr_t *const ev_hdr, - em_queue_t chaining_queue, - const event_group_elem_t *egrp_elem) -{ - if (!egrp_elem) - return send_chaining(event, chaining_queue); - - em_event_group_t save_egrp; - event_group_elem_t *save_egrp_elem; - int32_t save_egrp_gen; - - /* Send to another DEVICE with an event group */ - save_current_evgrp(&save_egrp, &save_egrp_elem, &save_egrp_gen); - /* - * "Simulate" a dispatch round from evgrp perspective, - * send-device() instead of EO-receive() - */ - event_group_set_local(ev_hdr->egrp, ev_hdr->egrp_gen, 1); - - em_status_t stat = send_chaining(event, chaining_queue); - - event_group_count_decrement(1); - restore_current_evgrp(save_egrp, save_egrp_elem, save_egrp_gen); - - return stat; -} - -/** - * Send 'num' events tagged with an event group out of EM (e.g. to another device) - * via event-chaining and a user-provided function 'event_send_device_multi()'. - * @see event_send_device_multi() - */ -static inline int -send_chaining_egrp_multi(const em_event_t events[], event_hdr_t *const ev_hdrs[], - const int num, em_queue_t chaining_queue, - const event_group_elem_t *egrp_elem) -{ - if (!egrp_elem) - return send_chaining_multi(events, num, chaining_queue); - - em_event_group_t save_egrp; - event_group_elem_t *save_egrp_elem; - int32_t save_egrp_gen; - - /* Send to another DEVICE with an event group */ - save_current_evgrp(&save_egrp, &save_egrp_elem, &save_egrp_gen); - /* - * "Simulate" dispatch rounds from evgrp perspective, - * send-device() instead of EO-receive(). - * Decrement evgrp-count by 'num' instead of by '1'. - * Note: event_group_set_local() called only once for - * all events. - */ - event_group_set_local(ev_hdrs[0]->egrp, ev_hdrs[0]->egrp_gen, num); - - int num_sent = send_chaining_multi(events, num, chaining_queue); - - event_group_count_decrement(num); - restore_current_evgrp(save_egrp, save_egrp_elem, save_egrp_gen); - - return num_sent; -} - -#ifdef __cplusplus -} -#endif - -#endif /* EM_CHAINING_H_ */ +/* + * Copyright (c) 2020, Nokia Solutions and Networks + * All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of the copyright holder nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ /**
+ * @file
+ * EM event chaining support
+ */
+
+#ifndef EM_CHAINING_H_
+#define EM_CHAINING_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#pragma GCC visibility push(default)
+
+/**
+ * Send an event out of EM.
+ * This function is declared as a weak symbol in the default implementation
+ * in em_chaining.c, indicating that the user can override it during
+ * linking with another implementation if event chaining is used.
+ */
+em_status_t event_send_device(em_event_t event, em_queue_t queue);
+
+/**
+ * Send multiple events out of EM.
+ * This function is declared as a weak symbol in the default implementation
+ * in em_chaining.c, indicating that the user can override it during
+ * linking with another implementation if event chaining is used.
+ */
+int event_send_device_multi(const em_event_t events[], int num, em_queue_t queue);
+
+#pragma GCC visibility pop
+
+/**
+ * Initialize event chaining during start-up.
+ */
+em_status_t chaining_init(event_chaining_t *event_chaining);
+
+/**
+ * Terminate event chaining during shut-down.
+ */
+em_status_t chaining_term(const event_chaining_t *event_chaining);
+
+/**
+ * Send an event out of EM (e.g. to another device) via event-chaining and a
+ * user-provided function 'event_send_device()'.
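+ *
+ * Note: when output queues exist and the caller runs in an ordered scheduling
+ * context, the event is routed through an EM output queue selected as
+ * '(device_id + queue_id) % num_output_queues', so a given chaining queue
+ * always maps to the same output queue and event order is preserved;
+ * otherwise event_send_device() is called directly.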
+ * @see event_send_device() + */ +static inline em_status_t +send_chaining(em_event_t event, em_queue_t chaining_queue) +{ + const unsigned int num_outq = em_shm->event_chaining.num_output_queues; + const em_sched_context_type_t sched_ctx_type = + em_locm.current.sched_context_type; + + if (num_outq == 0 || sched_ctx_type != EM_SCHED_CONTEXT_TYPE_ORDERED) + return event_send_device(event, chaining_queue); + + /* always use the same output queue for each chaining queue */ + const internal_queue_t iq = {.queue = chaining_queue}; + em_queue_t output_queue; + queue_elem_t *output_q_elem; + uint32_t idx; + + idx = ((uint32_t)iq.device_id + (uint32_t)iq.queue_id) % num_outq; + output_queue = em_shm->event_chaining.output_queues[idx]; + output_q_elem = queue_elem_get(output_queue); + + RETURN_ERROR_IF(EM_CHECK_LEVEL >= 3 && !output_q_elem, + EM_ERR_BAD_ID, EM_ESCOPE_EVENT_SEND_DEVICE, + "Invalid output queue:%" PRI_QUEUE "", output_queue); + + return send_output(event, output_q_elem); +} + +/** + * Send 'num' events out of EM (e.g. to another device) via event-chaining and a + * user-provided function 'event_send_device_multi()'. + * @see event_send_device_multi() + */ +static inline int +send_chaining_multi(const em_event_t events[], const int num, + em_queue_t chaining_queue) +{ + const unsigned int num_outq = em_shm->event_chaining.num_output_queues; + const em_sched_context_type_t sched_ctx_type = + em_locm.current.sched_context_type; + + if (num_outq == 0 || sched_ctx_type != EM_SCHED_CONTEXT_TYPE_ORDERED) + return event_send_device_multi(events, num, chaining_queue); + + /* always use the same output queue for each chaining queue */ + const internal_queue_t iq = {.queue = chaining_queue}; + em_queue_t output_queue; + queue_elem_t *output_q_elem; + uint32_t idx; + + idx = ((uint32_t)iq.device_id + (uint32_t)iq.queue_id) % num_outq; + output_queue = em_shm->event_chaining.output_queues[idx]; + output_q_elem = queue_elem_get(output_queue); + + if (unlikely(EM_CHECK_LEVEL >= 3 && !output_q_elem)) { + INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EVENT_SEND_DEVICE_MULTI, + "Invalid output queue:%" PRI_QUEUE "", output_queue); + return 0; + } + + return send_output_multi(events, num, output_q_elem); +} + +/** + * Send an event tagged with an event group out of EM (e.g. to another device) + * via event-chaining and a user-provided function 'event_send_device()'. + * @see event_send_device() + */ +static inline em_status_t +send_chaining_egrp(em_event_t event, event_hdr_t *const ev_hdr, + em_queue_t chaining_queue, + const event_group_elem_t *egrp_elem) +{ + if (!egrp_elem) + return send_chaining(event, chaining_queue); + + em_event_group_t save_egrp; + event_group_elem_t *save_egrp_elem; + int32_t save_egrp_gen; + + /* Send to another DEVICE with an event group */ + save_current_evgrp(&save_egrp, &save_egrp_elem, &save_egrp_gen); + /* + * "Simulate" a dispatch round from evgrp perspective, + * send-device() instead of EO-receive() + */ + event_group_set_local(ev_hdr->egrp, ev_hdr->egrp_gen, 1); + + em_status_t stat = send_chaining(event, chaining_queue); + + event_group_count_decrement(1); + restore_current_evgrp(save_egrp, save_egrp_elem, save_egrp_gen); + + return stat; +} + +/** + * Send 'num' events tagged with an event group out of EM (e.g. to another device) + * via event-chaining and a user-provided function 'event_send_device_multi()'. 
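+ *
+ * For illustration, a matching sketch of a user override of the weak default
+ * 'event_send_device_multi()'; 'my_transport_send()' is the same hypothetical
+ * transmit function as in the 'event_send_device()' sketch, not part of EM:
+ * @code
+ * int event_send_device_multi(const em_event_t events[], int num, em_queue_t queue)
+ * {
+ *	int i;
+ *
+ *	// stop at the first failure, return the number of events actually sent
+ *	for (i = 0; i < num; i++)
+ *		if (my_transport_send(events[i], queue) != 0)
+ *			break;
+ *	return i;
+ * }
+ * @endcode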
+ * @see event_send_device_multi() + */ +static inline int +send_chaining_egrp_multi(const em_event_t events[], event_hdr_t *const ev_hdrs[], + const int num, em_queue_t chaining_queue, + const event_group_elem_t *egrp_elem) +{ + if (!egrp_elem) + return send_chaining_multi(events, num, chaining_queue); + + em_event_group_t save_egrp; + event_group_elem_t *save_egrp_elem; + int32_t save_egrp_gen; + + /* Send to another DEVICE with an event group */ + save_current_evgrp(&save_egrp, &save_egrp_elem, &save_egrp_gen); + /* + * "Simulate" dispatch rounds from evgrp perspective, + * send-device() instead of EO-receive(). + * Decrement evgrp-count by 'num' instead of by '1'. + * Note: event_group_set_local() called only once for + * all events. + */ + event_group_set_local(ev_hdrs[0]->egrp, ev_hdrs[0]->egrp_gen, num); + + int num_sent = send_chaining_multi(events, num, chaining_queue); + + event_group_count_decrement(num); + restore_current_evgrp(save_egrp, save_egrp_elem, save_egrp_gen); + + return num_sent; +} + +#ifdef __cplusplus +} +#endif + +#endif /* EM_CHAINING_H_ */ diff --git a/src/em_event_group.c b/src/em_event_group.c index dc5fce65..a28e3e16 100644 --- a/src/em_event_group.c +++ b/src/em_event_group.c @@ -1,187 +1,247 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "em_include.h" - -static inline event_group_elem_t * -egrp_poolelem2egrp(objpool_elem_t *const event_group_pool_elem) -{ - return (event_group_elem_t *)((uintptr_t)event_group_pool_elem - - offsetof(event_group_elem_t, event_group_pool_elem)); -} - -em_status_t -event_group_init(event_group_tbl_t *const event_group_tbl, - event_group_pool_t *const event_group_pool) -{ - event_group_elem_t *egrp_elem; - const uint32_t objpool_subpools = OBJSUBPOOLS_MAX; - - int ret; - - memset(event_group_tbl, 0, sizeof(event_group_tbl_t)); - memset(event_group_pool, 0, sizeof(event_group_pool_t)); - env_atomic32_init(&em_shm->event_group_count); - - for (int i = 0; i < EM_MAX_EVENT_GROUPS; i++) { - em_event_group_t egrp = egrp_idx2hdl(i); - - egrp_elem = event_group_elem_get(egrp); - if (unlikely(!egrp_elem)) - return EM_ERR_BAD_POINTER; - - egrp_elem->event_group = egrp; /* store handle */ - egrp_elem->all = 0; /* set num_notif = 0, ready = 0 */ - env_atomic64_set(&egrp_elem->post.atomic, 0); - env_atomic64_set(&egrp_elem->pre.atomic, 0); - } - - ret = objpool_init(&event_group_pool->objpool, objpool_subpools); - if (ret != 0) - return EM_ERR_LIB_FAILED; - - for (uint32_t i = 0; i < EM_MAX_EVENT_GROUPS; i++) { - egrp_elem = &event_group_tbl->egrp_elem[i]; - objpool_add(&event_group_pool->objpool, i % objpool_subpools, - &egrp_elem->event_group_pool_elem); - } - - return EM_OK; -} - -em_event_group_t -event_group_alloc(void) -{ - const event_group_elem_t *egrp_elem; - objpool_elem_t *egrp_pool_elem; - - egrp_pool_elem = objpool_rem(&em_shm->event_group_pool.objpool, - em_core_id()); - if (unlikely(egrp_pool_elem == NULL)) - return EM_EVENT_GROUP_UNDEF; - - egrp_elem = egrp_poolelem2egrp(egrp_pool_elem); - - env_atomic32_inc(&em_shm->event_group_count); - return egrp_elem->event_group; -} - -em_status_t -event_group_free(em_event_group_t event_group) -{ - event_group_elem_t *egrp_elem = event_group_elem_get(event_group); - - if (unlikely(egrp_elem == NULL)) - return EM_ERR_BAD_ID; - - objpool_add(&em_shm->event_group_pool.objpool, - egrp_elem->event_group_pool_elem.subpool_idx, - &egrp_elem->event_group_pool_elem); - - env_atomic32_dec(&em_shm->event_group_count); - return EM_OK; -} - -unsigned int -event_group_count(void) -{ - return env_atomic32_get(&em_shm->event_group_count); -} - -#define EGRP_INFO_HDR_FMT \ -"Number of event groups: %d\n\n" \ -"ID Ready Cnt(post) Gen Num-notif\n" \ -"------------------------------------------\n%s\n" - -#define EGRP_INFO_LEN 43 -#define EGRP_INFO_FMT "%-10" PRI_EGRP "%-7c%-11d%-5d%-9d\n" /*43 bytes*/ - -void event_group_info_print(void) -{ - unsigned int egrp_num; - em_event_group_t egrp; - const event_group_elem_t *egrp_elem; - egrp_counter_t egrp_count; - int len = 0; - int n_print = 0; - - egrp = em_event_group_get_first(&egrp_num); - - /* - * egrp_num may not match the amount of event groups actually returned - * by iterating with em_event_group_get_next() if event groups are added - * or removed in parallel by another core. Thus space for 10 extra event - * groups is reserved. If more than 10 event groups are added by other - * cores in paralle, we print only information of the (egrp_num + 10) - * event groups. - * - * The extra 1 byte is reserved for the terminating null byte. 
- */ - const int egrp_info_str_len = (egrp_num + 10) * EGRP_INFO_LEN + 1; - char egrp_info_str[egrp_info_str_len]; - - while (egrp != EM_EVENT_GROUP_UNDEF) { - egrp_elem = event_group_elem_get(egrp); - - if (unlikely(egrp_elem == NULL || !event_group_allocated(egrp_elem))) { - egrp = em_event_group_get_next(); - continue; - } - - egrp_count.all = EM_ATOMIC_GET(&egrp_elem->post.atomic); - - n_print = snprintf(egrp_info_str + len, - egrp_info_str_len - len, - EGRP_INFO_FMT, egrp, - egrp_elem->ready ? 'Y' : 'N', - egrp_count.count, egrp_count.gen, - egrp_elem->num_notif); - - /* Not enough space to hold more event group info */ - if (n_print >= egrp_info_str_len - len) - break; - - len += n_print; - egrp = em_event_group_get_next(); - } - - /* No event group */ - if (len == 0) { - EM_PRINT("No event group!\n"); - return; - } - - /* - * To prevent printing incomplete information of the last event group - * when there is not enough space to hold all event group info. - */ - egrp_info_str[len] = '\0'; - EM_PRINT(EGRP_INFO_HDR_FMT, egrp_num, egrp_info_str); -} +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "em_include.h" + +em_status_t +event_group_init(event_group_tbl_t *const event_group_tbl, + odp_stash_t *const event_group_stash) +{ + uint32_t idx; + uint32_t val; + odp_stash_t stash; + em_status_t err_status; + event_group_elem_t *egrp_elem; + odp_stash_param_t stash_param; + odp_stash_capability_t stash_capa; + + if (odp_stash_capability(&stash_capa, ODP_STASH_TYPE_FIFO) != 0) + return EM_ERR_LIB_FAILED; + + if (stash_capa.max_num.u32 < EM_MAX_EVENT_GROUPS) { + EM_LOG(EM_LOG_ERR, + "Maximum number of event groups(%d) exceeds the maximum\n" + "number of object handles(%" PRIu64 ") a stash can hold.\n", + EM_MAX_EVENT_GROUPS, stash_capa.max_num.u32); + return EM_ERR_TOO_LARGE; + } + + memset(event_group_tbl, 0, sizeof(event_group_tbl_t)); + env_atomic32_init(&em_shm->event_group_count); + + odp_stash_param_init(&stash_param); + + stash_param.type = ODP_STASH_TYPE_FIFO; + stash_param.put_mode = ODP_STASH_OP_MT; + stash_param.get_mode = ODP_STASH_OP_MT; + stash_param.num_obj = EM_MAX_EVENT_GROUPS; + stash_param.obj_size = sizeof(uint32_t); + + stash = odp_stash_create("event_grp", &stash_param); + if (stash == ODP_STASH_INVALID) + return EM_ERR_LIB_FAILED; + + for (idx = 0; idx < EM_MAX_EVENT_GROUPS; idx++) { + egrp_elem = &em_shm->event_group_tbl.egrp_elem[idx]; + + egrp_elem->event_group = egrp_idx2hdl(idx); /* store handle */ + egrp_elem->all = 0; /* set num_notif = 0, ready = 0 */ + env_atomic64_set(&egrp_elem->post.atomic, 0); + env_atomic64_set(&egrp_elem->pre.atomic, 0); + + egrp_elem->in_stash = true; + if (odp_stash_put_u32(stash, &idx, 1) != 1) { + err_status = EM_ERR_LIB_FAILED; + egrp_elem->in_stash = false; + goto error_return; + } + } + + *event_group_stash = stash; + return EM_OK; + +error_return: + /* Empty the stash before destroying it */ + for (uint32_t i = 0; i < idx; i++) { + odp_stash_get_u32(stash, &val, 1); + + egrp_elem = &em_shm->event_group_tbl.egrp_elem[i]; + + egrp_elem->event_group = EM_EVENT_GROUP_UNDEF; + egrp_elem->in_stash = false; + } + + odp_stash_destroy(stash); + return err_status; +} + +em_status_t event_group_term(void) +{ + uint32_t tmp; + int32_t num; + odp_stash_t stash = em_shm->event_group_stash; + + while (1) { + num = odp_stash_get_u32(stash, &tmp, 1); + + if (num == 1) + continue; + + if (num == 0) + break; + + EM_PRINT("Stash get failed: %i\n", num); + return EM_ERR_LIB_FAILED; + } + + if (odp_stash_destroy(stash)) + return EM_ERR_LIB_FAILED; + + return EM_OK; +} + +em_event_group_t +event_group_alloc(void) +{ + uint32_t idx; + event_group_elem_t *egrp_elem; + + if (unlikely(odp_stash_get_u32(em_shm->event_group_stash, &idx, 1) != 1)) + return EM_EVENT_GROUP_UNDEF; + + if (EM_EVENT_GROUP_SAFE_MODE && idx >= EM_MAX_EVENT_GROUPS) + return EM_EVENT_GROUP_UNDEF; + + egrp_elem = &em_shm->event_group_tbl.egrp_elem[idx]; + egrp_elem->in_stash = false; + + env_atomic32_inc(&em_shm->event_group_count); + + return egrp_idx2hdl(idx); +} + +em_status_t +event_group_free(em_event_group_t event_group) +{ + event_group_elem_t *egrp_elem = event_group_elem_get(event_group); + uint32_t idx = (uint32_t)egrp_hdl2idx(event_group); + + if (unlikely(egrp_elem == NULL)) + return EM_ERR_BAD_ID; + + if (unlikely(egrp_elem->in_stash)) + return EM_ERR_BAD_STATE; + + egrp_elem->in_stash = true; + if (unlikely(odp_stash_put_u32(em_shm->event_group_stash, &idx, 1) != 1)) { + egrp_elem->in_stash = false; + return EM_ERR_LIB_FAILED; + } + + env_atomic32_dec(&em_shm->event_group_count); + return EM_OK; +} + +unsigned int +event_group_count(void) +{ + return 
env_atomic32_get(&em_shm->event_group_count);
+}
+
+#define EGRP_INFO_HDR_FMT \
+"Number of event groups: %d\n\n" \
+"ID        Ready  Cnt(post)  Gen  Num-notif\n" \
+"------------------------------------------\n%s\n"
+
+#define EGRP_INFO_LEN 43
+#define EGRP_INFO_FMT "%-10" PRI_EGRP "%-7c%-11d%-5d%-9d\n" /*43 bytes*/
+
+void event_group_info_print(void)
+{
+	unsigned int egrp_num;
+	em_event_group_t egrp;
+	const event_group_elem_t *egrp_elem;
+	egrp_counter_t egrp_count;
+	int len = 0;
+	int n_print = 0;
+
+	egrp = em_event_group_get_first(&egrp_num);
+
+	/*
+	 * egrp_num may not match the number of event groups actually returned
+	 * by iterating with em_event_group_get_next() if event groups are added
+	 * or removed in parallel by another core. Thus space for 10 extra event
+	 * groups is reserved. If more than 10 event groups are added by other
+	 * cores in parallel, we only print information for (egrp_num + 10)
+	 * event groups.
+	 *
+	 * The extra 1 byte is reserved for the terminating null byte.
+	 */
+	const int egrp_info_str_len = (egrp_num + 10) * EGRP_INFO_LEN + 1;
+	char egrp_info_str[egrp_info_str_len];
+
+	while (egrp != EM_EVENT_GROUP_UNDEF) {
+		egrp_elem = event_group_elem_get(egrp);
+
+		if (unlikely(egrp_elem == NULL || !event_group_allocated(egrp_elem))) {
+			egrp = em_event_group_get_next();
+			continue;
+		}
+
+		egrp_count.all = EM_ATOMIC_GET(&egrp_elem->post.atomic);
+
+		n_print = snprintf(egrp_info_str + len,
+				   egrp_info_str_len - len,
+				   EGRP_INFO_FMT, egrp,
+				   egrp_elem->ready ? 'Y' : 'N',
+				   egrp_count.count, egrp_count.gen,
+				   egrp_elem->num_notif);
+
+		/* Not enough space to hold more event group info */
+		if (n_print >= egrp_info_str_len - len)
+			break;
+
+		len += n_print;
+		egrp = em_event_group_get_next();
+	}
+
+	/* No event groups */
+	if (len == 0) {
+		EM_PRINT("No event groups!\n");
+		return;
+	}
+
+	/*
+	 * Terminate the string to prevent printing incomplete information for
+	 * the last event group when there is not enough space to hold all
+	 * event group info.
+	 */
+	egrp_info_str[len] = '\0';
+	EM_PRINT(EGRP_INFO_HDR_FMT, egrp_num, egrp_info_str);
+}
diff --git a/src/em_event_group.h b/src/em_event_group.h
index 836d9a26..25b4c3db 100644
--- a/src/em_event_group.h
+++ b/src/em_event_group.h
@@ -1,282 +1,283 @@
-/*
- * Copyright (c) 2015, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *   * Neither the name of the copyright holder nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * EM internal event group functions - * - */ - -#ifndef EM_EVENT_GROUP_H_ -#define EM_EVENT_GROUP_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#define invalid_egrp(event_group) \ - ((unsigned int)egrp_hdl2idx((event_group)) >= EM_MAX_EVENT_GROUPS) - -em_status_t -event_group_init(event_group_tbl_t *const event_group_tbl, - event_group_pool_t *const event_group_pool); - -em_event_group_t -event_group_alloc(void); - -em_status_t -event_group_free(em_event_group_t event_group); - -static inline int -event_group_allocated(const event_group_elem_t *egrp_elem) -{ - return !objpool_in_pool(&egrp_elem->event_group_pool_elem); -} - -static inline int -egrp_hdl2idx(const em_event_group_t event_group) -{ - return (int)((uintptr_t)event_group - 1); -} - -static inline em_event_group_t -egrp_idx2hdl(const int event_group_idx) -{ - return (em_event_group_t)(uintptr_t)(event_group_idx + 1); -} - -static inline event_group_elem_t * -event_group_elem_get(const em_event_group_t event_group) -{ - const int egrp_idx = egrp_hdl2idx(event_group); - event_group_elem_t *egrp_elem; - - if (unlikely((unsigned int)egrp_idx > EM_MAX_EVENT_GROUPS - 1)) - return NULL; - - egrp_elem = &em_shm->event_group_tbl.egrp_elem[egrp_idx]; - - return egrp_elem; -} - -static inline uint64_t -event_group_gen_get(const event_group_elem_t *egrp_elem) -{ - egrp_counter_t egrp_count; - - egrp_count.all = EM_ATOMIC_GET(&egrp_elem->post.atomic); - - return egrp_count.gen; -} - -/** - * Verifies event group state and updates pre count before setting core local - * event group. Sets group to undefined for excess and expired group events. - */ -static inline void -set_local_safe(const em_event_group_t egrp, const int32_t egrp_gen, - const unsigned int decr) -{ - em_locm_t *const locm = &em_locm; - uint64_t current_count; - egrp_counter_t new_count; - event_group_elem_t *const egrp_elem = event_group_elem_get(egrp); - - do { - current_count = EM_ATOMIC_GET(&egrp_elem->pre.atomic); - new_count.all = current_count; - new_count.count -= decr; - /* Check for excess and expired group events */ - if (unlikely(new_count.count < 0 || - new_count.gen != egrp_gen)) { - INTERNAL_ERROR(EM_ERR_BAD_ID, - EM_ESCOPE_EVENT_GROUP_UPDATE, - "Expired event group event received!"); - locm->current.egrp = EM_EVENT_GROUP_UNDEF; - return; - } - } while (!EM_ATOMIC_CMPSET(&egrp_elem->pre.atomic, - current_count, new_count.all)); - - locm->current.egrp_gen = egrp_gen; - locm->current.egrp = egrp; - locm->current.egrp_elem = egrp_elem; -} - -/** - * Set core local event group. - * - * Validates event group if EM_EVENT_GROUP_SAFE_MODE is enabled. - * - * Only called by the EM-dispatcher before receive function. 
- */ -static inline void -event_group_set_local(const em_event_group_t egrp, const int32_t egrp_gen, - const unsigned int decr) -{ - if (egrp == EM_EVENT_GROUP_UNDEF) - return; - - /* event group is set: */ - if (EM_EVENT_GROUP_SAFE_MODE) { - /* Group is validated before setting */ - set_local_safe(egrp, egrp_gen, decr); - } else { - em_locm_t *const locm = &em_locm; - - locm->current.egrp_elem = event_group_elem_get(egrp); - locm->current.egrp = egrp; - } -} - -/** - * Updates event group counter safely. Generation and count must be valid. - */ -static inline int64_t -count_decrement_safe(event_group_elem_t *const egrp_elem, - const unsigned int decr) -{ - uint64_t current_count; - egrp_counter_t new_count; - - do { - current_count = EM_ATOMIC_GET(&egrp_elem->post.atomic); - new_count.all = current_count; - new_count.count -= decr; - /* Validate group state and generation before changing count */ - if (unlikely(new_count.count < 0 || - new_count.gen != em_locm.current.egrp_gen)) { - /* Suppress error if group is aborted */ - if (!egrp_elem->ready) - INTERNAL_ERROR(EM_ERR_BAD_ID, - EM_ESCOPE_EVENT_GROUP_UPDATE, - "Expired grp event in post cnt!" - ); - return -1; - } - } while (!EM_ATOMIC_CMPSET(&egrp_elem->post.atomic, current_count, - new_count.all)); - return new_count.count; -} - -/** - * Decrements the event group count and sends notif events when group is done - * - * Only called by the EM-dispatcher after receive function. - */ -static inline void -event_group_count_decrement(const unsigned int decr) -{ - int64_t count; - event_group_elem_t *const egrp_elem = em_locm.current.egrp_elem; - - if (EM_EVENT_GROUP_SAFE_MODE) { - /* Validates group before updating counters */ - count = count_decrement_safe(egrp_elem, decr); - } else { - count = EM_ATOMIC_SUB_RETURN(&egrp_elem->post.atomic, decr); - - if (unlikely(count < 0)) { - if (egrp_elem->ready) { - /* Counter should stay zero if aborted */ - egrp_elem->post.all = 0; - return; - } - - INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_ID), - EM_ESCOPE_EVENT_GROUP_UPDATE, - "Group count already 0!"); - } - } - - if (count == 0) { /* Last event in the group */ - /* Setting pre_count here does nothing as both counters should - * be zero. Only due to incorrect usage pre_count is other than - * zero when notif events are about to be sent. 
- */ - if (EM_EVENT_GROUP_SAFE_MODE) - egrp_elem->pre.count = 0; - - const int num_notif = egrp_elem->num_notif; - em_status_t ret; - - /* Copy notifications to local memory */ - em_notif_t notif_tbl[EM_EVENT_GROUP_MAX_NOTIF]; - - for (int i = 0; i < num_notif; i++) { - notif_tbl[i].event = egrp_elem->notif_tbl[i].event; - notif_tbl[i].queue = egrp_elem->notif_tbl[i].queue; - notif_tbl[i].egroup = egrp_elem->notif_tbl[i].egroup; - } - - egrp_elem->ready = true; /* ready for 'apply' */ - ret = send_notifs(num_notif, notif_tbl); - if (unlikely(ret != EM_OK)) - INTERNAL_ERROR(ret, EM_ESCOPE_EVENT_GROUP_UPDATE, - "send notifs failed"); - } -} - -static inline void -save_current_evgrp(em_event_group_t *save_egrp /*out*/, - event_group_elem_t **save_egrp_elem /*out*/, - int32_t *save_egrp_gen /*out*/) -{ - const em_locm_t *locm = &em_locm; - - *save_egrp_elem = locm->current.egrp_elem; - *save_egrp = locm->current.egrp; - *save_egrp_gen = locm->current.egrp_gen; -} - -static inline void -restore_current_evgrp(const em_event_group_t saved_egrp, - event_group_elem_t *const saved_egrp_elem, - const int32_t saved_egrp_gen) -{ - em_locm_t *const locm = &em_locm; - - locm->current.egrp_elem = saved_egrp_elem; - locm->current.egrp = saved_egrp; - locm->current.egrp_gen = saved_egrp_gen; -} - -unsigned int -event_group_count(void); - -/** Print information about all event groups */ -void event_group_info_print(void); - -#ifdef __cplusplus -} -#endif - -#endif /* EM_EVENT_GROUP_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+/**
+ * @file
+ *
+ * EM internal event group functions
+ *
+ */
+
+#ifndef EM_EVENT_GROUP_H_
+#define EM_EVENT_GROUP_H_
+
+#ifdef __cplusplus
extern "C" {
+#endif
+
+#define invalid_egrp(event_group) \
+	((unsigned int)egrp_hdl2idx((event_group)) >= EM_MAX_EVENT_GROUPS)
+
+em_status_t
+event_group_init(event_group_tbl_t *const event_group_tbl, odp_stash_t *const event_group_stash);
+
+em_status_t event_group_term(void);
+
+em_event_group_t
+event_group_alloc(void);
+
+em_status_t
+event_group_free(em_event_group_t event_group);
+
+static inline int
+event_group_allocated(const event_group_elem_t *egrp_elem)
+{
+	return !egrp_elem->in_stash;
+}
+
+static inline int
+egrp_hdl2idx(const em_event_group_t event_group)
+{
+	return (int)((uintptr_t)event_group - 1);
+}
+
+static inline em_event_group_t
+egrp_idx2hdl(const int event_group_idx)
+{
+	return (em_event_group_t)(uintptr_t)(event_group_idx + 1);
+}
+
+static inline event_group_elem_t *
+event_group_elem_get(const em_event_group_t event_group)
+{
+	const int egrp_idx = egrp_hdl2idx(event_group);
+	event_group_elem_t *egrp_elem;
+
+	if (unlikely((uint32_t)egrp_idx > EM_MAX_EVENT_GROUPS - 1))
+		return NULL;
+
+	egrp_elem = &em_shm->event_group_tbl.egrp_elem[egrp_idx];
+
+	return egrp_elem;
+}
+
+static inline uint64_t
+event_group_gen_get(const event_group_elem_t *egrp_elem)
+{
+	egrp_counter_t egrp_count;
+
+	egrp_count.all = EM_ATOMIC_GET(&egrp_elem->post.atomic);
+
+	return egrp_count.gen;
+}
+
+/**
+ * Verifies event group state and updates pre count before setting core local
+ * event group. Sets group to undefined for excess and expired group events.
+ */
+static inline void
+set_local_safe(const em_event_group_t egrp, const int32_t egrp_gen,
+	       const unsigned int decr)
+{
+	em_locm_t *const locm = &em_locm;
+	uint64_t current_count;
+	egrp_counter_t new_count;
+	event_group_elem_t *const egrp_elem = event_group_elem_get(egrp);
+
+	do {
+		current_count = EM_ATOMIC_GET(&egrp_elem->pre.atomic);
+		new_count.all = current_count;
+		new_count.count -= decr;
+		/* Check for excess and expired group events */
+		if (unlikely(new_count.count < 0 ||
+			     new_count.gen != egrp_gen)) {
+			INTERNAL_ERROR(EM_ERR_BAD_ID,
+				       EM_ESCOPE_EVENT_GROUP_UPDATE,
+				       "Expired event group event received!");
+			locm->current.egrp = EM_EVENT_GROUP_UNDEF;
+			return;
+		}
+	} while (!EM_ATOMIC_CMPSET(&egrp_elem->pre.atomic,
+				   current_count, new_count.all));
+
+	locm->current.egrp_gen = egrp_gen;
+	locm->current.egrp = egrp;
+	locm->current.egrp_elem = egrp_elem;
+}
+
+/**
+ * Set core local event group.
+ *
+ * Validates event group if EM_EVENT_GROUP_SAFE_MODE is enabled.
+ *
+ * Only called by the EM-dispatcher before the receive function.
+ */
+static inline void
+event_group_set_local(const em_event_group_t egrp, const int32_t egrp_gen,
+		      const unsigned int decr)
+{
+	if (egrp == EM_EVENT_GROUP_UNDEF)
+		return;
+
+	/* event group is set: */
+	if (EM_EVENT_GROUP_SAFE_MODE) {
+		/* Group is validated before setting */
+		set_local_safe(egrp, egrp_gen, decr);
+	} else {
+		em_locm_t *const locm = &em_locm;
+
+		locm->current.egrp_elem = event_group_elem_get(egrp);
+		locm->current.egrp = egrp;
+	}
+}
+
+/**
+ * Updates event group counter safely. Generation and count must be valid.
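+ *
+ * Note: the count and generation are packed into a single 64-bit word
+ * (egrp_counter_t) and updated below with a compare-and-swap retry loop;
+ * a concurrent update by another core restarts the loop instead of
+ * corrupting the counter.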
+ */
+static inline int64_t
+count_decrement_safe(event_group_elem_t *const egrp_elem,
+		     const unsigned int decr)
+{
+	uint64_t current_count;
+	egrp_counter_t new_count;
+
+	do {
+		current_count = EM_ATOMIC_GET(&egrp_elem->post.atomic);
+		new_count.all = current_count;
+		new_count.count -= decr;
+		/* Validate group state and generation before changing count */
+		if (unlikely(new_count.count < 0 ||
+			     new_count.gen != em_locm.current.egrp_gen)) {
+			/* Suppress error if group is aborted */
+			if (!egrp_elem->ready)
+				INTERNAL_ERROR(EM_ERR_BAD_ID,
+					       EM_ESCOPE_EVENT_GROUP_UPDATE,
+					       "Expired grp event in post cnt!"
+					       );
+			return -1;
+		}
+	} while (!EM_ATOMIC_CMPSET(&egrp_elem->post.atomic, current_count,
+				   new_count.all));
+	return new_count.count;
+}
+
+/**
+ * Decrements the event group count and sends notif events when the group is
+ * done.
+ *
+ * Only called by the EM-dispatcher after the receive function.
+ */
+static inline void
+event_group_count_decrement(const unsigned int decr)
+{
+	int64_t count;
+	event_group_elem_t *const egrp_elem = em_locm.current.egrp_elem;
+
+	if (EM_EVENT_GROUP_SAFE_MODE) {
+		/* Validates group before updating counters */
+		count = count_decrement_safe(egrp_elem, decr);
+	} else {
+		count = EM_ATOMIC_SUB_RETURN(&egrp_elem->post.atomic, decr);
+
+		if (unlikely(count < 0)) {
+			if (egrp_elem->ready) {
+				/* Counter should stay zero if aborted */
+				egrp_elem->post.all = 0;
+				return;
+			}
+
+			INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_ID),
+				       EM_ESCOPE_EVENT_GROUP_UPDATE,
+				       "Group count already 0!");
+		}
+	}
+
+	if (count == 0) { /* Last event in the group */
+		/* Setting pre_count here does nothing as both counters should
+		 * be zero; pre_count is non-zero when notif events are about
+		 * to be sent only in case of incorrect usage.
+		 */
+		if (EM_EVENT_GROUP_SAFE_MODE)
+			egrp_elem->pre.count = 0;
+
+		const int num_notif = egrp_elem->num_notif;
+		em_status_t ret;
+
+		/* Copy notifications to local memory */
+		em_notif_t notif_tbl[EM_EVENT_GROUP_MAX_NOTIF];
+
+		for (int i = 0; i < num_notif; i++) {
+			notif_tbl[i].event = egrp_elem->notif_tbl[i].event;
+			notif_tbl[i].queue = egrp_elem->notif_tbl[i].queue;
+			notif_tbl[i].egroup = egrp_elem->notif_tbl[i].egroup;
+		}
+
+		egrp_elem->ready = true; /* ready for 'apply' */
+		ret = send_notifs(num_notif, notif_tbl);
+		if (unlikely(ret != EM_OK))
+			INTERNAL_ERROR(ret, EM_ESCOPE_EVENT_GROUP_UPDATE,
+				       "send notifs failed");
+	}
+}
+
+static inline void
+save_current_evgrp(em_event_group_t *save_egrp /*out*/,
+		   event_group_elem_t **save_egrp_elem /*out*/,
+		   int32_t *save_egrp_gen /*out*/)
+{
+	const em_locm_t *locm = &em_locm;
+
+	*save_egrp_elem = locm->current.egrp_elem;
+	*save_egrp = locm->current.egrp;
+	*save_egrp_gen = locm->current.egrp_gen;
+}
+
+static inline void
+restore_current_evgrp(const em_event_group_t saved_egrp,
+		      event_group_elem_t *const saved_egrp_elem,
+		      const int32_t saved_egrp_gen)
+{
+	em_locm_t *const locm = &em_locm;
+
+	locm->current.egrp_elem = saved_egrp_elem;
+	locm->current.egrp = saved_egrp;
+	locm->current.egrp_gen = saved_egrp_gen;
+}
+
+unsigned int
+event_group_count(void);
+
+/** Print information about all event groups */
+void event_group_info_print(void);
+
+#ifdef __cplusplus
}
+#endif
+
+#endif /* EM_EVENT_GROUP_H_ */
diff --git a/src/em_event_group_types.h b/src/em_event_group_types.h
index cf617c01..0161b109 100644
--- a/src/em_event_group_types.h
+++ b/src/em_event_group_types.h
@@ -1,119 +1,113 @@
-/*
- * Copyright (c) 2015, Nokia Solutions and Networks
- * All rights reserved.
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef EM_EVENT_GROUP_TYPES_H_ -#define EM_EVENT_GROUP_TYPES_H_ - -/** - * @file - * EM internal event group types & definitions - * - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Event Group counter - */ -typedef union { - struct { - /** Counter value */ - int32_t count; - /** Current event group generation */ - int32_t gen; - }; - uint64_t all; - env_atomic64_t atomic; -} egrp_counter_t; - -COMPILE_TIME_ASSERT(sizeof(egrp_counter_t) == sizeof(uint64_t), - EGRP_COUNTER_SIZE_ERROR); - -/** - * EM Event Group element - */ -typedef struct { - /** - * Contains the notification events to send when - * the event group count reaches zero. - */ - em_notif_t notif_tbl[EM_EVENT_GROUP_MAX_NOTIF] ENV_CACHE_LINE_ALIGNED; - - /** - * These event group counts determine the number of events to process - * until completion. Events are counted before and after the receive - * function in the dispatcher. 
- */ - egrp_counter_t pre ENV_CACHE_LINE_ALIGNED; - - egrp_counter_t post ENV_CACHE_LINE_ALIGNED; - - union { - struct { - /** The number of notif events stored in notif_tbl[] */ - int num_notif; - /** true/false, event group is ready for apply */ - bool ready; - }; - /** clear all options at once */ - uint64_t all; - }; - - /** The event group handle associated with this element */ - em_event_group_t event_group; - /** Associated pool element for this event group */ - objpool_elem_t event_group_pool_elem; -} event_group_elem_t ENV_CACHE_LINE_ALIGNED; - -COMPILE_TIME_ASSERT(offsetof(event_group_elem_t, all) + sizeof(uint64_t) - >= offsetof(event_group_elem_t, ready) + sizeof(bool), - EVENT_GROUP_ELEM_T__SIZE_ERROR); -/** - * Event group table - */ -typedef struct { - /** Event group element table */ - event_group_elem_t egrp_elem[EM_MAX_EVENT_GROUPS]; -} event_group_tbl_t; - -/** - * Pool of free event groups - */ -typedef struct { - objpool_t objpool; -} event_group_pool_t; - -#ifdef __cplusplus -} -#endif - -#endif /* EM_EVENT_GROUP_TYPES_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef EM_EVENT_GROUP_TYPES_H_ +#define EM_EVENT_GROUP_TYPES_H_ + +/** + * @file + * EM internal event group types & definitions + * + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Event Group counter + */ +typedef union { + struct { + /** Counter value */ + int32_t count; + /** Current event group generation */ + int32_t gen; + }; + uint64_t all; + env_atomic64_t atomic; +} egrp_counter_t; + +COMPILE_TIME_ASSERT(sizeof(egrp_counter_t) == sizeof(uint64_t), + EGRP_COUNTER_SIZE_ERROR); + +/** + * EM Event Group element + */ +typedef struct { + /** + * Contains the notification events to send when + * the event group count reaches zero. + */ + em_notif_t notif_tbl[EM_EVENT_GROUP_MAX_NOTIF] ENV_CACHE_LINE_ALIGNED; + + /** + * These event group counts determine the number of events to process + * until completion. 
Events are counted before and after the receive + * function in the dispatcher. + */ + egrp_counter_t pre ENV_CACHE_LINE_ALIGNED; + + egrp_counter_t post ENV_CACHE_LINE_ALIGNED; + + union { + struct { + /** The number of notif events stored in notif_tbl[] */ + int num_notif; + /** true/false, event group is ready for apply */ + bool ready; + }; + /** clear all options at once */ + uint64_t all; + }; + + /** The event group handle associated with this element */ + em_event_group_t event_group; + + /** true/false, event group is in stash(free/unused) */ + bool in_stash; +} event_group_elem_t ENV_CACHE_LINE_ALIGNED; + +COMPILE_TIME_ASSERT(offsetof(event_group_elem_t, all) + sizeof(uint64_t) + >= offsetof(event_group_elem_t, ready) + sizeof(bool), + EVENT_GROUP_ELEM_T__SIZE_ERROR); +/** + * Event group table + */ +typedef struct { + /** Event group element table */ + event_group_elem_t egrp_elem[EM_MAX_EVENT_GROUPS]; +} event_group_tbl_t; + +#ifdef __cplusplus +} +#endif + +#endif /* EM_EVENT_GROUP_TYPES_H_ */ diff --git a/src/em_event_state.c b/src/em_event_state.c index a9acebd0..60d3c13c 100644 --- a/src/em_event_state.c +++ b/src/em_event_state.c @@ -1,1227 +1,1227 @@ -/* - * Copyright (c) 2020-2022, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "em_include.h" - -static int read_config_file(void); - -/** - * Initial counter values set during an alloc-operation: ref=1, send=0 - * (em_alloc/_multi(), em_event_clone()) - */ -static const evstate_cnt_t init_cnt_alloc = {.evgen = EVGEN_INIT, - .rsvd = 0, - .ref_cnt = REF_CNT_INIT - 1, - .send_cnt = 0 + SEND_CNT_INIT}; -/** - * Initial counter values for external events entering into EM - * (event not allocated by EM): ref=1, send=1 - */ -static const evstate_cnt_t init_cnt_extev = {.evgen = EVGEN_INIT, - .rsvd = 0, - .ref_cnt = REF_CNT_INIT - 1, - .send_cnt = 1 + SEND_CNT_INIT}; - -/** - * Information about an event-state update location - */ -typedef struct { - const char *str; - em_escope_t escope; -} evstate_info_t; - -/** - * Constant table containing event-state update location information. - * Only accessed when an erroneous event state has been detected and is being - * reported to the error handler. - */ -static const evstate_info_t evstate_info_tbl[] = { - [EVSTATE__UNDEF] = {.str = "undefined", - .escope = (EM_ESCOPE_INTERNAL_MASK | 0)}, - [EVSTATE__PREALLOC] = {.str = "pool-create(prealloc-events)", - .escope = EM_ESCOPE_POOL_CREATE}, - [EVSTATE__ALLOC] = {.str = "em_alloc()", - .escope = EM_ESCOPE_ALLOC}, - [EVSTATE__ALLOC_MULTI] = {.str = "em_alloc_multi()", - .escope = EM_ESCOPE_ALLOC_MULTI}, - [EVSTATE__EVENT_CLONE] = {.str = "em_event_clone()", - .escope = EM_ESCOPE_EVENT_CLONE}, - [EVSTATE__EVENT_REF] = {.str = "em_event_ref()", - .escope = EM_ESCOPE_EVENT_REF}, - [EVSTATE__FREE] = {.str = "em_free()", - .escope = EM_ESCOPE_FREE}, - [EVSTATE__FREE_MULTI] = {.str = "em_free_multi()", - .escope = EM_ESCOPE_FREE_MULTI}, - [EVSTATE__EVENT_VECTOR_FREE] = {.str = "em_event_vector_free()", - .escope = EM_ESCOPE_EVENT_VECTOR_FREE}, - [EVSTATE__INIT] = {.str = "init-event", - .escope = EM_ESCOPE_ODP_EXT}, - [EVSTATE__INIT_MULTI] = {.str = "init-events", - .escope = EM_ESCOPE_ODP_EXT}, - [EVSTATE__INIT_EXTEV] = {.str = "dispatch(init-ext-event)", - .escope = EM_ESCOPE_DISPATCH}, - [EVSTATE__INIT_EXTEV_MULTI] = {.str = "dispatch(init-ext-events)", - .escope = EM_ESCOPE_DISPATCH}, - [EVSTATE__UPDATE_EXTEV] = {.str = "dispatch(update-ext-event)", - .escope = EM_ESCOPE_DISPATCH}, - [EVSTATE__SEND] = {.str = "em_send()", - .escope = EM_ESCOPE_SEND}, - [EVSTATE__SEND__FAIL] = {.str = "em_send(fail)", - .escope = EM_ESCOPE_SEND}, - [EVSTATE__SEND_EGRP] = {.str = "em_send_group()", - .escope = EM_ESCOPE_SEND_GROUP}, - [EVSTATE__SEND_EGRP__FAIL] = {.str = "em_send_group(fail)", - .escope = EM_ESCOPE_SEND_GROUP}, - [EVSTATE__SEND_MULTI] = {.str = "em_send_multi()", - .escope = EM_ESCOPE_SEND_MULTI}, - [EVSTATE__SEND_MULTI__FAIL] = {.str = "em_send_multi(fail)", - .escope = EM_ESCOPE_SEND_MULTI}, - [EVSTATE__SEND_EGRP_MULTI] = {.str = "em_send_group_multi()", - .escope = EM_ESCOPE_SEND_GROUP_MULTI}, - [EVSTATE__SEND_EGRP_MULTI__FAIL] = {.str = "em_send_group_multi(fail)", - .escope = EM_ESCOPE_SEND_GROUP_MULTI}, - [EVSTATE__EO_START_SEND_BUFFERED] = {.str = "eo-start:send-buffered-events()", - .escope = EM_ESCOPE_SEND_MULTI}, - [EVSTATE__MARK_SEND] = {.str = "em_event_mark_send()", - .escope = EM_ESCOPE_EVENT_MARK_SEND}, - [EVSTATE__UNMARK_SEND] = {.str = "em_event_unmark_send()", - .escope = EM_ESCOPE_EVENT_UNMARK_SEND}, - [EVSTATE__MARK_FREE] = {.str = "em_event_mark_free()", - .escope = EM_ESCOPE_EVENT_MARK_FREE}, - [EVSTATE__UNMARK_FREE] = {.str = "em_event_unmark_free()", - .escope = EM_ESCOPE_EVENT_UNMARK_FREE}, - [EVSTATE__MARK_FREE_MULTI] = {.str = 
"em_event_mark_free_multi()", - .escope = EM_ESCOPE_EVENT_MARK_FREE_MULTI}, - [EVSTATE__UNMARK_FREE_MULTI] = {.str = "em_event_unmark_free_multi()", - .escope = EM_ESCOPE_EVENT_UNMARK_FREE_MULTI}, - [EVSTATE__DISPATCH] = {.str = "em_dispatch(single-event)", - .escope = EM_ESCOPE_DISPATCH}, - [EVSTATE__DISPATCH_MULTI] = {.str = "em_dispatch(multiple-events)", - .escope = EM_ESCOPE_DISPATCH}, - [EVSTATE__DISPATCH_SCHED__FAIL] = {.str = "em_dispatch(drop sched-events)", - .escope = EM_ESCOPE_DISPATCH}, - [EVSTATE__DISPATCH_LOCAL__FAIL] = {.str = "em_dispatch(drop local-events)", - .escope = EM_ESCOPE_DISPATCH}, - [EVSTATE__DEQUEUE] = {.str = "em_queue_dequeue()", - .escope = EM_ESCOPE_QUEUE_DEQUEUE}, - [EVSTATE__DEQUEUE_MULTI] = {.str = "em_queue_dequeue_multi()", - .escope = EM_ESCOPE_QUEUE_DEQUEUE_MULTI}, - [EVSTATE__TMO_SET_ABS] = {.str = "em_tmo_set_abs()", - .escope = EM_ESCOPE_TMO_SET_ABS}, - [EVSTATE__TMO_SET_ABS__FAIL] = {.str = "em_tmo_set_abs(fail)", - .escope = EM_ESCOPE_TMO_SET_ABS}, - [EVSTATE__TMO_SET_REL] = {.str = "em_tmo_set_rel()", - .escope = EM_ESCOPE_TMO_SET_REL}, - [EVSTATE__TMO_SET_REL__FAIL] = {.str = "em_tmo_set_rel(fail)", - .escope = EM_ESCOPE_TMO_SET_REL}, - [EVSTATE__TMO_SET_PERIODIC] = {.str = "em_tmo_set_periodic()", - .escope = EM_ESCOPE_TMO_SET_PERIODIC}, - [EVSTATE__TMO_SET_PERIODIC__FAIL] = {.str = "em_tmo_set_periodic(fail)", - .escope = EM_ESCOPE_TMO_SET_PERIODIC}, - [EVSTATE__TMO_CANCEL] = {.str = "em_tmo_cancel()", - .escope = EM_ESCOPE_TMO_CANCEL}, - [EVSTATE__TMO_ACK] = {.str = "em_tmo_ack()", - .escope = EM_ESCOPE_TMO_ACK}, - [EVSTATE__TMO_ACK__NOSKIP] = {.str = "em_tmo_ack(noskip)", - .escope = EM_ESCOPE_TMO_ACK}, - [EVSTATE__TMO_ACK__FAIL] = {.str = "em_tmo_ack(fail)", - .escope = EM_ESCOPE_TMO_ACK}, - [EVSTATE__TMO_CREATE] = {.str = "em_tmo_create()", - .escope = EM_ESCOPE_TMO_CREATE}, - [EVSTATE__TMO_DELETE] = {.str = "em_tmo_delete()", - .escope = EM_ESCOPE_TMO_DELETE}, - [EVSTATE__AG_DELETE] = {.str = "em_atomic_group_delete(flush)", - .escope = EM_ESCOPE_ATOMIC_GROUP_DELETE}, - [EVSTATE__TERM_CORE__QUEUE_LOCAL] = {.str = "em_term_core(local-queue)", - .escope = EM_ESCOPE_TERM_CORE}, - [EVSTATE__TERM] = {.str = "em_term()", - .escope = EM_ESCOPE_TERM}, - /* Last: */ - [EVSTATE__LAST] = {.str = "last", - .escope = (EM_ESCOPE_INTERNAL_MASK | 0)} -}; - -static const char *const help_str_em2usr = -"OK: 'send < ref, both >=0'. Err otherwise"; -static const char *const help_str_usr2em = -"OK: 'send <= ref, both >=0' AND 'hdl evgen == evgen'. Err otherwise"; -static const char *const help_str_usr2em_ref = -"OK: 'send <= ref, both >=0'. 
Err otherwise"; - -static inline void -esv_update_state(ev_hdr_state_t *const evstate, const uint16_t api_op, - const void *const ev_ptr) -{ - const em_locm_t *const locm = &em_locm; - const uint32_t *const pl_u32 = ev_ptr; - const queue_elem_t *const q_elem = locm->current.q_elem; - - if (ev_ptr) - evstate->payload_first = *pl_u32; - - if (!q_elem) { - evstate->eo_idx = (int16_t)eo_hdl2idx(EM_EO_UNDEF); /* -1 is fine */ - evstate->queue_idx = (int16_t)queue_hdl2idx(EM_QUEUE_UNDEF); /* -1 is fine */ - } else { - evstate->eo_idx = (int16_t)eo_hdl2idx((em_eo_t)(uintptr_t)q_elem->eo); - evstate->queue_idx = (int16_t)queue_hdl2idx((em_queue_t)(uintptr_t)q_elem->queue); - } - evstate->api_op = (uint8_t)api_op; /* no trucation */ - evstate->core = locm->core_id; -} - -static inline void -evhdr_update_state(event_hdr_t *const ev_hdr, const uint16_t api_op) -{ - if (!em_shm->opt.esv.store_state) - return; /* don't store updated state */ - - const void *ev_ptr = NULL; - - if (em_shm->opt.esv.store_first_u32) - ev_ptr = event_pointer(ev_hdr->event); - - esv_update_state(&ev_hdr->state, api_op, ev_ptr); -} - -/* "Normal" ESV Error format */ -#define EVSTATE_ERROR_FMT \ -"ESV: Event:%" PRI_EVENT " state error -- counts:\t" \ -"send:%" PRIi16 " ref:%" PRIi16 " evgen:%" PRIu16 "(%" PRIu16 ")\n" \ -" Help: %s\n" \ -" prev-state:%s core:%02u:\t" \ -" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" \ -"=> err-state:%s core:%02u:\t" \ -" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" \ -" event:0x%016" PRIx64 ": ptr:0x%" PRIx64 "" - -/* ESV Error format for references */ -#define EVSTATE_REF_ERROR_FMT \ -"ESV: RefEvent:%" PRI_EVENT " state error -- counts:\t" \ -"send:%" PRIi16 " ref:%" PRIi16 " (evgen:%" PRIu16 " ignored for refs)\n" \ -" Help: %s\n" \ -" prev-state:n/a (not valid for event references)\n" \ -"=> err-state:%s core:%02u:\t" \ -" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" \ -" event:0x%016" PRIx64 ": ptr:0x%" PRIx64 "" - -/* ESV Error format for em_event_unmark_send/free/_multi() */ -#define EVSTATE_UNMARK_ERROR_FMT \ -"ESV: Event:%" PRI_EVENT " state error - Invalid 'unmark'-API use\n"\ -" prev-state:%s core:%02u:\t" \ -" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" \ -"=> err-state:%s core:%02u:\t" \ -" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" - -/* ESV Error format when esv.store_state = false */ -#define EVSTATE__NO_PREV_STATE__ERROR_FMT \ -"ESV: Event:%" PRI_EVENT " state error -- counts:\t" \ -"send:%" PRIi16 " ref:%" PRIi16 " evgen:%" PRIu16 "(%" PRIu16 ")\n" \ -" Help: %s\n" \ -" prev-state:n/a (disabled in conf)\n" \ -"=> err-state:%s core:%02u:\t" \ -" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" \ -" event:0x%016" PRIx64 ": ptr:0x%" PRIx64 "" - -/* ESV Error format for em_event_unmark_send/free/_multi() when esv.store_state = false */ -#define EVSTATE__NO_PREV_STATE__UNMARK_ERROR_FMT \ -"ESV: Event:%" PRI_EVENT " state error - Invalid 'unmark'-API use\n"\ -" prev-state:n/a (disabled in conf)\n" \ -"=> err-state:%s core:%02u:\t" \ -" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" - -/** - * ESV Error reporting - */ -static inline void -esv_error(const evstate_cnt_t cnt, - evhdl_t evhdl, const event_hdr_t *const ev_hdr, - const uint16_t api_op, bool is_unmark_error, - const char *const help_str) -{ - uint16_t prev_op = ev_hdr->state.api_op; - ev_hdr_state_t prev_state = ev_hdr->state; /* store prev good state */ - ev_hdr_state_t err_state = {0}; /* store current invalid/error state */ - 
const em_event_t event = event_hdr_to_event(ev_hdr); - const void *ev_ptr = NULL; - - if (unlikely(prev_op > EVSTATE__LAST)) - prev_op = EVSTATE__UNDEF; - - const evstate_info_t *err_info = &evstate_info_tbl[api_op]; - const evstate_info_t *prev_info = &evstate_info_tbl[prev_op]; - - char curr_eoname[EM_EO_NAME_LEN] = "(noname)"; - char prev_eoname[EM_EO_NAME_LEN] = "(noname)"; - char curr_qname[EM_QUEUE_NAME_LEN] = "(noname)"; - char prev_qname[EM_QUEUE_NAME_LEN] = "(noname)"; - char curr_payload[sizeof("0x12345678 ")] = "(n/a)"; - char prev_payload[sizeof("0x12345678 ")] = "(n/a)"; - - const eo_elem_t *eo_elem; - const queue_elem_t *q_elem; - - /* Check event!=undef to avoid error in event_pointer() */ - if (likely(event != EM_EVENT_UNDEF)) - ev_ptr = event_pointer(event); - /* Store the new _invalid_ event-state info into a separate struct */ - esv_update_state(&err_state, api_op, ev_ptr); - - /* - * Print the first 32bits of the event payload on failure, - * the option 'esv.store_payload_first_u32' affects storing during valid - * state transitions. - */ - if (ev_ptr) { - snprintf(curr_payload, sizeof(curr_payload), - "0x%08" PRIx32 "", err_state.payload_first); - curr_payload[sizeof(curr_payload) - 1] = '\0'; - } - - em_eo_t curr_eo = eo_idx2hdl(err_state.eo_idx); - em_queue_t curr_queue = queue_idx2hdl(err_state.queue_idx); - - /* current EO-name: */ - eo_elem = eo_elem_get(curr_eo); - if (eo_elem != NULL) - eo_get_name(eo_elem, curr_eoname, sizeof(curr_eoname)); - /* current queue-name: */ - q_elem = queue_elem_get(curr_queue); - if (q_elem != NULL) - queue_get_name(q_elem, curr_qname, sizeof(curr_qname)); - - const int16_t send_cnt = cnt.send_cnt - SEND_CNT_INIT; - uint16_t evgen_cnt = cnt.evgen - EVGEN_INIT; - const uint16_t evgen_hdl = evhdl.evgen - EVGEN_INIT; - const int16_t ref_cnt = REF_CNT_INIT - cnt.ref_cnt; - - /* Read the previous event state only if it has been stored */ - if (em_shm->opt.esv.store_state) { - /* - * Print the first 32 bits of the event payload for the previous - * valid state transition, if enabled in the EM config file: - * 'esv.store_payload_first_u32 = true', otherwise not stored. 
- */ - if (em_shm->opt.esv.store_first_u32) { - snprintf(prev_payload, sizeof(prev_payload), - "0x%08" PRIx32 "", prev_state.payload_first); - prev_payload[sizeof(prev_payload) - 1] = '\0'; - } - - em_eo_t prev_eo = eo_idx2hdl(prev_state.eo_idx); - em_queue_t prev_queue = queue_idx2hdl(prev_state.queue_idx); - - /* previous EO-name: */ - eo_elem = eo_elem_get(prev_eo); - if (eo_elem != NULL) - eo_get_name(eo_elem, prev_eoname, sizeof(prev_eoname)); - /* previous queue-name: */ - q_elem = queue_elem_get(prev_queue); - if (q_elem != NULL) - queue_get_name(q_elem, prev_qname, sizeof(prev_qname)); - - if (ev_hdr->flags.refs_used) { - /* Reference ESV Error, prev state available */ - INTERNAL_ERROR(EM_FATAL(EM_ERR_EVENT_STATE), - err_info->escope, EVSTATE_REF_ERROR_FMT, - event, send_cnt, ref_cnt, evgen_cnt, help_str, - err_info->str, err_state.core, - curr_eo, curr_eoname, curr_queue, curr_qname, - curr_payload, evhdl.event, evhdl.evptr); - } else if (!is_unmark_error) { - /* "Normal" ESV Error, prev state available */ - INTERNAL_ERROR(EM_FATAL(EM_ERR_EVENT_STATE), - err_info->escope, EVSTATE_ERROR_FMT, - event, send_cnt, ref_cnt, evgen_hdl, evgen_cnt, help_str, - prev_info->str, prev_state.core, prev_eo, prev_eoname, - prev_queue, prev_qname, prev_payload, - err_info->str, err_state.core, curr_eo, curr_eoname, - curr_queue, curr_qname, curr_payload, - evhdl.event, evhdl.evptr); - } else { - /* - * ESV Error from em_event_unmark_send/free/_multi(), - * prev state available. - */ - INTERNAL_ERROR(EM_FATAL(EM_ERR_EVENT_STATE), - err_info->escope, EVSTATE_UNMARK_ERROR_FMT, - event, - prev_info->str, prev_state.core, - prev_eo, prev_eoname, - prev_queue, prev_qname, prev_payload, - err_info->str, err_state.core, - curr_eo, curr_eoname, - curr_queue, curr_qname, curr_payload); - } - } else { /* em_shm->opt.esv.store_state == false */ - /* No previous state stored by EM at runtime */ - if (!is_unmark_error) { - /* "Normal" ESV Error, prev state not stored */ - INTERNAL_ERROR(EM_FATAL(EM_ERR_EVENT_STATE), - err_info->escope, EVSTATE__NO_PREV_STATE__ERROR_FMT, - event, send_cnt, ref_cnt, evgen_hdl, evgen_cnt, help_str, - err_info->str, err_state.core, curr_eo, curr_eoname, - curr_queue, curr_qname, curr_payload, - evhdl.event, evhdl.evptr); - } else { - /* - * ESV Error from em_event_unmark_send/free/_multi(), - * prev state not stored. 
- */ - INTERNAL_ERROR(EM_FATAL(EM_ERR_EVENT_STATE), - err_info->escope, EVSTATE__NO_PREV_STATE__UNMARK_ERROR_FMT, - event, - err_info->str, err_state.core, curr_eo, curr_eoname, - curr_queue, curr_qname, curr_payload); - } - } -} - -static void -evstate_error(const evstate_cnt_t cnt, evhdl_t evhdl, - const event_hdr_t *const ev_hdr, const uint16_t api_op, - const char *const help_str) -{ - /* "Normal" ESV Error */ - esv_error(cnt, evhdl, ev_hdr, api_op, false, help_str); -} - -/** - * ESV Error reporting for invalid em_event_unmark...() API use - */ -static void -evstate_unmark_error(const event_hdr_t *const ev_hdr, const uint16_t api_op) -{ - evstate_cnt_t dont_care = {.u64 = 0}; - evhdl_t dont_care_hdl = {.event = EM_EVENT_UNDEF}; - - /* ESV Error from em_event_unmark_send/free/_multi() */ - esv_error(dont_care, dont_care_hdl, ev_hdr, api_op, true, "n/a"); -} - -static inline em_event_t -esv_evinit(const em_event_t event, event_hdr_t *const ev_hdr, - const evstate_cnt_t init_cnt, const uint16_t api_op) -{ - evhdl_t evhdl = {.event = event}; - - evhdl.evgen = EVGEN_INIT; - ev_hdr->event = evhdl.event; - - /* Set initial counters (atomic) */ - __atomic_store_n(&ev_hdr->state_cnt.u64, init_cnt.u64, - __ATOMIC_RELAXED); - /* Set initial state information (non-atomic) */ - evhdr_update_state(ev_hdr, api_op); - - return evhdl.event; -} - -static inline void -esv_evinit_multi(em_event_t ev_tbl[/*in/out*/], - event_hdr_t *const ev_hdr_tbl[], const int num, - const evstate_cnt_t init_cnt, const uint16_t api_op) -{ - evhdl_t *const evhdl_tbl = (evhdl_t *)ev_tbl; - - for (int i = 0; i < num; i++) { - evhdl_tbl[i].evgen = EVGEN_INIT; - ev_hdr_tbl[i]->event = evhdl_tbl[i].event; - - /* Set initial counters for ext-events (atomic) */ - __atomic_store_n(&ev_hdr_tbl[i]->state_cnt.u64, - init_cnt.u64, __ATOMIC_RELAXED); - /* Set initial state information (non-atomic) */ - evhdr_update_state(ev_hdr_tbl[i], api_op); - } -} - -static inline em_event_t -esv_evinit_ext(const em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op) -{ - /* - * Combination of: - * event = esv_evinit(..., init_cnt_extev, ...) 
- * return evstate_em2usr(event, ...); - */ - evhdl_t evhdl = {.event = event}; - const evstate_cnt_t init = init_cnt_extev; - const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, - .ref_cnt = 0, .send_cnt = 1}; - const evstate_cnt_t cnt = {.u64 = init.u64 - sub.u64}; - - evhdl.evgen = cnt.evgen; - ev_hdr->event = evhdl.event; - - /* Set initial counters (atomic) */ - __atomic_store_n(&ev_hdr->state_cnt.u64, cnt.u64, - __ATOMIC_RELAXED); - - /* Set initial state information (non-atomic) */ - evhdr_update_state(ev_hdr, api_op); - - return evhdl.event; -} - -static inline em_event_t -esv_em2usr(const em_event_t event, event_hdr_t *const ev_hdr, - const evstate_cnt_t cnt, const uint16_t api_op, const bool is_revert) -{ - const bool refs_used = ev_hdr->flags.refs_used; - evhdl_t evhdl = {.event = event}; - evstate_cnt_t new_cnt; - - /* Update state-count and return value of all counters (atomic) */ - if (unlikely(is_revert)) { - /* Revert previous em2usr counter update on failed operation */ - new_cnt.u64 = __atomic_add_fetch(&ev_hdr->state_cnt.u64, - cnt.u64, __ATOMIC_RELAXED); - } else { - /* Normal em2usr counter update */ - new_cnt.u64 = __atomic_sub_fetch(&ev_hdr->state_cnt.u64, - cnt.u64, __ATOMIC_RELAXED); - } - - if (!refs_used) { - evhdl.evgen = new_cnt.evgen; - ev_hdr->event = evhdl.event; - } - - const int16_t ref_cnt = REF_CNT_INIT - new_cnt.ref_cnt; - const int16_t send_cnt = new_cnt.send_cnt - SEND_CNT_INIT; - - /* - * Check state count: - * OK: send_cnt < ref_cnt and both >=0. Error otherwise. - */ - if (unlikely(send_cnt >= ref_cnt || send_cnt < 0)) { - /* report fatal event-state error, never return */ - evstate_error(new_cnt, evhdl, ev_hdr, api_op, help_str_em2usr); - /* never reached */ - } - - /* - * Valid state transition, update state (non-atomic) - */ - if (!refs_used) - evhdr_update_state(ev_hdr, api_op); - - return evhdl.event; -} - -static inline void -esv_em2usr_multi(em_event_t ev_tbl[/*in/out*/], - event_hdr_t *const ev_hdr_tbl[], const int num, - const evstate_cnt_t cnt, const uint16_t api_op, - const bool is_revert) -{ - evhdl_t *const evhdl_tbl = (evhdl_t *)ev_tbl; - evstate_cnt_t new_cnt; - - for (int i = 0; i < num; i++) { - const bool refs_used = ev_hdr_tbl[i]->flags.refs_used; - - /* Update state-count and return value of all counters (atomic) */ - if (unlikely(is_revert)) { - /* Revert em2usr counter update on failed operation */ - new_cnt.u64 = - __atomic_add_fetch(&ev_hdr_tbl[i]->state_cnt.u64, - cnt.u64, __ATOMIC_RELAXED); - } else { - /* Normal em2usr counter update */ - new_cnt.u64 = - __atomic_sub_fetch(&ev_hdr_tbl[i]->state_cnt.u64, - cnt.u64, __ATOMIC_RELAXED); - } - - if (!refs_used) { - evhdl_tbl[i].evgen = new_cnt.evgen; - ev_hdr_tbl[i]->event = evhdl_tbl[i].event; - } - - const int16_t ref_cnt = REF_CNT_INIT - new_cnt.ref_cnt; - const int16_t send_cnt = new_cnt.send_cnt - SEND_CNT_INIT; - - /* - * Check state count: - * OK: send_cnt < ref_cnt and both >=0. Error otherwise. 
- */ - if (unlikely(send_cnt >= ref_cnt || send_cnt < 0)) { - /* report fatal event-state error, never return */ - evstate_error(new_cnt, evhdl_tbl[i], ev_hdr_tbl[i], - api_op, help_str_em2usr); - /* never reached */ - } - - /* - * Valid state transition, update state (non-atomic) - */ - if (!refs_used) - evhdr_update_state(ev_hdr_tbl[i], api_op); - } -} - -static inline void -esv_usr2em(const em_event_t event, event_hdr_t *const ev_hdr, - const evstate_cnt_t cnt, const uint16_t api_op, const bool is_revert) -{ - const bool refs_used = ev_hdr->flags.refs_used; - evhdl_t evhdl = {.event = event}; - evstate_cnt_t new_cnt; - - /* Update state-count and return value of all counters (atomic) */ - if (unlikely(is_revert)) { - /* Revert previous usr2em counter update on failed operation */ - new_cnt.u64 = __atomic_sub_fetch(&ev_hdr->state_cnt.u64, - cnt.u64, __ATOMIC_RELAXED); - - if (unlikely(new_cnt.evgen == EVGEN_INIT - 1)) { - /* Avoid .evgen counter wrap */ - const evstate_cnt_t add = {.evgen = EVGEN_MAX - EVGEN_INIT, - .rsvd = 0, .ref_cnt = 0, .send_cnt = 0}; - new_cnt.u64 = __atomic_add_fetch(&ev_hdr->state_cnt.u64, - add.u64, __ATOMIC_RELAXED); - } - } else { - /* Normal usr2em counter update */ - new_cnt.u64 = __atomic_add_fetch(&ev_hdr->state_cnt.u64, - cnt.u64, __ATOMIC_RELAXED); - - if (unlikely(new_cnt.evgen == EVGEN_MAX)) { - /* Avoid .evgen counter wrap */ - const evstate_cnt_t sub = {.evgen = EVGEN_MAX - EVGEN_INIT, - .rsvd = 0, .ref_cnt = 0, .send_cnt = 0}; - __atomic_fetch_sub(&ev_hdr->state_cnt.u64, sub.u64, - __ATOMIC_RELAXED); - } - /* cmp new_cnt.evgen vs evhdl.evgen of previous gen, thus -1 */ - new_cnt.evgen -= 1; - } - - const int16_t ref_cnt = REF_CNT_INIT - new_cnt.ref_cnt; - const int16_t send_cnt = new_cnt.send_cnt - SEND_CNT_INIT; - - /* - * Check state count: - * OK: send_cnt <= ref_cnt and both >=0. - * AND - * OK: event handle evgen == evgen count (not checked for references) - * Error otherwise. - * - * Check evgen only for events that never had references. - * Reference usage mixes up the evgen since the same event can be - * sent and freed multiple times. - */ - if (unlikely((send_cnt > ref_cnt || send_cnt < 0) || - (!refs_used && evhdl.evgen != new_cnt.evgen))) { - const char *const help_str = refs_used ? 
help_str_usr2em_ref : help_str_usr2em; - - /* report fatal event-state error, never return */ - evstate_error(new_cnt, evhdl, ev_hdr, api_op, help_str); - /* never reached */ - } - - /* - * Valid state transition, update state (non-atomic) - */ - if (!refs_used) - evhdr_update_state(ev_hdr, api_op); -} - -static inline void -esv_usr2em_multi(const em_event_t ev_tbl[], - event_hdr_t *const ev_hdr_tbl[], const int num, - const evstate_cnt_t cnt, const uint16_t api_op, - const bool is_revert) -{ - const evhdl_t *const evhdl_tbl = (const evhdl_t *)ev_tbl; - evstate_cnt_t new_cnt; - - for (int i = 0; i < num; i++) { - const bool refs_used = ev_hdr_tbl[i]->flags.refs_used; - - /* Update state-count and return value of all counters (atomic) */ - if (unlikely(is_revert)) { - /* Revert usr2em counter update on failed operation */ - new_cnt.u64 = - __atomic_sub_fetch(&ev_hdr_tbl[i]->state_cnt.u64, - cnt.u64, __ATOMIC_RELAXED); - - if (unlikely(new_cnt.evgen == EVGEN_INIT - 1)) { - /* Avoid .evgen counter wrap */ - const evstate_cnt_t add = {.evgen = EVGEN_MAX - EVGEN_INIT, - .rsvd = 0, .ref_cnt = 0, .send_cnt = 0}; - new_cnt.u64 = - __atomic_add_fetch(&ev_hdr_tbl[i]->state_cnt.u64, - add.u64, __ATOMIC_RELAXED); - } - } else { - /* Normal usr2em counter update */ - new_cnt.u64 = - __atomic_add_fetch(&ev_hdr_tbl[i]->state_cnt.u64, - cnt.u64, __ATOMIC_RELAXED); - - if (unlikely(new_cnt.evgen == EVGEN_MAX)) { - /* Avoid .evgen counter wrap */ - const evstate_cnt_t sub = {.evgen = EVGEN_MAX - EVGEN_INIT, - .rsvd = 0, .ref_cnt = 0, .send_cnt = 0}; - __atomic_fetch_sub(&ev_hdr_tbl[i]->state_cnt.u64, sub.u64, - __ATOMIC_RELAXED); - } - - new_cnt.evgen -= 1; - } - - const int16_t ref_cnt = REF_CNT_INIT - new_cnt.ref_cnt; - const int16_t send_cnt = new_cnt.send_cnt - SEND_CNT_INIT; - - /* - * Check state count: - * OK: send_cnt <= ref_cnt and both >=0. - * AND - * OK: event handle evgen == evgen count (not checked for references) - * Error otherwise. - * - * Check evgen only for events that never had references. - * Reference usage mixes up the evgen since the same event can be - * sent and freed multiple times. 
- */ - if (unlikely((send_cnt > ref_cnt || send_cnt < 0) || - (!refs_used && evhdl_tbl[i].evgen != new_cnt.evgen))) { - /* report fatal event-state error, never return */ - evstate_error(new_cnt, evhdl_tbl[i], ev_hdr_tbl[i], - api_op, help_str_usr2em); - /* never reached */ - } - - /* - * Valid state transition, update state (non-atomic) - */ - if (!refs_used) - evhdr_update_state(ev_hdr_tbl[i], api_op); - } -} - -em_event_t evstate_prealloc(const em_event_t event, event_hdr_t *const ev_hdr) -{ - return esv_evinit(event, ev_hdr, init_cnt_alloc, EVSTATE__PREALLOC); -} - -em_event_t evstate_alloc(const em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op) -{ - if (!em_shm->opt.esv.prealloc_pools || ev_hdr->flags.refs_used) - return esv_evinit(event, ev_hdr, init_cnt_alloc, api_op); - - const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, - .ref_cnt = 1, .send_cnt = 0}; - - return esv_em2usr(event, ev_hdr, sub, api_op, false); -} - -em_event_t evstate_alloc_tmo(const em_event_t event, event_hdr_t *const ev_hdr) -{ - return esv_evinit(event, ev_hdr, init_cnt_alloc, EVSTATE__TMO_CREATE); -} - -void evstate_alloc_multi(em_event_t ev_tbl[/*in/out*/], - event_hdr_t *const ev_hdr_tbl[], const int num) -{ - if (!em_shm->opt.esv.prealloc_pools) { - esv_evinit_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, - init_cnt_alloc, EVSTATE__ALLOC_MULTI); - return; - } - - /* em_shm->opt.esv.prealloc_pools: */ - const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, - .ref_cnt = 1, .send_cnt = 0}; - - for (int i = 0; i < num; i++) { - if (ev_hdr_tbl[i]->flags.refs_used) { - ev_tbl[i] = esv_evinit(ev_tbl[i], ev_hdr_tbl[i], - init_cnt_alloc, - EVSTATE__ALLOC_MULTI); - } else { - ev_tbl[i] = esv_em2usr(ev_tbl[i], ev_hdr_tbl[i], sub, - EVSTATE__ALLOC_MULTI, false); - } - } -} - -em_event_t evstate_ref(const em_event_t event, event_hdr_t *const ev_hdr) -{ - const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, - .ref_cnt = 1, .send_cnt = 0}; - - return esv_em2usr(event, ev_hdr, sub, EVSTATE__EVENT_REF, false); -} - -em_event_t evstate_init(const em_event_t event, event_hdr_t *const ev_hdr, - bool is_extev) -{ - if (is_extev) - return esv_evinit_ext(event, ev_hdr, EVSTATE__INIT_EXTEV); - else - return esv_evinit(event, ev_hdr, init_cnt_alloc, EVSTATE__INIT); -} - -void evstate_init_multi(em_event_t ev_tbl[/*in/out*/], - event_hdr_t *const ev_hdr_tbl[], const int num, - bool is_extev) -{ - uint16_t api_op; - evstate_cnt_t init_cnt; - - if (is_extev) { - api_op = EVSTATE__INIT_EXTEV_MULTI; - init_cnt = init_cnt_extev; - } else { - api_op = EVSTATE__INIT_MULTI; - init_cnt = init_cnt_alloc; - } - - esv_evinit_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, - init_cnt, api_op); -} - -/** - * This is a combined calculation of the following three separate - * calculations: - * - * mark allocated: - * const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, - * .ref_cnt = 1, .send_cnt = 0}; - * event = esv_em2usr(event, ev_hdr, sub, api_op, false); - * - * mark sent: - * const evstate_cnt_t add = {.evgen = 1, .rsvd = 0, - * .ref_cnt = 0, .send_cnt = 1}; - * esv_usr2em(event, ev_hdr, add, api_op, false); - * - * mark em2usr for dispatch to user EO: - * const evstate_cnt_t sub2 = {.evgen = 0, .rsvd = 0, - * .ref_cnt = 0, .send_cnt = 1}; - * event = esv_em2usr(event, ev_hdr, sub2, api_op, false); - * - * combined = add - sub - sub2 - * add: {.evgen = 1, .rsvd = 0, .ref_cnt = 0, .send_cnt = 1} - * sub: - {.evgen = 0, .rsvd = 0, .ref_cnt = 1, .send_cnt = 0} - * sub2: - {.evgen = 0, .rsvd = 0, .ref_cnt = 0, .send_cnt = 1} - * 
------------------------------------------------------- - * cmb = {.evgen = 1, .rsvd = 0, .ref_cnt =-1, .send_cnt = 0} - */ -static inline em_event_t -esv_update_ext(const em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op) -{ - const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, - .ref_cnt = 1, .send_cnt = 0}; - const evstate_cnt_t add = {.evgen = 1, .rsvd = 0, - .ref_cnt = 0, .send_cnt = 0}; - const evstate_cnt_t cmb = {.u64 = add.u64 - sub.u64}; /* combined, wraps */ - - const bool refs_used = ev_hdr->flags.refs_used; - evhdl_t evhdl = {.event = event}; - evstate_cnt_t new_cnt; - - /* Update state-count and return value of all counters (atomic) */ - new_cnt.u64 = __atomic_add_fetch(&ev_hdr->state_cnt.u64, - cmb.u64, __ATOMIC_RELAXED); - - if (unlikely(new_cnt.evgen == EVGEN_MAX)) { - /* Avoid .evgen counter wrap */ - const evstate_cnt_t wrap = {.evgen = EVGEN_MAX - EVGEN_INIT, - .rsvd = 0, .ref_cnt = 0, .send_cnt = 0}; - new_cnt.u64 = __atomic_sub_fetch(&ev_hdr->state_cnt.u64, wrap.u64, - __ATOMIC_RELAXED); - } - - if (!refs_used) { - evhdl.evgen = new_cnt.evgen; - ev_hdr->event = evhdl.event; - } - - const int16_t ref_cnt = REF_CNT_INIT - new_cnt.ref_cnt; - const int16_t send_cnt = new_cnt.send_cnt - SEND_CNT_INIT; - - /* - * Check state count: - * OK: send_cnt < ref_cnt and both >=0. Error otherwise. - */ - if (unlikely(send_cnt >= ref_cnt || send_cnt < 0)) { - /* report fatal event-state error, never return */ - evstate_error(new_cnt, evhdl, ev_hdr, api_op, help_str_em2usr); - /* never reached */ - } - - /* - * Valid state transition, update state (non-atomic) - */ - if (!refs_used) - evhdr_update_state(ev_hdr, api_op); - - return evhdl.event; -} - -em_event_t evstate_update(const em_event_t event, event_hdr_t *const ev_hdr, - bool is_extev) -{ - em_event_t ret_event; - - if (is_extev) { - /* combined mark allocated & mark sent */ - ret_event = esv_update_ext(event, ev_hdr, EVSTATE__UPDATE_EXTEV); - } else { - /* mark allocated */ - const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, - .ref_cnt = 1, .send_cnt = 0}; - - ret_event = esv_em2usr(event, ev_hdr, sub, EVSTATE__UPDATE_EXTEV, false); - } - - return ret_event; -} - -void evstate_free(em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op) -{ - const evstate_cnt_t add = {.evgen = 1, .rsvd = 0, - .ref_cnt = 1, .send_cnt = 0}; - - esv_usr2em(event, ev_hdr, add, api_op, false); -} - -void evstate_free_revert(em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op) -{ - const evstate_cnt_t sub = {.evgen = 1, .rsvd = 0, - .ref_cnt = 1, .send_cnt = 0}; - - esv_usr2em(event, ev_hdr, sub, api_op, true /*revert*/); -} - -void evstate_free_multi(const em_event_t ev_tbl[], - event_hdr_t *const ev_hdr_tbl[], const int num, - const uint16_t api_op) -{ - const evstate_cnt_t add = {.evgen = 1, .rsvd = 0, - .ref_cnt = 1, .send_cnt = 0}; - - esv_usr2em_multi(ev_tbl, ev_hdr_tbl, num, add, api_op, false); -} - -void evstate_free_revert_multi(const em_event_t ev_tbl[], - event_hdr_t *const ev_hdr_tbl[], const int num, - const uint16_t api_op) -{ - const evstate_cnt_t sub = {.evgen = 1, .rsvd = 0, - .ref_cnt = 1, .send_cnt = 0}; - - esv_usr2em_multi(ev_tbl, ev_hdr_tbl, num, sub, api_op, true /*revert*/); -} - -em_event_t evstate_em2usr(const em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op) -{ - const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, - .ref_cnt = 0, .send_cnt = 1}; - - return esv_em2usr(event, ev_hdr, sub, api_op, false); -} - -em_event_t evstate_em2usr_revert(const em_event_t 
event, event_hdr_t *const ev_hdr, - const uint16_t api_op) -{ - const evstate_cnt_t add = {.evgen = 0, .rsvd = 0, - .ref_cnt = 0, .send_cnt = 1}; - - return esv_em2usr(event, ev_hdr, add, api_op, true /*revert*/); -} - -void evstate_em2usr_multi(em_event_t ev_tbl[/*in/out*/], - event_hdr_t *const ev_hdr_tbl[], const int num, - const uint16_t api_op) -{ - const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, - .ref_cnt = 0, .send_cnt = 1}; - - esv_em2usr_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, sub, api_op, false); -} - -void evstate_em2usr_revert_multi(em_event_t ev_tbl[/*in/out*/], - event_hdr_t *const ev_hdr_tbl[], const int num, - const uint16_t api_op) -{ - const evstate_cnt_t add = {.evgen = 0, .rsvd = 0, - .ref_cnt = 0, .send_cnt = 1}; - - esv_em2usr_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, add, api_op, true /*revert*/); -} - -void evstate_usr2em(const em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op) -{ - const evstate_cnt_t add = {.evgen = 1, .rsvd = 0, - .ref_cnt = 0, .send_cnt = 1}; - - esv_usr2em(event, ev_hdr, add, api_op, false); -} - -void evstate_usr2em_revert(const em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op) -{ - const evstate_cnt_t sub = {.evgen = 1, .rsvd = 0, - .ref_cnt = 0, .send_cnt = 1}; - - esv_usr2em(event, ev_hdr, sub, api_op, true /*revert*/); -} - -void evstate_usr2em_multi(const em_event_t ev_tbl[], - event_hdr_t *const ev_hdr_tbl[], const int num, - const uint16_t api_op) -{ - const evstate_cnt_t add = {.evgen = 1, .rsvd = 0, - .ref_cnt = 0, .send_cnt = 1}; - - esv_usr2em_multi(ev_tbl, ev_hdr_tbl, num, add, api_op, false); -} - -void evstate_usr2em_revert_multi(const em_event_t ev_tbl[], - event_hdr_t *const ev_hdr_tbl[], const int num, - const uint16_t api_op) -{ - const evstate_cnt_t sub = {.evgen = 1, .rsvd = 0, - .ref_cnt = 0, .send_cnt = 1}; - - esv_usr2em_multi(ev_tbl, ev_hdr_tbl, num, sub, api_op, true /*revert*/); -} - -/* - * Ensure that em_event_unmark_...() is only called after - * em_event_mark_...() (not after normal em_send/free() etc). - */ -static inline void -check_valid_unmark(const event_hdr_t *ev_hdr, uint16_t api_op, - const uint16_t expected_ops[], const int num_ops) -{ - /* event refs: can't rely on prev api_op */ - if (ev_hdr->flags.refs_used) - return; - - uint16_t prev_op = ev_hdr->state.api_op; - - for (int i = 0; i < num_ops; i++) { - if (prev_op == expected_ops[i]) - return; /* success */ - } - - /* previous API was NOT em_event_mark_..., report FATAL error! */ - evstate_unmark_error(ev_hdr, api_op); -} - -static inline void -check_valid_unmark_multi(event_hdr_t *const ev_hdr_tbl[], const int num_evs, - uint16_t api_op, const uint16_t expected_ops[], const int num_ops) -{ - uint16_t prev_op; - bool is_valid; - - for (int i = 0; i < num_evs; i++) { - /* event refs: can't rely on prev api_op */ - if (ev_hdr_tbl[i]->flags.refs_used) - continue; - - prev_op = ev_hdr_tbl[i]->state.api_op; - is_valid = false; - - for (int j = 0; j < num_ops; j++) { - if (prev_op == expected_ops[j]) { - is_valid = true; - break; /* success */ - } - } - - /* previous API was NOT em_event_mark_..., report FATAL error!*/ - if (unlikely(!is_valid)) - evstate_unmark_error(ev_hdr_tbl[i], api_op); - } -} - -void evstate_unmark_send(const em_event_t event, event_hdr_t *const ev_hdr) -{ - if (em_shm->opt.esv.store_state) { - uint16_t expected_prev_ops[1] = {EVSTATE__MARK_SEND}; - /* - * Ensure that em_event_unmark_send() is only called after - * em_event_mark_send/_multi() (not after em_send() etc). 
- */ - check_valid_unmark(ev_hdr, EVSTATE__UNMARK_SEND, - expected_prev_ops, 1); - } - - evstate_usr2em_revert(event, ev_hdr, EVSTATE__UNMARK_SEND); -} - -void evstate_unmark_free(const em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op) -{ - if (em_shm->opt.esv.store_state) { - uint16_t expected_prev_ops[2] = {EVSTATE__MARK_FREE, - EVSTATE__MARK_FREE_MULTI}; - /* - * Ensure that em_event_unmark_free() is only called - * after em_event_mark_free() (not after em_free() etc). - */ - check_valid_unmark(ev_hdr, api_op, expected_prev_ops, 2); - } - - evstate_free_revert(event, ev_hdr, api_op); -} - -void evstate_unmark_free_multi(const em_event_t ev_tbl[], - event_hdr_t *const ev_hdr_tbl[], const int num, - const uint16_t api_op) -{ - if (em_shm->opt.esv.store_state) { - uint16_t expected_prev_ops[2] = {EVSTATE__MARK_FREE_MULTI, - EVSTATE__MARK_FREE}; - /* - * Ensure that em_event_unmark_free_multi() is only - * called after em_event_mark_free_multi() - * (not after em_free/_multi() etc). - */ - check_valid_unmark_multi(ev_hdr_tbl, num, api_op, - expected_prev_ops, 2); - } - - evstate_free_revert_multi(ev_tbl, ev_hdr_tbl, num, api_op); -} - -static int read_config_file(void) -{ - const char *conf_str; - bool val_bool = false; - int ret; - - EM_PRINT("EM ESV config: (EM_ESV_ENABLE=%d)\n", EM_ESV_ENABLE); - - /* - * Option: esv.enable - runtime enable/disable - */ - conf_str = "esv.enable"; - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - /* store & print the value */ - em_shm->opt.esv.enable = (int)val_bool; - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", - val_bool); - - if (!em_shm->opt.esv.enable) { - /* Read no more options if ESV is disabled */ - memset(&em_shm->opt.esv, 0, sizeof(em_shm->opt.esv)); - return 0; - } - - /* - * Option: esv.store_state - */ - conf_str = "esv.store_state"; - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - /* store & print the value */ - em_shm->opt.esv.store_state = (int)val_bool; - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", - val_bool); - - /* - * Option: esv.store_payload_first_u32 - */ - conf_str = "esv.store_payload_first_u32"; - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - /* store & print the value */ - em_shm->opt.esv.store_first_u32 = (int)val_bool; - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", - val_bool); - - /* - * Option: esv.prealloc_pools - */ - conf_str = "esv.prealloc_pools"; - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - /* store & print the value */ - em_shm->opt.esv.prealloc_pools = (int)val_bool; - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? 
"true" : "false", - val_bool); - - return 0; -} - -em_status_t esv_init(void) -{ - if (read_config_file()) - return EM_ERR_LIB_FAILED; - - return EM_OK; -} - -void esv_disabled_warn_config(void) -{ - const char *conf_str = "esv.enable"; - bool val_bool = false; - int ret; - - EM_PRINT("EM ESV config: (EM_ESV_ENABLE=%d)\n", EM_ESV_ENABLE); - EM_PRINT(" ESV disabled\n"); - - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) - return; /* ESV state option not found in runtime, no warning */ - - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", val_bool); - - if (unlikely(val_bool)) - EM_PRINT(" WARNING: ESV disabled (build-time) - config file option IGNORED!\n"); -} +/* + * Copyright (c) 2020-2022, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "em_include.h" + +static int read_config_file(void); + +/** + * Initial counter values set during an alloc-operation: ref=1, send=0 + * (em_alloc/_multi(), em_event_clone()) + */ +static const evstate_cnt_t init_cnt_alloc = {.evgen = EVGEN_INIT, + .rsvd = 0, + .ref_cnt = REF_CNT_INIT - 1, + .send_cnt = 0 + SEND_CNT_INIT}; +/** + * Initial counter values for external events entering into EM + * (event not allocated by EM): ref=1, send=1 + */ +static const evstate_cnt_t init_cnt_extev = {.evgen = EVGEN_INIT, + .rsvd = 0, + .ref_cnt = REF_CNT_INIT - 1, + .send_cnt = 1 + SEND_CNT_INIT}; + +/** + * Information about an event-state update location + */ +typedef struct { + const char *str; + em_escope_t escope; +} evstate_info_t; + +/** + * Constant table containing event-state update location information. + * Only accessed when an erroneous event state has been detected and is being + * reported to the error handler. 
+ */ +static const evstate_info_t evstate_info_tbl[] = { + [EVSTATE__UNDEF] = {.str = "undefined", + .escope = (EM_ESCOPE_INTERNAL_MASK | 0)}, + [EVSTATE__PREALLOC] = {.str = "pool-create(prealloc-events)", + .escope = EM_ESCOPE_POOL_CREATE}, + [EVSTATE__ALLOC] = {.str = "em_alloc()", + .escope = EM_ESCOPE_ALLOC}, + [EVSTATE__ALLOC_MULTI] = {.str = "em_alloc_multi()", + .escope = EM_ESCOPE_ALLOC_MULTI}, + [EVSTATE__EVENT_CLONE] = {.str = "em_event_clone()", + .escope = EM_ESCOPE_EVENT_CLONE}, + [EVSTATE__EVENT_REF] = {.str = "em_event_ref()", + .escope = EM_ESCOPE_EVENT_REF}, + [EVSTATE__FREE] = {.str = "em_free()", + .escope = EM_ESCOPE_FREE}, + [EVSTATE__FREE_MULTI] = {.str = "em_free_multi()", + .escope = EM_ESCOPE_FREE_MULTI}, + [EVSTATE__EVENT_VECTOR_FREE] = {.str = "em_event_vector_free()", + .escope = EM_ESCOPE_EVENT_VECTOR_FREE}, + [EVSTATE__INIT] = {.str = "init-event", + .escope = EM_ESCOPE_ODP_EXT}, + [EVSTATE__INIT_MULTI] = {.str = "init-events", + .escope = EM_ESCOPE_ODP_EXT}, + [EVSTATE__INIT_EXTEV] = {.str = "dispatch(init-ext-event)", + .escope = EM_ESCOPE_DISPATCH}, + [EVSTATE__INIT_EXTEV_MULTI] = {.str = "dispatch(init-ext-events)", + .escope = EM_ESCOPE_DISPATCH}, + [EVSTATE__UPDATE_EXTEV] = {.str = "dispatch(update-ext-event)", + .escope = EM_ESCOPE_DISPATCH}, + [EVSTATE__SEND] = {.str = "em_send()", + .escope = EM_ESCOPE_SEND}, + [EVSTATE__SEND__FAIL] = {.str = "em_send(fail)", + .escope = EM_ESCOPE_SEND}, + [EVSTATE__SEND_EGRP] = {.str = "em_send_group()", + .escope = EM_ESCOPE_SEND_GROUP}, + [EVSTATE__SEND_EGRP__FAIL] = {.str = "em_send_group(fail)", + .escope = EM_ESCOPE_SEND_GROUP}, + [EVSTATE__SEND_MULTI] = {.str = "em_send_multi()", + .escope = EM_ESCOPE_SEND_MULTI}, + [EVSTATE__SEND_MULTI__FAIL] = {.str = "em_send_multi(fail)", + .escope = EM_ESCOPE_SEND_MULTI}, + [EVSTATE__SEND_EGRP_MULTI] = {.str = "em_send_group_multi()", + .escope = EM_ESCOPE_SEND_GROUP_MULTI}, + [EVSTATE__SEND_EGRP_MULTI__FAIL] = {.str = "em_send_group_multi(fail)", + .escope = EM_ESCOPE_SEND_GROUP_MULTI}, + [EVSTATE__EO_START_SEND_BUFFERED] = {.str = "eo-start:send-buffered-events()", + .escope = EM_ESCOPE_SEND_MULTI}, + [EVSTATE__MARK_SEND] = {.str = "em_event_mark_send()", + .escope = EM_ESCOPE_EVENT_MARK_SEND}, + [EVSTATE__UNMARK_SEND] = {.str = "em_event_unmark_send()", + .escope = EM_ESCOPE_EVENT_UNMARK_SEND}, + [EVSTATE__MARK_FREE] = {.str = "em_event_mark_free()", + .escope = EM_ESCOPE_EVENT_MARK_FREE}, + [EVSTATE__UNMARK_FREE] = {.str = "em_event_unmark_free()", + .escope = EM_ESCOPE_EVENT_UNMARK_FREE}, + [EVSTATE__MARK_FREE_MULTI] = {.str = "em_event_mark_free_multi()", + .escope = EM_ESCOPE_EVENT_MARK_FREE_MULTI}, + [EVSTATE__UNMARK_FREE_MULTI] = {.str = "em_event_unmark_free_multi()", + .escope = EM_ESCOPE_EVENT_UNMARK_FREE_MULTI}, + [EVSTATE__DISPATCH] = {.str = "em_dispatch(single-event)", + .escope = EM_ESCOPE_DISPATCH}, + [EVSTATE__DISPATCH_MULTI] = {.str = "em_dispatch(multiple-events)", + .escope = EM_ESCOPE_DISPATCH}, + [EVSTATE__DISPATCH_SCHED__FAIL] = {.str = "em_dispatch(drop sched-events)", + .escope = EM_ESCOPE_DISPATCH}, + [EVSTATE__DISPATCH_LOCAL__FAIL] = {.str = "em_dispatch(drop local-events)", + .escope = EM_ESCOPE_DISPATCH}, + [EVSTATE__DEQUEUE] = {.str = "em_queue_dequeue()", + .escope = EM_ESCOPE_QUEUE_DEQUEUE}, + [EVSTATE__DEQUEUE_MULTI] = {.str = "em_queue_dequeue_multi()", + .escope = EM_ESCOPE_QUEUE_DEQUEUE_MULTI}, + [EVSTATE__TMO_SET_ABS] = {.str = "em_tmo_set_abs()", + .escope = EM_ESCOPE_TMO_SET_ABS}, + [EVSTATE__TMO_SET_ABS__FAIL] = {.str = 
"em_tmo_set_abs(fail)", + .escope = EM_ESCOPE_TMO_SET_ABS}, + [EVSTATE__TMO_SET_REL] = {.str = "em_tmo_set_rel()", + .escope = EM_ESCOPE_TMO_SET_REL}, + [EVSTATE__TMO_SET_REL__FAIL] = {.str = "em_tmo_set_rel(fail)", + .escope = EM_ESCOPE_TMO_SET_REL}, + [EVSTATE__TMO_SET_PERIODIC] = {.str = "em_tmo_set_periodic()", + .escope = EM_ESCOPE_TMO_SET_PERIODIC}, + [EVSTATE__TMO_SET_PERIODIC__FAIL] = {.str = "em_tmo_set_periodic(fail)", + .escope = EM_ESCOPE_TMO_SET_PERIODIC}, + [EVSTATE__TMO_CANCEL] = {.str = "em_tmo_cancel()", + .escope = EM_ESCOPE_TMO_CANCEL}, + [EVSTATE__TMO_ACK] = {.str = "em_tmo_ack()", + .escope = EM_ESCOPE_TMO_ACK}, + [EVSTATE__TMO_ACK__NOSKIP] = {.str = "em_tmo_ack(noskip)", + .escope = EM_ESCOPE_TMO_ACK}, + [EVSTATE__TMO_ACK__FAIL] = {.str = "em_tmo_ack(fail)", + .escope = EM_ESCOPE_TMO_ACK}, + [EVSTATE__TMO_CREATE] = {.str = "em_tmo_create()", + .escope = EM_ESCOPE_TMO_CREATE}, + [EVSTATE__TMO_DELETE] = {.str = "em_tmo_delete()", + .escope = EM_ESCOPE_TMO_DELETE}, + [EVSTATE__AG_DELETE] = {.str = "em_atomic_group_delete(flush)", + .escope = EM_ESCOPE_ATOMIC_GROUP_DELETE}, + [EVSTATE__TERM_CORE__QUEUE_LOCAL] = {.str = "em_term_core(local-queue)", + .escope = EM_ESCOPE_TERM_CORE}, + [EVSTATE__TERM] = {.str = "em_term()", + .escope = EM_ESCOPE_TERM}, + /* Last: */ + [EVSTATE__LAST] = {.str = "last", + .escope = (EM_ESCOPE_INTERNAL_MASK | 0)} +}; + +static const char *const help_str_em2usr = +"OK: 'send < ref, both >=0'. Err otherwise"; +static const char *const help_str_usr2em = +"OK: 'send <= ref, both >=0' AND 'hdl evgen == evgen'. Err otherwise"; +static const char *const help_str_usr2em_ref = +"OK: 'send <= ref, both >=0'. Err otherwise"; + +static inline void +esv_update_state(ev_hdr_state_t *const evstate, const uint16_t api_op, + const void *const ev_ptr) +{ + const em_locm_t *const locm = &em_locm; + const uint32_t *const pl_u32 = ev_ptr; + const queue_elem_t *const q_elem = locm->current.q_elem; + + if (ev_ptr) + evstate->payload_first = *pl_u32; + + if (!q_elem) { + evstate->eo_idx = (int16_t)eo_hdl2idx(EM_EO_UNDEF); /* -1 is fine */ + evstate->queue_idx = (int16_t)queue_hdl2idx(EM_QUEUE_UNDEF); /* -1 is fine */ + } else { + evstate->eo_idx = (int16_t)eo_hdl2idx((em_eo_t)(uintptr_t)q_elem->eo); + evstate->queue_idx = (int16_t)queue_hdl2idx((em_queue_t)(uintptr_t)q_elem->queue); + } + evstate->api_op = (uint8_t)api_op; /* no truncation */ + evstate->core = locm->core_id; +} + +static inline void +evhdr_update_state(event_hdr_t *const ev_hdr, const uint16_t api_op) +{ + if (!em_shm->opt.esv.store_state) + return; /* don't store updated state */ + + const void *ev_ptr = NULL; + + if (em_shm->opt.esv.store_first_u32) + ev_ptr = event_pointer(ev_hdr->event); + + esv_update_state(&ev_hdr->state, api_op, ev_ptr); +} + +/* "Normal" ESV Error format */ +#define EVSTATE_ERROR_FMT \ +"ESV: Event:%" PRI_EVENT " state error -- counts:\t" \ +"send:%" PRIi16 " ref:%" PRIi16 " evgen:%" PRIu16 "(%" PRIu16 ")\n" \ +" Help: %s\n" \ +" prev-state:%s core:%02u:\t" \ +" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" \ +"=> err-state:%s core:%02u:\t" \ +" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" \ +" event:0x%016" PRIx64 ": ptr:0x%" PRIx64 "" + +/* ESV Error format for references */ +#define EVSTATE_REF_ERROR_FMT \ +"ESV: RefEvent:%" PRI_EVENT " state error -- counts:\t" \ +"send:%" PRIi16 " ref:%" PRIi16 " (evgen:%" PRIu16 " ignored for refs)\n" \ +" Help: %s\n" \ +" prev-state:n/a (not valid for event references)\n" \ +"=> err-state:%s core:%02u:\t" \ +" 
EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" \ +" event:0x%016" PRIx64 ": ptr:0x%" PRIx64 "" + +/* ESV Error format for em_event_unmark_send/free/_multi() */ +#define EVSTATE_UNMARK_ERROR_FMT \ +"ESV: Event:%" PRI_EVENT " state error - Invalid 'unmark'-API use\n"\ +" prev-state:%s core:%02u:\t" \ +" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" \ +"=> err-state:%s core:%02u:\t" \ +" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" + +/* ESV Error format when esv.store_state = false */ +#define EVSTATE__NO_PREV_STATE__ERROR_FMT \ +"ESV: Event:%" PRI_EVENT " state error -- counts:\t" \ +"send:%" PRIi16 " ref:%" PRIi16 " evgen:%" PRIu16 "(%" PRIu16 ")\n" \ +" Help: %s\n" \ +" prev-state:n/a (disabled in conf)\n" \ +"=> err-state:%s core:%02u:\t" \ +" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" \ +" event:0x%016" PRIx64 ": ptr:0x%" PRIx64 "" + +/* ESV Error format for em_event_unmark_send/free/_multi() when esv.store_state = false */ +#define EVSTATE__NO_PREV_STATE__UNMARK_ERROR_FMT \ +"ESV: Event:%" PRI_EVENT " state error - Invalid 'unmark'-API use\n"\ +" prev-state:n/a (disabled in conf)\n" \ +"=> err-state:%s core:%02u:\t" \ +" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" + +/** + * ESV Error reporting + */ +static inline void +esv_error(const evstate_cnt_t cnt, + evhdl_t evhdl, const event_hdr_t *const ev_hdr, + const uint16_t api_op, bool is_unmark_error, + const char *const help_str) +{ + uint16_t prev_op = ev_hdr->state.api_op; + ev_hdr_state_t prev_state = ev_hdr->state; /* store prev good state */ + ev_hdr_state_t err_state = {0}; /* store current invalid/error state */ + const em_event_t event = event_hdr_to_event(ev_hdr); + const void *ev_ptr = NULL; + + if (unlikely(prev_op > EVSTATE__LAST)) + prev_op = EVSTATE__UNDEF; + + const evstate_info_t *err_info = &evstate_info_tbl[api_op]; + const evstate_info_t *prev_info = &evstate_info_tbl[prev_op]; + + char curr_eoname[EM_EO_NAME_LEN] = "(noname)"; + char prev_eoname[EM_EO_NAME_LEN] = "(noname)"; + char curr_qname[EM_QUEUE_NAME_LEN] = "(noname)"; + char prev_qname[EM_QUEUE_NAME_LEN] = "(noname)"; + char curr_payload[sizeof("0x12345678 ")] = "(n/a)"; + char prev_payload[sizeof("0x12345678 ")] = "(n/a)"; + + const eo_elem_t *eo_elem; + const queue_elem_t *q_elem; + + /* Check event!=undef to avoid error in event_pointer() */ + if (likely(event != EM_EVENT_UNDEF)) + ev_ptr = event_pointer(event); + /* Store the new _invalid_ event-state info into a separate struct */ + esv_update_state(&err_state, api_op, ev_ptr); + + /* + * Print the first 32bits of the event payload on failure, + * the option 'esv.store_payload_first_u32' affects storing during valid + * state transitions. 
+ */ + if (ev_ptr) { + snprintf(curr_payload, sizeof(curr_payload), + "0x%08" PRIx32 "", err_state.payload_first); + curr_payload[sizeof(curr_payload) - 1] = '\0'; + } + + em_eo_t curr_eo = eo_idx2hdl(err_state.eo_idx); + em_queue_t curr_queue = queue_idx2hdl(err_state.queue_idx); + + /* current EO-name: */ + eo_elem = eo_elem_get(curr_eo); + if (eo_elem != NULL) + eo_get_name(eo_elem, curr_eoname, sizeof(curr_eoname)); + /* current queue-name: */ + q_elem = queue_elem_get(curr_queue); + if (q_elem != NULL) + queue_get_name(q_elem, curr_qname, sizeof(curr_qname)); + + const int16_t send_cnt = cnt.send_cnt - SEND_CNT_INIT; + uint16_t evgen_cnt = cnt.evgen - EVGEN_INIT; + const uint16_t evgen_hdl = evhdl.evgen - EVGEN_INIT; + const int16_t ref_cnt = REF_CNT_INIT - cnt.ref_cnt; + + /* Read the previous event state only if it has been stored */ + if (em_shm->opt.esv.store_state) { + /* + * Print the first 32 bits of the event payload for the previous + * valid state transition, if enabled in the EM config file: + * 'esv.store_payload_first_u32 = true', otherwise not stored. + */ + if (em_shm->opt.esv.store_first_u32) { + snprintf(prev_payload, sizeof(prev_payload), + "0x%08" PRIx32 "", prev_state.payload_first); + prev_payload[sizeof(prev_payload) - 1] = '\0'; + } + + em_eo_t prev_eo = eo_idx2hdl(prev_state.eo_idx); + em_queue_t prev_queue = queue_idx2hdl(prev_state.queue_idx); + + /* previous EO-name: */ + eo_elem = eo_elem_get(prev_eo); + if (eo_elem != NULL) + eo_get_name(eo_elem, prev_eoname, sizeof(prev_eoname)); + /* previous queue-name: */ + q_elem = queue_elem_get(prev_queue); + if (q_elem != NULL) + queue_get_name(q_elem, prev_qname, sizeof(prev_qname)); + + if (ev_hdr->flags.refs_used) { + /* Reference ESV Error, prev state available */ + INTERNAL_ERROR(EM_FATAL(EM_ERR_EVENT_STATE), + err_info->escope, EVSTATE_REF_ERROR_FMT, + event, send_cnt, ref_cnt, evgen_cnt, help_str, + err_info->str, err_state.core, + curr_eo, curr_eoname, curr_queue, curr_qname, + curr_payload, evhdl.event, evhdl.evptr); + } else if (!is_unmark_error) { + /* "Normal" ESV Error, prev state available */ + INTERNAL_ERROR(EM_FATAL(EM_ERR_EVENT_STATE), + err_info->escope, EVSTATE_ERROR_FMT, + event, send_cnt, ref_cnt, evgen_hdl, evgen_cnt, help_str, + prev_info->str, prev_state.core, prev_eo, prev_eoname, + prev_queue, prev_qname, prev_payload, + err_info->str, err_state.core, curr_eo, curr_eoname, + curr_queue, curr_qname, curr_payload, + evhdl.event, evhdl.evptr); + } else { + /* + * ESV Error from em_event_unmark_send/free/_multi(), + * prev state available. + */ + INTERNAL_ERROR(EM_FATAL(EM_ERR_EVENT_STATE), + err_info->escope, EVSTATE_UNMARK_ERROR_FMT, + event, + prev_info->str, prev_state.core, + prev_eo, prev_eoname, + prev_queue, prev_qname, prev_payload, + err_info->str, err_state.core, + curr_eo, curr_eoname, + curr_queue, curr_qname, curr_payload); + } + } else { /* em_shm->opt.esv.store_state == false */ + /* No previous state stored by EM at runtime */ + if (!is_unmark_error) { + /* "Normal" ESV Error, prev state not stored */ + INTERNAL_ERROR(EM_FATAL(EM_ERR_EVENT_STATE), + err_info->escope, EVSTATE__NO_PREV_STATE__ERROR_FMT, + event, send_cnt, ref_cnt, evgen_hdl, evgen_cnt, help_str, + err_info->str, err_state.core, curr_eo, curr_eoname, + curr_queue, curr_qname, curr_payload, + evhdl.event, evhdl.evptr); + } else { + /* + * ESV Error from em_event_unmark_send/free/_multi(), + * prev state not stored. 
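+			 *
+			 * With 'esv.store_state = false' EM skips storing
+			 * per-transition state at runtime (lower overhead),
+			 * at the cost of the error report lacking the
+			 * previous-state line; hence the
+			 * EVSTATE__NO_PREV_STATE__* formats used in this
+			 * branch.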
+ */ + INTERNAL_ERROR(EM_FATAL(EM_ERR_EVENT_STATE), + err_info->escope, EVSTATE__NO_PREV_STATE__UNMARK_ERROR_FMT, + event, + err_info->str, err_state.core, curr_eo, curr_eoname, + curr_queue, curr_qname, curr_payload); + } + } +} + +static void +evstate_error(const evstate_cnt_t cnt, evhdl_t evhdl, + const event_hdr_t *const ev_hdr, const uint16_t api_op, + const char *const help_str) +{ + /* "Normal" ESV Error */ + esv_error(cnt, evhdl, ev_hdr, api_op, false, help_str); +} + +/** + * ESV Error reporting for invalid em_event_unmark...() API use + */ +static void +evstate_unmark_error(const event_hdr_t *const ev_hdr, const uint16_t api_op) +{ + evstate_cnt_t dont_care = {.u64 = 0}; + evhdl_t dont_care_hdl = {.event = EM_EVENT_UNDEF}; + + /* ESV Error from em_event_unmark_send/free/_multi() */ + esv_error(dont_care, dont_care_hdl, ev_hdr, api_op, true, "n/a"); +} + +static inline em_event_t +esv_evinit(const em_event_t event, event_hdr_t *const ev_hdr, + const evstate_cnt_t init_cnt, const uint16_t api_op) +{ + evhdl_t evhdl = {.event = event}; + + evhdl.evgen = EVGEN_INIT; + ev_hdr->event = evhdl.event; + + /* Set initial counters (atomic) */ + __atomic_store_n(&ev_hdr->state_cnt.u64, init_cnt.u64, + __ATOMIC_RELAXED); + /* Set initial state information (non-atomic) */ + evhdr_update_state(ev_hdr, api_op); + + return evhdl.event; +} + +static inline void +esv_evinit_multi(em_event_t ev_tbl[/*in/out*/], + event_hdr_t *const ev_hdr_tbl[], const int num, + const evstate_cnt_t init_cnt, const uint16_t api_op) +{ + evhdl_t *const evhdl_tbl = (evhdl_t *)ev_tbl; + + for (int i = 0; i < num; i++) { + evhdl_tbl[i].evgen = EVGEN_INIT; + ev_hdr_tbl[i]->event = evhdl_tbl[i].event; + + /* Set initial counters for ext-events (atomic) */ + __atomic_store_n(&ev_hdr_tbl[i]->state_cnt.u64, + init_cnt.u64, __ATOMIC_RELAXED); + /* Set initial state information (non-atomic) */ + evhdr_update_state(ev_hdr_tbl[i], api_op); + } +} + +static inline em_event_t +esv_evinit_ext(const em_event_t event, event_hdr_t *const ev_hdr, + const uint16_t api_op) +{ + /* + * Combination of: + * event = esv_evinit(..., init_cnt_extev, ...) 
+ * return evstate_em2usr(event, ...); + */ + evhdl_t evhdl = {.event = event}; + const evstate_cnt_t init = init_cnt_extev; + const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, + .ref_cnt = 0, .send_cnt = 1}; + const evstate_cnt_t cnt = {.u64 = init.u64 - sub.u64}; + + evhdl.evgen = cnt.evgen; + ev_hdr->event = evhdl.event; + + /* Set initial counters (atomic) */ + __atomic_store_n(&ev_hdr->state_cnt.u64, cnt.u64, + __ATOMIC_RELAXED); + + /* Set initial state information (non-atomic) */ + evhdr_update_state(ev_hdr, api_op); + + return evhdl.event; +} + +static inline em_event_t +esv_em2usr(const em_event_t event, event_hdr_t *const ev_hdr, + const evstate_cnt_t cnt, const uint16_t api_op, const bool is_revert) +{ + const bool refs_used = ev_hdr->flags.refs_used; + evhdl_t evhdl = {.event = event}; + evstate_cnt_t new_cnt; + + /* Update state-count and return value of all counters (atomic) */ + if (unlikely(is_revert)) { + /* Revert previous em2usr counter update on failed operation */ + new_cnt.u64 = __atomic_add_fetch(&ev_hdr->state_cnt.u64, + cnt.u64, __ATOMIC_RELAXED); + } else { + /* Normal em2usr counter update */ + new_cnt.u64 = __atomic_sub_fetch(&ev_hdr->state_cnt.u64, + cnt.u64, __ATOMIC_RELAXED); + } + + if (!refs_used) { + evhdl.evgen = new_cnt.evgen; + ev_hdr->event = evhdl.event; + } + + const int16_t ref_cnt = REF_CNT_INIT - new_cnt.ref_cnt; + const int16_t send_cnt = new_cnt.send_cnt - SEND_CNT_INIT; + + /* + * Check state count: + * OK: send_cnt < ref_cnt and both >=0. Error otherwise. + */ + if (unlikely(send_cnt >= ref_cnt || send_cnt < 0)) { + /* report fatal event-state error, never return */ + evstate_error(new_cnt, evhdl, ev_hdr, api_op, help_str_em2usr); + /* never reached */ + } + + /* + * Valid state transition, update state (non-atomic) + */ + if (!refs_used) + evhdr_update_state(ev_hdr, api_op); + + return evhdl.event; +} + +static inline void +esv_em2usr_multi(em_event_t ev_tbl[/*in/out*/], + event_hdr_t *const ev_hdr_tbl[], const int num, + const evstate_cnt_t cnt, const uint16_t api_op, + const bool is_revert) +{ + evhdl_t *const evhdl_tbl = (evhdl_t *)ev_tbl; + evstate_cnt_t new_cnt; + + for (int i = 0; i < num; i++) { + const bool refs_used = ev_hdr_tbl[i]->flags.refs_used; + + /* Update state-count and return value of all counters (atomic) */ + if (unlikely(is_revert)) { + /* Revert em2usr counter update on failed operation */ + new_cnt.u64 = + __atomic_add_fetch(&ev_hdr_tbl[i]->state_cnt.u64, + cnt.u64, __ATOMIC_RELAXED); + } else { + /* Normal em2usr counter update */ + new_cnt.u64 = + __atomic_sub_fetch(&ev_hdr_tbl[i]->state_cnt.u64, + cnt.u64, __ATOMIC_RELAXED); + } + + if (!refs_used) { + evhdl_tbl[i].evgen = new_cnt.evgen; + ev_hdr_tbl[i]->event = evhdl_tbl[i].event; + } + + const int16_t ref_cnt = REF_CNT_INIT - new_cnt.ref_cnt; + const int16_t send_cnt = new_cnt.send_cnt - SEND_CNT_INIT; + + /* + * Check state count: + * OK: send_cnt < ref_cnt and both >=0. Error otherwise. 
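+	 *
+	 * Worked example (event without references): after em_send() the
+	 * decoded counts are ref=1, send=1. Delivering the event to the
+	 * user is an em2usr transition: send drops 1 -> 0 and '0 < 1'
+	 * holds. A second delivery of the same event (e.g. a double send)
+	 * would drop send to -1 and trigger the fatal error below.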
+ */ + if (unlikely(send_cnt >= ref_cnt || send_cnt < 0)) { + /* report fatal event-state error, never return */ + evstate_error(new_cnt, evhdl_tbl[i], ev_hdr_tbl[i], + api_op, help_str_em2usr); + /* never reached */ + } + + /* + * Valid state transition, update state (non-atomic) + */ + if (!refs_used) + evhdr_update_state(ev_hdr_tbl[i], api_op); + } +} + +static inline void +esv_usr2em(const em_event_t event, event_hdr_t *const ev_hdr, + const evstate_cnt_t cnt, const uint16_t api_op, const bool is_revert) +{ + const bool refs_used = ev_hdr->flags.refs_used; + evhdl_t evhdl = {.event = event}; + evstate_cnt_t new_cnt; + + /* Update state-count and return value of all counters (atomic) */ + if (unlikely(is_revert)) { + /* Revert previous usr2em counter update on failed operation */ + new_cnt.u64 = __atomic_sub_fetch(&ev_hdr->state_cnt.u64, + cnt.u64, __ATOMIC_RELAXED); + + if (unlikely(new_cnt.evgen == EVGEN_INIT - 1)) { + /* Avoid .evgen counter wrap */ + const evstate_cnt_t add = {.evgen = EVGEN_MAX - EVGEN_INIT, + .rsvd = 0, .ref_cnt = 0, .send_cnt = 0}; + new_cnt.u64 = __atomic_add_fetch(&ev_hdr->state_cnt.u64, + add.u64, __ATOMIC_RELAXED); + } + } else { + /* Normal usr2em counter update */ + new_cnt.u64 = __atomic_add_fetch(&ev_hdr->state_cnt.u64, + cnt.u64, __ATOMIC_RELAXED); + + if (unlikely(new_cnt.evgen == EVGEN_MAX)) { + /* Avoid .evgen counter wrap */ + const evstate_cnt_t sub = {.evgen = EVGEN_MAX - EVGEN_INIT, + .rsvd = 0, .ref_cnt = 0, .send_cnt = 0}; + __atomic_fetch_sub(&ev_hdr->state_cnt.u64, sub.u64, + __ATOMIC_RELAXED); + } + /* cmp new_cnt.evgen vs evhdl.evgen of previous gen, thus -1 */ + new_cnt.evgen -= 1; + } + + const int16_t ref_cnt = REF_CNT_INIT - new_cnt.ref_cnt; + const int16_t send_cnt = new_cnt.send_cnt - SEND_CNT_INIT; + + /* + * Check state count: + * OK: send_cnt <= ref_cnt and both >=0. + * AND + * OK: event handle evgen == evgen count (not checked for references) + * Error otherwise. + * + * Check evgen only for events that never had references. + * Reference usage mixes up the evgen since the same event can be + * sent and freed multiple times. + */ + if (unlikely((send_cnt > ref_cnt || send_cnt < 0) || + (!refs_used && evhdl.evgen != new_cnt.evgen))) { + const char *const help_str = refs_used ? 
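+		/*
+		 * Note: events that have had references skip the evgen
+		 * comparison above, so only 'send <= ref, both >= 0'
+		 * applies to them; hence the separate help_str_usr2em_ref
+		 * text selected here.
+		 */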
help_str_usr2em_ref : help_str_usr2em; + + /* report fatal event-state error, never return */ + evstate_error(new_cnt, evhdl, ev_hdr, api_op, help_str); + /* never reached */ + } + + /* + * Valid state transition, update state (non-atomic) + */ + if (!refs_used) + evhdr_update_state(ev_hdr, api_op); +} + +static inline void +esv_usr2em_multi(const em_event_t ev_tbl[], + event_hdr_t *const ev_hdr_tbl[], const int num, + const evstate_cnt_t cnt, const uint16_t api_op, + const bool is_revert) +{ + const evhdl_t *const evhdl_tbl = (const evhdl_t *)ev_tbl; + evstate_cnt_t new_cnt; + + for (int i = 0; i < num; i++) { + const bool refs_used = ev_hdr_tbl[i]->flags.refs_used; + + /* Update state-count and return value of all counters (atomic) */ + if (unlikely(is_revert)) { + /* Revert usr2em counter update on failed operation */ + new_cnt.u64 = + __atomic_sub_fetch(&ev_hdr_tbl[i]->state_cnt.u64, + cnt.u64, __ATOMIC_RELAXED); + + if (unlikely(new_cnt.evgen == EVGEN_INIT - 1)) { + /* Avoid .evgen counter wrap */ + const evstate_cnt_t add = {.evgen = EVGEN_MAX - EVGEN_INIT, + .rsvd = 0, .ref_cnt = 0, .send_cnt = 0}; + new_cnt.u64 = + __atomic_add_fetch(&ev_hdr_tbl[i]->state_cnt.u64, + add.u64, __ATOMIC_RELAXED); + } + } else { + /* Normal usr2em counter update */ + new_cnt.u64 = + __atomic_add_fetch(&ev_hdr_tbl[i]->state_cnt.u64, + cnt.u64, __ATOMIC_RELAXED); + + if (unlikely(new_cnt.evgen == EVGEN_MAX)) { + /* Avoid .evgen counter wrap */ + const evstate_cnt_t sub = {.evgen = EVGEN_MAX - EVGEN_INIT, + .rsvd = 0, .ref_cnt = 0, .send_cnt = 0}; + __atomic_fetch_sub(&ev_hdr_tbl[i]->state_cnt.u64, sub.u64, + __ATOMIC_RELAXED); + } + + new_cnt.evgen -= 1; + } + + const int16_t ref_cnt = REF_CNT_INIT - new_cnt.ref_cnt; + const int16_t send_cnt = new_cnt.send_cnt - SEND_CNT_INIT; + + /* + * Check state count: + * OK: send_cnt <= ref_cnt and both >=0. + * AND + * OK: event handle evgen == evgen count (not checked for references) + * Error otherwise. + * + * Check evgen only for events that never had references. + * Reference usage mixes up the evgen since the same event can be + * sent and freed multiple times. 
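+		 *
+		 * Illustration: evgen is bumped on every usr2em transition
+		 * (send, free, ...) and the current generation is embedded
+		 * in the handle returned to the user on the em2usr side.
+		 * A stale handle, e.g. one cached by the sender and sent
+		 * again after the event has already moved on, carries an
+		 * old evgen and is caught by the comparison below.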
+ */ + if (unlikely((send_cnt > ref_cnt || send_cnt < 0) || + (!refs_used && evhdl_tbl[i].evgen != new_cnt.evgen))) { + /* report fatal event-state error, never return */ + evstate_error(new_cnt, evhdl_tbl[i], ev_hdr_tbl[i], + api_op, help_str_usr2em); + /* never reached */ + } + + /* + * Valid state transition, update state (non-atomic) + */ + if (!refs_used) + evhdr_update_state(ev_hdr_tbl[i], api_op); + } +} + +em_event_t evstate_prealloc(const em_event_t event, event_hdr_t *const ev_hdr) +{ + return esv_evinit(event, ev_hdr, init_cnt_alloc, EVSTATE__PREALLOC); +} + +em_event_t evstate_alloc(const em_event_t event, event_hdr_t *const ev_hdr, + const uint16_t api_op) +{ + if (!em_shm->opt.esv.prealloc_pools || ev_hdr->flags.refs_used) + return esv_evinit(event, ev_hdr, init_cnt_alloc, api_op); + + const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, + .ref_cnt = 1, .send_cnt = 0}; + + return esv_em2usr(event, ev_hdr, sub, api_op, false); +} + +em_event_t evstate_alloc_tmo(const em_event_t event, event_hdr_t *const ev_hdr) +{ + return esv_evinit(event, ev_hdr, init_cnt_alloc, EVSTATE__TMO_CREATE); +} + +void evstate_alloc_multi(em_event_t ev_tbl[/*in/out*/], + event_hdr_t *const ev_hdr_tbl[], const int num) +{ + if (!em_shm->opt.esv.prealloc_pools) { + esv_evinit_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, + init_cnt_alloc, EVSTATE__ALLOC_MULTI); + return; + } + + /* em_shm->opt.esv.prealloc_pools: */ + const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, + .ref_cnt = 1, .send_cnt = 0}; + + for (int i = 0; i < num; i++) { + if (ev_hdr_tbl[i]->flags.refs_used) { + ev_tbl[i] = esv_evinit(ev_tbl[i], ev_hdr_tbl[i], + init_cnt_alloc, + EVSTATE__ALLOC_MULTI); + } else { + ev_tbl[i] = esv_em2usr(ev_tbl[i], ev_hdr_tbl[i], sub, + EVSTATE__ALLOC_MULTI, false); + } + } +} + +em_event_t evstate_ref(const em_event_t event, event_hdr_t *const ev_hdr) +{ + const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, + .ref_cnt = 1, .send_cnt = 0}; + + return esv_em2usr(event, ev_hdr, sub, EVSTATE__EVENT_REF, false); +} + +em_event_t evstate_init(const em_event_t event, event_hdr_t *const ev_hdr, + bool is_extev) +{ + if (is_extev) + return esv_evinit_ext(event, ev_hdr, EVSTATE__INIT_EXTEV); + else + return esv_evinit(event, ev_hdr, init_cnt_alloc, EVSTATE__INIT); +} + +void evstate_init_multi(em_event_t ev_tbl[/*in/out*/], + event_hdr_t *const ev_hdr_tbl[], const int num, + bool is_extev) +{ + uint16_t api_op; + evstate_cnt_t init_cnt; + + if (is_extev) { + api_op = EVSTATE__INIT_EXTEV_MULTI; + init_cnt = init_cnt_extev; + } else { + api_op = EVSTATE__INIT_MULTI; + init_cnt = init_cnt_alloc; + } + + esv_evinit_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, + init_cnt, api_op); +} + +/** + * This is a combined calculation of the following three separate + * calculations: + * + * mark allocated: + * const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, + * .ref_cnt = 1, .send_cnt = 0}; + * event = esv_em2usr(event, ev_hdr, sub, api_op, false); + * + * mark sent: + * const evstate_cnt_t add = {.evgen = 1, .rsvd = 0, + * .ref_cnt = 0, .send_cnt = 1}; + * esv_usr2em(event, ev_hdr, add, api_op, false); + * + * mark em2usr for dispatch to user EO: + * const evstate_cnt_t sub2 = {.evgen = 0, .rsvd = 0, + * .ref_cnt = 0, .send_cnt = 1}; + * event = esv_em2usr(event, ev_hdr, sub2, api_op, false); + * + * combined = add - sub - sub2 + * add: {.evgen = 1, .rsvd = 0, .ref_cnt = 0, .send_cnt = 1} + * sub: - {.evgen = 0, .rsvd = 0, .ref_cnt = 1, .send_cnt = 0} + * sub2: - {.evgen = 0, .rsvd = 0, .ref_cnt = 0, .send_cnt = 1} + * 
------------------------------------------------------- + * cmb = {.evgen = 1, .rsvd = 0, .ref_cnt =-1, .send_cnt = 0} + */ +static inline em_event_t +esv_update_ext(const em_event_t event, event_hdr_t *const ev_hdr, + const uint16_t api_op) +{ + const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, + .ref_cnt = 1, .send_cnt = 0}; + const evstate_cnt_t add = {.evgen = 1, .rsvd = 0, + .ref_cnt = 0, .send_cnt = 0}; + const evstate_cnt_t cmb = {.u64 = add.u64 - sub.u64}; /* combined, wraps */ + + const bool refs_used = ev_hdr->flags.refs_used; + evhdl_t evhdl = {.event = event}; + evstate_cnt_t new_cnt; + + /* Update state-count and return value of all counters (atomic) */ + new_cnt.u64 = __atomic_add_fetch(&ev_hdr->state_cnt.u64, + cmb.u64, __ATOMIC_RELAXED); + + if (unlikely(new_cnt.evgen == EVGEN_MAX)) { + /* Avoid .evgen counter wrap */ + const evstate_cnt_t wrap = {.evgen = EVGEN_MAX - EVGEN_INIT, + .rsvd = 0, .ref_cnt = 0, .send_cnt = 0}; + new_cnt.u64 = __atomic_sub_fetch(&ev_hdr->state_cnt.u64, wrap.u64, + __ATOMIC_RELAXED); + } + + if (!refs_used) { + evhdl.evgen = new_cnt.evgen; + ev_hdr->event = evhdl.event; + } + + const int16_t ref_cnt = REF_CNT_INIT - new_cnt.ref_cnt; + const int16_t send_cnt = new_cnt.send_cnt - SEND_CNT_INIT; + + /* + * Check state count: + * OK: send_cnt < ref_cnt and both >=0. Error otherwise. + */ + if (unlikely(send_cnt >= ref_cnt || send_cnt < 0)) { + /* report fatal event-state error, never return */ + evstate_error(new_cnt, evhdl, ev_hdr, api_op, help_str_em2usr); + /* never reached */ + } + + /* + * Valid state transition, update state (non-atomic) + */ + if (!refs_used) + evhdr_update_state(ev_hdr, api_op); + + return evhdl.event; +} + +em_event_t evstate_update(const em_event_t event, event_hdr_t *const ev_hdr, + bool is_extev) +{ + em_event_t ret_event; + + if (is_extev) { + /* combined mark allocated & mark sent */ + ret_event = esv_update_ext(event, ev_hdr, EVSTATE__UPDATE_EXTEV); + } else { + /* mark allocated */ + const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, + .ref_cnt = 1, .send_cnt = 0}; + + ret_event = esv_em2usr(event, ev_hdr, sub, EVSTATE__UPDATE_EXTEV, false); + } + + return ret_event; +} + +void evstate_free(em_event_t event, event_hdr_t *const ev_hdr, + const uint16_t api_op) +{ + const evstate_cnt_t add = {.evgen = 1, .rsvd = 0, + .ref_cnt = 1, .send_cnt = 0}; + + esv_usr2em(event, ev_hdr, add, api_op, false); +} + +void evstate_free_revert(em_event_t event, event_hdr_t *const ev_hdr, + const uint16_t api_op) +{ + const evstate_cnt_t sub = {.evgen = 1, .rsvd = 0, + .ref_cnt = 1, .send_cnt = 0}; + + esv_usr2em(event, ev_hdr, sub, api_op, true /*revert*/); +} + +void evstate_free_multi(const em_event_t ev_tbl[], + event_hdr_t *const ev_hdr_tbl[], const int num, + const uint16_t api_op) +{ + const evstate_cnt_t add = {.evgen = 1, .rsvd = 0, + .ref_cnt = 1, .send_cnt = 0}; + + esv_usr2em_multi(ev_tbl, ev_hdr_tbl, num, add, api_op, false); +} + +void evstate_free_revert_multi(const em_event_t ev_tbl[], + event_hdr_t *const ev_hdr_tbl[], const int num, + const uint16_t api_op) +{ + const evstate_cnt_t sub = {.evgen = 1, .rsvd = 0, + .ref_cnt = 1, .send_cnt = 0}; + + esv_usr2em_multi(ev_tbl, ev_hdr_tbl, num, sub, api_op, true /*revert*/); +} + +em_event_t evstate_em2usr(const em_event_t event, event_hdr_t *const ev_hdr, + const uint16_t api_op) +{ + const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, + .ref_cnt = 0, .send_cnt = 1}; + + return esv_em2usr(event, ev_hdr, sub, api_op, false); +} + +em_event_t evstate_em2usr_revert(const em_event_t 
event, event_hdr_t *const ev_hdr, + const uint16_t api_op) +{ + const evstate_cnt_t add = {.evgen = 0, .rsvd = 0, + .ref_cnt = 0, .send_cnt = 1}; + + return esv_em2usr(event, ev_hdr, add, api_op, true /*revert*/); +} + +void evstate_em2usr_multi(em_event_t ev_tbl[/*in/out*/], + event_hdr_t *const ev_hdr_tbl[], const int num, + const uint16_t api_op) +{ + const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, + .ref_cnt = 0, .send_cnt = 1}; + + esv_em2usr_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, sub, api_op, false); +} + +void evstate_em2usr_revert_multi(em_event_t ev_tbl[/*in/out*/], + event_hdr_t *const ev_hdr_tbl[], const int num, + const uint16_t api_op) +{ + const evstate_cnt_t add = {.evgen = 0, .rsvd = 0, + .ref_cnt = 0, .send_cnt = 1}; + + esv_em2usr_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, add, api_op, true /*revert*/); +} + +void evstate_usr2em(const em_event_t event, event_hdr_t *const ev_hdr, + const uint16_t api_op) +{ + const evstate_cnt_t add = {.evgen = 1, .rsvd = 0, + .ref_cnt = 0, .send_cnt = 1}; + + esv_usr2em(event, ev_hdr, add, api_op, false); +} + +void evstate_usr2em_revert(const em_event_t event, event_hdr_t *const ev_hdr, + const uint16_t api_op) +{ + const evstate_cnt_t sub = {.evgen = 1, .rsvd = 0, + .ref_cnt = 0, .send_cnt = 1}; + + esv_usr2em(event, ev_hdr, sub, api_op, true /*revert*/); +} + +void evstate_usr2em_multi(const em_event_t ev_tbl[], + event_hdr_t *const ev_hdr_tbl[], const int num, + const uint16_t api_op) +{ + const evstate_cnt_t add = {.evgen = 1, .rsvd = 0, + .ref_cnt = 0, .send_cnt = 1}; + + esv_usr2em_multi(ev_tbl, ev_hdr_tbl, num, add, api_op, false); +} + +void evstate_usr2em_revert_multi(const em_event_t ev_tbl[], + event_hdr_t *const ev_hdr_tbl[], const int num, + const uint16_t api_op) +{ + const evstate_cnt_t sub = {.evgen = 1, .rsvd = 0, + .ref_cnt = 0, .send_cnt = 1}; + + esv_usr2em_multi(ev_tbl, ev_hdr_tbl, num, sub, api_op, true /*revert*/); +} + +/* + * Ensure that em_event_unmark_...() is only called after + * em_event_mark_...() (not after normal em_send/free() etc). + */ +static inline void +check_valid_unmark(const event_hdr_t *ev_hdr, uint16_t api_op, + const uint16_t expected_ops[], const int num_ops) +{ + /* event refs: can't rely on prev api_op */ + if (ev_hdr->flags.refs_used) + return; + + uint16_t prev_op = ev_hdr->state.api_op; + + for (int i = 0; i < num_ops; i++) { + if (prev_op == expected_ops[i]) + return; /* success */ + } + + /* previous API was NOT em_event_mark_..., report FATAL error! */ + evstate_unmark_error(ev_hdr, api_op); +} + +static inline void +check_valid_unmark_multi(event_hdr_t *const ev_hdr_tbl[], const int num_evs, + uint16_t api_op, const uint16_t expected_ops[], const int num_ops) +{ + uint16_t prev_op; + bool is_valid; + + for (int i = 0; i < num_evs; i++) { + /* event refs: can't rely on prev api_op */ + if (ev_hdr_tbl[i]->flags.refs_used) + continue; + + prev_op = ev_hdr_tbl[i]->state.api_op; + is_valid = false; + + for (int j = 0; j < num_ops; j++) { + if (prev_op == expected_ops[j]) { + is_valid = true; + break; /* success */ + } + } + + /* previous API was NOT em_event_mark_..., report FATAL error!*/ + if (unlikely(!is_valid)) + evstate_unmark_error(ev_hdr_tbl[i], api_op); + } +} + +void evstate_unmark_send(const em_event_t event, event_hdr_t *const ev_hdr) +{ + if (em_shm->opt.esv.store_state) { + uint16_t expected_prev_ops[1] = {EVSTATE__MARK_SEND}; + /* + * Ensure that em_event_unmark_send() is only called after + * em_event_mark_send/_multi() (not after em_send() etc). 
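+		 *
+		 * Intended usage pattern, as a sketch only (hw_tx() stands
+		 * for some hypothetical non-EM transport, not an EM API):
+		 *
+		 *   em_event_mark_send(...);            event now counted as sent
+		 *   if (hw_tx(event) != 0)              transfer failed
+		 *           em_event_unmark_send(...);  revert, user owns event again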
+ */ + check_valid_unmark(ev_hdr, EVSTATE__UNMARK_SEND, + expected_prev_ops, 1); + } + + evstate_usr2em_revert(event, ev_hdr, EVSTATE__UNMARK_SEND); +} + +void evstate_unmark_free(const em_event_t event, event_hdr_t *const ev_hdr, + const uint16_t api_op) +{ + if (em_shm->opt.esv.store_state) { + uint16_t expected_prev_ops[2] = {EVSTATE__MARK_FREE, + EVSTATE__MARK_FREE_MULTI}; + /* + * Ensure that em_event_unmark_free() is only called + * after em_event_mark_free() (not after em_free() etc). + */ + check_valid_unmark(ev_hdr, api_op, expected_prev_ops, 2); + } + + evstate_free_revert(event, ev_hdr, api_op); +} + +void evstate_unmark_free_multi(const em_event_t ev_tbl[], + event_hdr_t *const ev_hdr_tbl[], const int num, + const uint16_t api_op) +{ + if (em_shm->opt.esv.store_state) { + uint16_t expected_prev_ops[2] = {EVSTATE__MARK_FREE_MULTI, + EVSTATE__MARK_FREE}; + /* + * Ensure that em_event_unmark_free_multi() is only + * called after em_event_mark_free_multi() + * (not after em_free/_multi() etc). + */ + check_valid_unmark_multi(ev_hdr_tbl, num, api_op, + expected_prev_ops, 2); + } + + evstate_free_revert_multi(ev_tbl, ev_hdr_tbl, num, api_op); +} + +static int read_config_file(void) +{ + const char *conf_str; + bool val_bool = false; + int ret; + + EM_PRINT("EM ESV config: (EM_ESV_ENABLE=%d)\n", EM_ESV_ENABLE); + + /* + * Option: esv.enable - runtime enable/disable + */ + conf_str = "esv.enable"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + /* store & print the value */ + em_shm->opt.esv.enable = (int)val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", + val_bool); + + if (!em_shm->opt.esv.enable) { + /* Read no more options if ESV is disabled */ + memset(&em_shm->opt.esv, 0, sizeof(em_shm->opt.esv)); + return 0; + } + + /* + * Option: esv.store_state + */ + conf_str = "esv.store_state"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + /* store & print the value */ + em_shm->opt.esv.store_state = (int)val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", + val_bool); + + /* + * Option: esv.store_payload_first_u32 + */ + conf_str = "esv.store_payload_first_u32"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + /* store & print the value */ + em_shm->opt.esv.store_first_u32 = (int)val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", + val_bool); + + /* + * Option: esv.prealloc_pools + */ + conf_str = "esv.prealloc_pools"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + /* store & print the value */ + em_shm->opt.esv.prealloc_pools = (int)val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? 
"true" : "false", + val_bool); + + return 0; +} + +em_status_t esv_init(void) +{ + if (read_config_file()) + return EM_ERR_LIB_FAILED; + + return EM_OK; +} + +void esv_disabled_warn_config(void) +{ + const char *conf_str = "esv.enable"; + bool val_bool = false; + int ret; + + EM_PRINT("EM ESV config: (EM_ESV_ENABLE=%d)\n", EM_ESV_ENABLE); + EM_PRINT(" ESV disabled\n"); + + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) + return; /* ESV state option not found in runtime, no warning */ + + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", val_bool); + + if (unlikely(val_bool)) + EM_PRINT(" WARNING: ESV disabled (build-time) - config file option IGNORED!\n"); +} diff --git a/src/em_mem.h b/src/em_mem.h index 3b2d6fba..c64e2712 100644 --- a/src/em_mem.h +++ b/src/em_mem.h @@ -1,260 +1,260 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * EM Shared & Local Memory data - * - */ - -#ifndef EM_MEM_H_ -#define EM_MEM_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#include - -/** - * EM shared memory data - * - * Struct contains data that is shared between all EM-cores, - * i.e. shared between all EM-processes or EM-threads depending on the setup. 
- */ -typedef struct { - /** Handle for this shared memory */ - odp_shm_t this_shm; - /** EM internal log function, overridable via em_conf, var args */ - em_log_func_t log_fn; - /** EM internal log function, overridable via em_conf, va_list */ - em_vlog_func_t vlog_fn; - /** EM configuration as given to em_init() */ - em_conf_t conf ENV_CACHE_LINE_ALIGNED; - /** EM config file options */ - opt_t opt ENV_CACHE_LINE_ALIGNED; - /** Mapping between physical core id <-> EM core id */ - core_map_t core_map ENV_CACHE_LINE_ALIGNED; - /** Table of buffer/packet/event pools used by EM */ - mpool_tbl_t mpool_tbl ENV_CACHE_LINE_ALIGNED; - /** Pool of free event/mempools */ - mpool_pool_t mpool_pool ENV_CACHE_LINE_ALIGNED; - /** EO table */ - eo_tbl_t eo_tbl ENV_CACHE_LINE_ALIGNED; - /** EO pool of free/unused EOs */ - eo_pool_t eo_pool ENV_CACHE_LINE_ALIGNED; - /** Event Chaining resources */ - event_chaining_t event_chaining ENV_CACHE_LINE_ALIGNED; - /** Queue table */ - queue_tbl_t queue_tbl ENV_CACHE_LINE_ALIGNED; - /** Queue pool of free/unused dynamic queues */ - queue_pool_t queue_pool ENV_CACHE_LINE_ALIGNED; - /** Queue pool of free/unused static queues */ - queue_pool_t queue_pool_static ENV_CACHE_LINE_ALIGNED; - /** Queue group table */ - queue_group_tbl_t queue_group_tbl ENV_CACHE_LINE_ALIGNED; - /** Queue group pool of free/unused queue groups */ - queue_group_pool_t queue_group_pool ENV_CACHE_LINE_ALIGNED; - /** Atomic group table */ - atomic_group_tbl_t atomic_group_tbl ENV_CACHE_LINE_ALIGNED; - /** Dynamic atomic group pool */ - atomic_group_pool_t atomic_group_pool ENV_CACHE_LINE_ALIGNED; - /** Event group table */ - event_group_tbl_t event_group_tbl ENV_CACHE_LINE_ALIGNED; - /** Event group pool of free/unused queue groups */ - event_group_pool_t event_group_pool ENV_CACHE_LINE_ALIGNED; - /** Error handler structure */ - error_handler_t error_handler ENV_CACHE_LINE_ALIGNED; - - /** Dispatcher enter callback functions currently in use */ - hook_tbl_t *dispatch_enter_cb_tbl ENV_CACHE_LINE_ALIGNED; - /** Dispatcher exit callback functions currently in use */ - hook_tbl_t *dispatch_exit_cb_tbl; - /** Alloc-hook functions currently in use */ - hook_tbl_t *alloc_hook_tbl; - /** Free-hook functions currently in use */ - hook_tbl_t *free_hook_tbl; - /** Send-hook functions currently in use */ - hook_tbl_t *send_hook_tbl; - /** To_idle hook functions currently in use */ - hook_tbl_t *to_idle_hook_tbl; - /** To_active hook functions currently in use */ - hook_tbl_t *to_active_hook_tbl; - /** While_idle hook functions currently in use */ - hook_tbl_t *while_idle_hook_tbl; - - /** Dispatch enter callback storage, many sets of callback-tables */ - hook_storage_t dispatch_enter_cb_storage ENV_CACHE_LINE_ALIGNED; - /** Dispatch exit callback storage, many sets of callback-tables */ - hook_storage_t dispatch_exit_cb_storage ENV_CACHE_LINE_ALIGNED; - /** Alloc-hook function storage, many sets of hook-tables */ - hook_storage_t alloc_hook_storage ENV_CACHE_LINE_ALIGNED; - /** Free-hook function storage, many sets of hook-tables */ - hook_storage_t free_hook_storage ENV_CACHE_LINE_ALIGNED; - /** Send-hook function storage, many sets of hook-tables */ - hook_storage_t send_hook_storage ENV_CACHE_LINE_ALIGNED; - /** To_idle hook functions storage, many sets of hook-tables */ - hook_storage_t to_idle_hook_storage; - /** To_active hook functions storage, many sets of hook-tables */ - hook_storage_t to_active_hook_storage; - /** While_idle hook functions storage, many sets of hook-tables */ - hook_storage_t 
while_idle_hook_storage; - - /** Current number of allocated EOs */ - env_atomic32_t eo_count ENV_CACHE_LINE_ALIGNED; - /** Timer resources */ - timer_storage_t timers ENV_CACHE_LINE_ALIGNED; - /** Current number of allocated queues */ - env_atomic32_t queue_count ENV_CACHE_LINE_ALIGNED; - /** Current number of allocated queue groups */ - env_atomic32_t queue_group_count; - /** Current number of allocated event groups */ - env_atomic32_t event_group_count; - /** Current number of allocated atomic groups */ - env_atomic32_t atomic_group_count; - /** Current number of allocated event pools */ - env_atomic32_t pool_count; - /** libconfig setting, default (compiled) and runtime (from file) */ - libconfig_t libconfig; - /** priority mapping */ - struct { - /** mapping table */ - int map[EM_QUEUE_PRIO_NUM]; - int num_runtime; - } queue_prio; - - /** Guarantee that size is a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} em_shm_t; - -COMPILE_TIME_ASSERT(sizeof(em_shm_t) % ENV_CACHE_LINE_SIZE == 0, - EM_SHM_SIZE_ERROR); - -/** - * EM core/local current state - * - * Contains information about the current EO, queue, event group etc. when - * running in an EO context (e.g. in an EO-receive function), - * undef/NULL otherwise. - */ -typedef struct ODP_PACKED { - /** Current scheduling context type */ - em_sched_context_type_t sched_context_type; - /** EO-receive function burst count */ - int rcv_multi_cnt; - /** Current queue element during a receive call */ - queue_elem_t *q_elem; - /** Current scheduled queue element that set the sched context*/ - queue_elem_t *sched_q_elem; - /** Current event group */ - em_event_group_t egrp; - /** Current event group element */ - event_group_elem_t *egrp_elem; - /** Current event group generation count*/ - int32_t egrp_gen; -} em_locm_current_t; - -/** - * EM core local data - */ -typedef struct { - /** EM core/local current state */ - em_locm_current_t current; - - /** Idle state of the core, used when calling idle hooks */ - idle_state_t idle_state; - - /** EM core id for this core */ - int core_id; - /** The number of events from the scheduler to dispatch */ - int event_burst_cnt; - - /** em_atomic_processing_end() called during event dispatch */ - bool atomic_group_released; - /** Is input_poll_fn executed on this core */ - bool do_input_poll; - /** Is output_drain_fn executed on this core */ - bool do_output_drain; - /** Is thread external to EM (doesn't participate in event dispatching) */ - bool is_external_thr; - /** Is the scheduler paused on this core (for odp_sched_pause/resume()) */ - bool is_sched_paused; - - /** Number of dispatch rounds since previous polling of ctrl queues */ - unsigned int dispatch_cnt; - /** Time when polling of ctrl queues where last done */ - odp_time_t dispatch_last_run; - - /** Number of dispatch rounds since previous call of poll/drain functions */ - unsigned int poll_drain_dispatch_cnt; - /** Time when poll and drain functions were last called */ - odp_time_t poll_drain_dispatch_last_run; - - /** Local queues, i.e. 
storage for events to local queues */ - local_queues_t local_queues; - - /** EO start-function ongoing, buffer all events and send after start */ - eo_elem_t *start_eo_elem; - /** The number of errors on a core */ - uint64_t error_count; - - /** EM-core local log function */ - em_log_func_t log_fn; - - /** EM-core local log function with va_list */ - em_vlog_func_t vlog_fn; - - /** Synchronous API */ - sync_api_t sync_api; - - /** dispatcher debug timestamps (ns) */ - uint64_t debug_ts[EM_DEBUG_TSP_LAST]; - - /** Track output-queues used during this dispatch round (burst) */ - output_queue_track_t output_queue_track; - - /** Guarantee that size is a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} em_locm_t; - -COMPILE_TIME_ASSERT((sizeof(em_locm_t) % ENV_CACHE_LINE_SIZE) == 0, - EM_LOCM_SIZE_ERROR); - -/** EM shared memory pointer */ -extern em_shm_t *em_shm; -/** EM core local memory */ -extern ENV_LOCAL em_locm_t em_locm; - -#ifdef __cplusplus -} -#endif - -#endif /* EM_MEM_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * EM Shared & Local Memory data + * + */ + +#ifndef EM_MEM_H_ +#define EM_MEM_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/** + * EM shared memory data + * + * Struct contains data that is shared between all EM-cores, + * i.e. shared between all EM-processes or EM-threads depending on the setup. 
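+ *
+ * A minimal sketch, assuming only the generic ODP shm API (this is not the
+ * actual EM init code), of how a shared block like this is reserved and
+ * mapped so that all EM-cores see the same data:
+ *
+ * @code
+ *	odp_shm_t shm = odp_shm_reserve("em_shm", sizeof(em_shm_t),
+ *					ODP_CACHE_LINE_SIZE, 0);
+ *	em_shm_t *shm_data = odp_shm_addr(shm);
+ *
+ *	shm_data->this_shm = shm; /* stored in the struct itself, see below */
+ * @endcode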
+ */
+typedef struct {
+	/** Handle for this shared memory */
+	odp_shm_t this_shm;
+	/** EM internal log function, overridable via em_conf, var args */
+	em_log_func_t log_fn;
+	/** EM internal log function, overridable via em_conf, va_list */
+	em_vlog_func_t vlog_fn;
+	/** EM configuration as given to em_init() */
+	em_conf_t conf ENV_CACHE_LINE_ALIGNED;
+	/** EM config file options */
+	opt_t opt ENV_CACHE_LINE_ALIGNED;
+	/** Mapping between physical core id <-> EM core id */
+	core_map_t core_map ENV_CACHE_LINE_ALIGNED;
+	/** Table of buffer/packet/event pools used by EM */
+	mpool_tbl_t mpool_tbl ENV_CACHE_LINE_ALIGNED;
+	/** Pool of free event/mempools */
+	mpool_pool_t mpool_pool ENV_CACHE_LINE_ALIGNED;
+	/** EO table */
+	eo_tbl_t eo_tbl ENV_CACHE_LINE_ALIGNED;
+	/** EO pool of free/unused EOs */
+	eo_pool_t eo_pool ENV_CACHE_LINE_ALIGNED;
+	/** Event Chaining resources */
+	event_chaining_t event_chaining ENV_CACHE_LINE_ALIGNED;
+	/** Queue table */
+	queue_tbl_t queue_tbl ENV_CACHE_LINE_ALIGNED;
+	/** Queue pool of free/unused dynamic queues */
+	queue_pool_t queue_pool ENV_CACHE_LINE_ALIGNED;
+	/** Queue pool of free/unused static queues */
+	queue_pool_t queue_pool_static ENV_CACHE_LINE_ALIGNED;
+	/** Queue group table */
+	queue_group_tbl_t queue_group_tbl ENV_CACHE_LINE_ALIGNED;
+	/** Queue group pool of free/unused queue groups */
+	queue_group_pool_t queue_group_pool ENV_CACHE_LINE_ALIGNED;
+	/** Atomic group table */
+	atomic_group_tbl_t atomic_group_tbl ENV_CACHE_LINE_ALIGNED;
+	/** Dynamic atomic group pool */
+	atomic_group_pool_t atomic_group_pool ENV_CACHE_LINE_ALIGNED;
+	/** Event group table */
+	event_group_tbl_t event_group_tbl ENV_CACHE_LINE_ALIGNED;
+	/** Event group stash of free/unused event groups */
+	odp_stash_t event_group_stash ENV_CACHE_LINE_ALIGNED;
+	/** Error handler structure */
+	error_handler_t error_handler ENV_CACHE_LINE_ALIGNED;
+
+	/** Dispatcher enter callback functions currently in use */
+	hook_tbl_t *dispatch_enter_cb_tbl ENV_CACHE_LINE_ALIGNED;
+	/** Dispatcher exit callback functions currently in use */
+	hook_tbl_t *dispatch_exit_cb_tbl;
+	/** Alloc-hook functions currently in use */
+	hook_tbl_t *alloc_hook_tbl;
+	/** Free-hook functions currently in use */
+	hook_tbl_t *free_hook_tbl;
+	/** Send-hook functions currently in use */
+	hook_tbl_t *send_hook_tbl;
+	/** To_idle hook functions currently in use */
+	hook_tbl_t *to_idle_hook_tbl;
+	/** To_active hook functions currently in use */
+	hook_tbl_t *to_active_hook_tbl;
+	/** While_idle hook functions currently in use */
+	hook_tbl_t *while_idle_hook_tbl;
+
+	/** Dispatch enter callback storage, many sets of callback-tables */
+	hook_storage_t dispatch_enter_cb_storage ENV_CACHE_LINE_ALIGNED;
+	/** Dispatch exit callback storage, many sets of callback-tables */
+	hook_storage_t dispatch_exit_cb_storage ENV_CACHE_LINE_ALIGNED;
+	/** Alloc-hook function storage, many sets of hook-tables */
+	hook_storage_t alloc_hook_storage ENV_CACHE_LINE_ALIGNED;
+	/** Free-hook function storage, many sets of hook-tables */
+	hook_storage_t free_hook_storage ENV_CACHE_LINE_ALIGNED;
+	/** Send-hook function storage, many sets of hook-tables */
+	hook_storage_t send_hook_storage ENV_CACHE_LINE_ALIGNED;
+	/** To_idle hook functions storage, many sets of hook-tables */
+	hook_storage_t to_idle_hook_storage;
+	/** To_active hook functions storage, many sets of hook-tables */
+	hook_storage_t to_active_hook_storage;
+	/** While_idle hook functions storage, many sets of hook-tables */
+	hook_storage_t while_idle_hook_storage;
+
+	/** Current number of allocated EOs */
+	env_atomic32_t eo_count ENV_CACHE_LINE_ALIGNED;
+	/** Timer resources */
+	timer_storage_t timers ENV_CACHE_LINE_ALIGNED;
+	/** Current number of allocated queues */
+	env_atomic32_t queue_count ENV_CACHE_LINE_ALIGNED;
+	/** Current number of allocated queue groups */
+	env_atomic32_t queue_group_count;
+	/** Current number of allocated event groups */
+	env_atomic32_t event_group_count;
+	/** Current number of allocated atomic groups */
+	env_atomic32_t atomic_group_count;
+	/** Current number of allocated event pools */
+	env_atomic32_t pool_count;
+	/** libconfig setting, default (compiled) and runtime (from file) */
+	libconfig_t libconfig;
+	/** priority mapping */
+	struct {
+		/** mapping table */
+		int map[EM_QUEUE_PRIO_NUM];
+		int num_runtime;
+	} queue_prio;
+
+	/** Guarantee that size is a multiple of cache line size */
+	void *end[0] ENV_CACHE_LINE_ALIGNED;
+} em_shm_t;
+
+COMPILE_TIME_ASSERT(sizeof(em_shm_t) % ENV_CACHE_LINE_SIZE == 0,
+		    EM_SHM_SIZE_ERROR);
+
+/**
+ * EM core/local current state
+ *
+ * Contains information about the current EO, queue, event group etc. when
+ * running in an EO context (e.g. in an EO-receive function),
+ * undef/NULL otherwise.
+ */
+typedef struct ODP_PACKED {
+	/** Current scheduling context type */
+	em_sched_context_type_t sched_context_type;
+	/** EO-receive function burst count */
+	int rcv_multi_cnt;
+	/** Current queue element during a receive call */
+	queue_elem_t *q_elem;
+	/** Current scheduled queue element that set the sched context */
+	queue_elem_t *sched_q_elem;
+	/** Current event group */
+	em_event_group_t egrp;
+	/** Current event group element */
+	event_group_elem_t *egrp_elem;
+	/** Current event group generation count */
+	int32_t egrp_gen;
+} em_locm_current_t;
+
+/**
+ * EM core local data
+ */
+typedef struct {
+	/** EM core/local current state */
+	em_locm_current_t current;
+
+	/** Idle state of the core, used when calling idle hooks */
+	idle_state_t idle_state;
+
+	/** EM core id for this core */
+	int core_id;
+	/** The number of events from the scheduler to dispatch */
+	int event_burst_cnt;
+
+	/** em_atomic_processing_end() called during event dispatch */
+	bool atomic_group_released;
+	/** Is input_poll_fn executed on this core */
+	bool do_input_poll;
+	/** Is output_drain_fn executed on this core */
+	bool do_output_drain;
+	/** Is thread external to EM (doesn't participate in event dispatching) */
+	bool is_external_thr;
+	/** Is the scheduler paused on this core (for odp_sched_pause/resume()) */
+	bool is_sched_paused;
+
+	/** Number of dispatch rounds since previous polling of ctrl queues */
+	unsigned int dispatch_cnt;
+	/** Time when polling of ctrl queues was last done */
+	odp_time_t dispatch_last_run;
+
+	/** Number of dispatch rounds since previous call of poll/drain functions */
+	unsigned int poll_drain_dispatch_cnt;
+	/** Time when poll and drain functions were last called */
+	odp_time_t poll_drain_dispatch_last_run;
+
+	/** Local queues, i.e. 
storage for events to local queues */ + local_queues_t local_queues; + + /** EO start-function ongoing, buffer all events and send after start */ + eo_elem_t *start_eo_elem; + /** The number of errors on a core */ + uint64_t error_count; + + /** EM-core local log function */ + em_log_func_t log_fn; + + /** EM-core local log function with va_list */ + em_vlog_func_t vlog_fn; + + /** Synchronous API */ + sync_api_t sync_api; + + /** dispatcher debug timestamps (ns) */ + uint64_t debug_ts[EM_DEBUG_TSP_LAST]; + + /** Track output-queues used during this dispatch round (burst) */ + output_queue_track_t output_queue_track; + + /** Guarantee that size is a multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} em_locm_t; + +COMPILE_TIME_ASSERT((sizeof(em_locm_t) % ENV_CACHE_LINE_SIZE) == 0, + EM_LOCM_SIZE_ERROR); + +/** EM shared memory pointer */ +extern em_shm_t *em_shm; +/** EM core local memory */ +extern ENV_LOCAL em_locm_t em_locm; + +#ifdef __cplusplus +} +#endif + +#endif /* EM_MEM_H_ */ diff --git a/src/em_pool.c b/src/em_pool.c index fe89252e..cc52607e 100644 --- a/src/em_pool.c +++ b/src/em_pool.c @@ -1,2426 +1,2426 @@ -/* - * Copyright (c) 2015-2023, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "em_include.h" - -#ifndef __clang__ -COMPILE_TIME_ASSERT(EM_POOL_DEFAULT > (em_pool_t)0 && - EM_POOL_DEFAULT < (em_pool_t)EM_CONFIG_POOLS, - EM_ODP_EM_DEFAULT_POOL_ERROR); -COMPILE_TIME_ASSERT(EM_POOL_UNDEF != EM_POOL_DEFAULT, - EM_ODP_EM_POOL_UNDEF_ERROR); -#endif -COMPILE_TIME_ASSERT(EM_EVENT_USER_AREA_MAX_SIZE < UINT16_MAX, - EM_ODP_EM_EVENT_USER_AREA_MAX_SIZE_ERROR); - -/** - * @def ALIGN_OFFSET_MAX - * - * Max supported value for the config file option 'pool.align_offset'. 
- */ -#define ALIGN_OFFSET_MAX ((int)(16)) - -/* ALIGN_OFFSET_MAX <= 2^bits - 1, must fit into event_hdr_t::align_offset */ -COMPILE_TIME_ASSERT(ALIGN_OFFSET_MAX <= - ((1 << (sizeof_field(event_hdr_t, align_offset) * 8)) - 1), - ALIGN_OFFSET_MAX__TOO_LARGE); - -/** - * @brief Undef value for a pool_subpool_t - * pool_subpool_undef = {.pool = EM_POOL_UNDEF, .subpool = 0}; - */ -const pool_subpool_t pool_subpool_undef = {.pool = (uint32_t)(uintptr_t)EM_POOL_UNDEF, - .subpool = 0}; - -static inline mpool_elem_t * -mpool_poolelem2pool(objpool_elem_t *const objpool_elem) -{ - return (mpool_elem_t *)((uintptr_t)objpool_elem - - offsetof(mpool_elem_t, objpool_elem)); -} - -static em_pool_t -pool_alloc(em_pool_t pool) -{ - mpool_elem_t *mpool_elem; - - if (pool == EM_POOL_UNDEF) { - objpool_elem_t *objpool_elem = - objpool_rem(&em_shm->mpool_pool.objpool, em_core_id()); - - if (unlikely(objpool_elem == NULL)) - return EM_POOL_UNDEF; - - mpool_elem = mpool_poolelem2pool(objpool_elem); - } else { - int ret; - - mpool_elem = pool_elem_get(pool); - if (unlikely(mpool_elem == NULL)) - return EM_POOL_UNDEF; - - ret = objpool_rem_elem(&em_shm->mpool_pool.objpool, - &mpool_elem->objpool_elem); - if (unlikely(ret != 0)) - return EM_POOL_UNDEF; - } - - env_atomic32_inc(&em_shm->pool_count); - return mpool_elem->em_pool; -} - -static em_status_t -pool_free(em_pool_t pool) -{ - mpool_elem_t *mpool_elem = pool_elem_get(pool); - - if (unlikely(mpool_elem == NULL)) - return EM_ERR_BAD_ID; - - objpool_add(&em_shm->mpool_pool.objpool, - mpool_elem->objpool_elem.subpool_idx, - &mpool_elem->objpool_elem); - - env_atomic32_dec(&em_shm->pool_count); - return EM_OK; -} - -static int event_type_from_string(const char *str, em_event_type_t *event_type /*out*/) -{ - if (strstr(str, "EM_EVENT_TYPE_SW")) { - *event_type = EM_EVENT_TYPE_SW; - } else if (strstr(str, "EM_EVENT_TYPE_PACKET")) { - *event_type = EM_EVENT_TYPE_PACKET; - } else if (strstr(str, "EM_EVENT_TYPE_VECTOR")) { - *event_type = EM_EVENT_TYPE_VECTOR; - } else { - EM_LOG(EM_LOG_ERR, "Event type %s not supported.\n", str); - return -1; - } - - return 0; -} - -/* Read option: startup_pools.conf[i].pool_cfg.subpools[j] from the EM config file */ -static inline int read_config_subpool(const libconfig_list_t *subpool, int index, - const char *pool_cfg_str, em_pool_cfg_t *cfg/*out*/) -{ - int ret; - /* Option: subpools[index].size */ - ret = em_libconfig_list_lookup_int(subpool, index, "size", - (int *)&cfg->subpool[index].size); - if (unlikely(ret != 1)) { - EM_LOG(EM_LOG_ERR, - "Option '%s.subpools[%d].size' not found or wrong type.\n", - pool_cfg_str, index); - return -1; - } - - if (cfg->subpool[index].size <= 0) { - EM_LOG(EM_LOG_ERR, "Invalid '%s.subpools[%d].size'.\n", - pool_cfg_str, index); - return -1; - } - - /* Option: subpools[index].num */ - ret = em_libconfig_list_lookup_int(subpool, index, "num", - (int *)&cfg->subpool[index].num); - if (unlikely(ret != 1)) { - EM_LOG(EM_LOG_ERR, - "Option '%s.subpools[%d].num' not found or wrong type.\n", - pool_cfg_str, index); - return -1; - } - - if (cfg->subpool[index].num <= 0) { - EM_LOG(EM_LOG_ERR, "Invalid '%s.subpools[%d].num'.\n", - pool_cfg_str, index); - return -1; - } - - /* - * Option: subpools[index].cache_size - * Not mandatory - */ - ret = em_libconfig_list_lookup_int(subpool, index, "cache_size", - (int *)&cfg->subpool[index].cache_size); - - /* If cache_size is given, check if it is valid */ - if (ret == 1) { - uint32_t min_cache_size; - const odp_pool_capability_t *capa; - - capa = 
&em_shm->mpool_tbl.odp_pool_capability; - - min_cache_size = (cfg->event_type == EM_EVENT_TYPE_SW) ? - capa->buf.min_cache_size : capa->pkt.min_cache_size; - - if (unlikely(cfg->subpool[index].cache_size < min_cache_size)) { - EM_LOG(EM_LOG_ERR, - "'%s.subpools[%d].cache_size' too small.\n", - pool_cfg_str, index); - return -1; - } - } else if (ret == 0) {/*cache_size is given but with wrong data type*/ - EM_LOG(EM_LOG_ERR, - "'%s.subpools[%d].cache_size' wrong data type.\n", - pool_cfg_str, index); - return -1; - } - - /* No need to return fail -1 when cache_size not given (ret == -1) */ - return 0; -} - -static int is_pool_type_supported(em_event_type_t type, - const char **err_str/*out*/) -{ - const odp_pool_capability_t *capa = &em_shm->mpool_tbl.odp_pool_capability; - - if (type == EM_EVENT_TYPE_SW) { - if (capa->buf.max_pools == 0) { - *err_str = "SW (buf) pool type unsupported"; - return -1; - } - } else if (type == EM_EVENT_TYPE_PACKET) { - if (capa->pkt.max_pools == 0) { - *err_str = "PACKET pool type unsupported"; - return -1; - } - } else if (type == EM_EVENT_TYPE_VECTOR) { - if (capa->vector.max_pools == 0) { - *err_str = "VECTOR pool type unsupported"; - return -1; - } - } else { - *err_str = "Pool type unsupported, use _SW, _PACKET or _VECTOR"; - return -1; - } - - return 0; -} - -static inline bool is_align_offset_valid(const em_pool_cfg_t *pool_cfg) -{ - if (pool_cfg->align_offset.in_use && - (pool_cfg->align_offset.value > ALIGN_OFFSET_MAX || - !POWEROF2(pool_cfg->align_offset.value))) { - return false; - } - - return true; -} - -static inline int is_user_area_valid(const em_pool_cfg_t *pool_cfg, - const odp_pool_capability_t *capa, - const char **err_str/*out*/) -{ - /* No need to check when pool specific value is not used */ - if (!pool_cfg->user_area.in_use) - return 0; - - if (pool_cfg->user_area.size > EM_EVENT_USER_AREA_MAX_SIZE) { - *err_str = "Event user area too large"; - return -1; - } - - if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) { - size_t req_odp_uarea_sz = pool_cfg->user_area.size + - sizeof(event_hdr_t); - if (req_odp_uarea_sz > capa->pkt.max_uarea_size) { - *err_str = "ODP pkt max uarea not large enough"; - return -1; - } - } - if (pool_cfg->event_type == EM_EVENT_TYPE_VECTOR) { - size_t req_odp_uarea_sz = pool_cfg->user_area.size + - sizeof(event_hdr_t); - if (req_odp_uarea_sz > capa->vector.max_uarea_size) { - *err_str = "ODP pkt-vector max uarea not large enough"; - return -1; - } - } - - return 0; -} - -/* Read option: startup_pools.conf[index].pool_cfg.align_offset from the EM config file */ -static inline int read_config_align_offset(const libconfig_group_t *align_offset, - const char *pool_cfg_str, - em_pool_cfg_t *cfg/*out*/) -{ - int ret; - - /* Option: startup_pools.conf[index].pool_cfg.align_offset.in_use */ - ret = em_libconfig_group_lookup_bool(align_offset, "in_use", - &cfg->align_offset.in_use); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, - "'%s.align_offset.in_use' not found or wrong type\n", - pool_cfg_str); - return -1; - } - - /* Option: startup_pools.conf[index].pool_cfg.align_offset.value */ - ret = em_libconfig_group_lookup_int(align_offset, "value", - (int *)&cfg->align_offset.value); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, - "'%s.align_offset.value' not found or wrong type\n", - pool_cfg_str); - return -1; - } - - /* Check whether the given value is valid or not */ - if (!is_align_offset_valid(cfg)) { - EM_LOG(EM_LOG_ERR, "Invalid '%s.align_offset.value': %d\n" - "Max align_offset is %d and it must be power of 2\n", - 
pool_cfg_str, cfg->align_offset.value, ALIGN_OFFSET_MAX); - return -1; - } - - return 0; -} - -/* Read option: startup_pools.conf[index].pool_cfg.user_area from the EM config file */ -static inline int read_config_user_area(const libconfig_group_t *user_area, - const char *pool_cfg_str, - em_pool_cfg_t *cfg/*out*/) -{ - int ret; - const odp_pool_capability_t *capa; - const char *err_str = ""; - - /* Option: startup_pools.conf[index].pool_cfg.user_area.in_use */ - ret = em_libconfig_group_lookup_bool(user_area, "in_use", - &cfg->user_area.in_use); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, - "'%s.user_area.in_use' not found or wrong type\n", - pool_cfg_str); - return -1; - } - - /* Option: startup_pools.conf[index].pool_cfg.user_area.size */ - ret = em_libconfig_group_lookup_int(user_area, "size", - (int *)&cfg->user_area.size); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, - "'%s.user_area.size' not found or wrong type\n", - pool_cfg_str); - return -1; - } - - capa = &em_shm->mpool_tbl.odp_pool_capability; - /* Check whether the given value is valid or not */ - if (is_user_area_valid(cfg, capa, &err_str) < 0) { - EM_LOG(EM_LOG_ERR, "%s: %ld\n", err_str, cfg->user_area.size); - return -1; - } - - return 0; -} - -/* Read option: startup_pools.conf[index].pool_cfg.pkt.headroom from the EM config file */ -static inline int read_config_pkt_headroom(const libconfig_group_t *pkt_headroom, - const char *pool_cfg_str, - em_pool_cfg_t *cfg/*out*/) -{ - int ret; - const odp_pool_capability_t *capa; - - /*Option: startup_pools.conf[index].pool_cfg.pkt.headroom.in_use*/ - ret = em_libconfig_group_lookup_bool(pkt_headroom, "in_use", - &cfg->pkt.headroom.in_use); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, - "'%s.pkt.headroom.in_use' not found or wrong type\n", - pool_cfg_str); - return -1; - } - - /*Option: startup_pools.conf[index].pool_cfg.pkt.headroom.value*/ - ret = em_libconfig_group_lookup_int(pkt_headroom, "value", - (int *)&cfg->pkt.headroom.value); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, - "'%s.pkt.headroom.value' not found or wrong type\n", - pool_cfg_str); - return -1; - } - - /* Check whether the given value is valid or not */ - capa = &em_shm->mpool_tbl.odp_pool_capability; - if (cfg->pkt.headroom.in_use && - cfg->pkt.headroom.value > capa->pkt.max_headroom) { - EM_LOG(EM_LOG_ERR, - "'%s.pkt.headroom.value' %d too large (max=%d)\n", - pool_cfg_str, cfg->pkt.headroom.value, - capa->pkt.max_headroom); - return -1; - } - - return 0; -} - -/* Read option: startup_pools.conf[index] from the EM config file */ -static int read_config_startup_pools_conf(const libconfig_list_t *list, int index) -{ - int ret; - int pool; - int ret_pool; - int num_subpools; - const char *pool_name; - const char *event_type; - char pool_cfg_str[40]; - libconfig_group_t *pool_cfg; - const libconfig_list_t *subpool; - const libconfig_group_t *headroom; - const libconfig_group_t *user_area; - const libconfig_group_t *align_offset; - startup_pool_conf_t *conf = &em_shm->opt.startup_pools.conf[index]; - em_pool_cfg_t *cfg = &conf->cfg; - const char *err_str = ""; - - snprintf(pool_cfg_str, sizeof(pool_cfg_str), - "startup_pools.conf[%d].pool_cfg", index); - - pool_cfg = em_libconfig_list_lookup_group(list, index, "pool_cfg"); - if (!pool_cfg) { - EM_LOG(EM_LOG_ERR, "Conf option '%s' not found\n", pool_cfg_str); - return -1; - } - - em_pool_cfg_init(cfg); - - /* - * Read mandatory fields first, in case they are not provided, no need - * to proceed to read optional fields. 
- */ - - /* Option: startup_pools.conf[index].pool_cfg.event_type */ - ret = em_libconfig_group_lookup_string(pool_cfg, "event_type", &event_type); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "'%s.event_type' not found.\n", pool_cfg_str); - return -1; - } - - ret = event_type_from_string(event_type, &cfg->event_type/*out*/); - if (unlikely(ret < 0)) - return -1; - - ret = is_pool_type_supported(cfg->event_type, &err_str/*out*/); - if (unlikely(ret)) { - EM_LOG(EM_LOG_ERR, "%s", err_str); - return -1; - } - - /* Option: startup_pools.conf[index].pool_cfg.num_subpools */ - ret = em_libconfig_group_lookup_int(pool_cfg, "num_subpools", - &cfg->num_subpools); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "'%s.num_subpools' not found.\n", pool_cfg_str); - return -1; - } - - if (cfg->num_subpools <= 0 || cfg->num_subpools > EM_MAX_SUBPOOLS) { - EM_LOG(EM_LOG_ERR, "Invalid '%s.num_subpools'\n" - "Valid value range is [1, %d]\n", pool_cfg_str, - EM_MAX_SUBPOOLS); - return -1; - } - - /* Option: startup_pools.conf[index].pool_cfg.subpools */ - subpool = em_libconfig_group_lookup_list(pool_cfg, "subpools"); - if (unlikely(!subpool)) { - EM_LOG(EM_LOG_ERR, "'%s.subpools' not found.\n", pool_cfg_str); - return -1; - } - - num_subpools = em_libconfig_list_length(subpool); - if (unlikely(num_subpools != cfg->num_subpools)) { - EM_LOG(EM_LOG_ERR, "The number of subpool configuration given\n" - "in '%s.subpools' does not match '%s.num_subpools'.\n", - pool_cfg_str, pool_cfg_str); - return -1; - } - - for (int j = 0; j < num_subpools; j++) { - ret = read_config_subpool(subpool, j, pool_cfg_str, cfg); - - if (unlikely(ret < 0)) - return -1; - } - - /* Following are optional configurations */ - - /* Option: startup_pools.conf[index].pool */ - ret_pool = em_libconfig_list_lookup_int(list, index, "pool", &pool); - if (unlikely(ret_pool == 0)) { - EM_LOG(EM_LOG_ERR, - "'startup_pools.conf[%d].pool' has wrong data type(expect int)\n", - index); - return -1; - } - - /* startup_pools.conf[index].pool is provided */ - if (ret_pool == 1) { - if (pool < 0 || pool > EM_CONFIG_POOLS) { - EM_LOG(EM_LOG_ERR, "Invalid pool ID %d, valid IDs are within [0, %d]\n", - pool, EM_CONFIG_POOLS); - return -1; - } - - conf->pool = (em_pool_t)(uintptr_t)pool; - } - - /* Option: startup_pools.conf[index].name */ - ret = em_libconfig_list_lookup_string(list, index, "name", &pool_name); - if (unlikely(ret == 0)) { - EM_LOG(EM_LOG_ERR, - "'startup_pools.conf[%d].name' has wrong data type(expect string)\n", - index); - return -1; - } - - if (ret_pool == 1 && ret == 1) { /*Both pool and name have been given*/ - const char *is_default_name = strstr(pool_name, EM_POOL_DEFAULT_NAME); - bool is_default_id = (conf->pool == EM_POOL_DEFAULT); - - if (is_default_name && !is_default_id) { - EM_LOG(EM_LOG_ERR, - "Default name \"%s\" with non-default ID %d\n", - EM_POOL_DEFAULT_NAME, (int)(uintptr_t)conf->pool); - return -1; - } - - if (is_default_id && !is_default_name) { - EM_LOG(EM_LOG_ERR, - "Default pool ID 1 with non-default name \"%s\"\n", - pool_name); - return -1; - } - } - - if (ret == 1) { /* Pool name is given and no conflict with pool ID */ - strncpy(conf->name, pool_name, EM_POOL_NAME_LEN - 1); - conf->name[EM_POOL_NAME_LEN - 1] = '\0'; - } - - align_offset = em_libconfig_group_lookup_group(pool_cfg, "align_offset"); - /*align_offset is provided*/ - if (align_offset && read_config_align_offset(align_offset, pool_cfg_str, cfg)) - return -1; - - user_area = em_libconfig_group_lookup_group(pool_cfg, "user_area"); - if (user_area && 
read_config_user_area(user_area, pool_cfg_str, cfg)) - return -1; - - headroom = em_libconfig_group_lookup_group(pool_cfg, "pkt.headroom"); - if (headroom) { - if (read_config_pkt_headroom(headroom, pool_cfg_str, cfg)) - return -1; - - /* Ignore the given pkt.headroom for non packet event type */ - if (conf->cfg.event_type != EM_EVENT_TYPE_PACKET) - EM_PRINT("pkt.headroom will be ignored for non packet type!\n"); - } - - return 0; -} - -/* Print option: startup_pools from the EM config file */ -static void print_config_startup_pools(void) -{ - startup_pool_conf_t *conf; - char str_conf[32]; - const char *str = ""; - - EM_PRINT(" startup_pools.num: %u\n", em_shm->opt.startup_pools.num); - - for (uint32_t i = 0; i < em_shm->opt.startup_pools.num; i++) { - conf = &em_shm->opt.startup_pools.conf[i]; - - snprintf(str_conf, sizeof(str_conf), " startup_pools.conf[%d]", i); - - if (*conf->name) - EM_PRINT("%s.name: %s\n", str_conf, conf->name); - - if (conf->pool) - EM_PRINT("%s.pool: %d\n", str_conf, (int)(uintptr_t)conf->pool); - - /*event type*/ - if (conf->cfg.event_type == EM_EVENT_TYPE_SW) - str = "EM_EVENT_TYPE_SW"; - else if (conf->cfg.event_type == EM_EVENT_TYPE_PACKET) - str = "EM_EVENT_TYPE_PACKET"; - else if (conf->cfg.event_type == EM_EVENT_TYPE_VECTOR) - str = "EM_EVENT_TYPE_VECTOR"; - EM_PRINT("%s.pool_cfg.event_type: %s\n", str_conf, str); - - /*align_offset*/ - str = conf->cfg.align_offset.in_use ? "true" : "false"; - EM_PRINT("%s.pool_cfg.align_offset.in_use: %s\n", str_conf, str); - EM_PRINT("%s.pool_cfg.align_offset.value: %d\n", str_conf, - conf->cfg.align_offset.value); - - /*user area*/ - str = conf->cfg.user_area.in_use ? "true" : "false"; - EM_PRINT("%s.pool_cfg.user_area.in_use: %s\n", str_conf, str); - EM_PRINT("%s.pool_cfg.user_area.size: %ld\n", str_conf, - conf->cfg.user_area.size); - - /*pkt headroom*/ - str = conf->cfg.pkt.headroom.in_use ? "true" : "false"; - EM_PRINT("%s.pool_cfg.pkt.headroom.in_use: %s\n", str_conf, str); - EM_PRINT("%s.pool_cfg.pkt.headroom.value: %d\n", str_conf, - conf->cfg.pkt.headroom.value); - - /*number of subpools*/ - EM_PRINT("%s.pool_cfg.num_subpools: %u\n", str_conf, - conf->cfg.num_subpools); - - /*subpools*/ - for (int j = 0; j < conf->cfg.num_subpools; j++) { - EM_PRINT("%s.pool_cfg.subpools[%d].size: %u\n", str_conf, - j, conf->cfg.subpool[j].size); - - EM_PRINT("%s.pool_cfg.subpools[%d].num: %u\n", str_conf, - j, conf->cfg.subpool[j].num); - - EM_PRINT("%s.pool_cfg.subpools[%d].cache_size: %u\n", - str_conf, j, conf->cfg.subpool[j].cache_size); - } - } -} - -/* Read option: startup_pools from the EM config file */ -static int read_config_startup_pools(void) -{ - int ret; - int list_len; - int num_startup_pools; - const libconfig_list_t *conf_list; - libconfig_setting_t *default_setting; - libconfig_setting_t *runtime_setting; - libconfig_setting_t *startup_pools_setting; - - em_libconfig_lookup(&em_shm->libconfig, "startup_pools", - &default_setting, &runtime_setting); - - /* - * Option: startup_pools - * - * Optional. Thus, when runtime configuration is provided, and option - * "startup_pools" is given, use it. However, when option "startup_pools" - * is not specified in the given runtime configuration file, returns - * without giving error, which means no startup pools will be created. - * Note that it does not fall back to use the option "startup_pools" - * specified in the default configuration file. 
- */ - if (em_shm->libconfig.has_cfg_runtime) { - if (runtime_setting) - startup_pools_setting = runtime_setting; - else - return 0; - } else { - if (default_setting) - startup_pools_setting = default_setting; - else - return 0; - } - - EM_PRINT("EM-startup_pools config:\n"); - /* - * Option: startup_pools.num - * Mandatory when startup_pools option is given - */ - ret = em_libconfig_setting_lookup_int(startup_pools_setting, "num", - &num_startup_pools); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Option 'startup_pools.num' not found\n"); - return -1; - } - - if (num_startup_pools <= 0 || num_startup_pools > EM_CONFIG_POOLS - 1) { - EM_LOG(EM_LOG_ERR, - "Number of startup_pools %d is too large or too small\n" - "Valid value range is [1, %d]\n", - num_startup_pools, EM_CONFIG_POOLS - 1); - return -1; - } - - conf_list = em_libconfig_setting_get_list(startup_pools_setting, "conf"); - if (!conf_list) { - EM_LOG(EM_LOG_ERR, "Conf option 'startup_pools.conf' not found\n"); - return -1; - } - - list_len = em_libconfig_list_length(conf_list); - if (list_len != num_startup_pools) { - EM_LOG(EM_LOG_ERR, - "The number of pool configuration(s) given in\n" - "'startup_pools.conf':%d does not match number of\n" - "startup_pools specified in 'startup_pools.num': %d\n", - list_len, num_startup_pools); - return -1; - } - - for (int i = 0; i < list_len; i++) { - if (read_config_startup_pools_conf(conf_list, i) < 0) - return -1; - } - - em_shm->opt.startup_pools.num = num_startup_pools; - - print_config_startup_pools(); - return 0; -} - -/* Read option: pool from the EM config file */ -static int read_config_pool(void) -{ - const char *conf_str; - bool val_bool = false; - int val = 0; - int ret; - - const odp_pool_capability_t *capa = - &em_shm->mpool_tbl.odp_pool_capability; - - EM_PRINT("EM-pool config:\n"); - - /* - * Option: pool.statistics.available - */ - conf_str = "pool.statistics.available"; - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - em_shm->opt.pool.statistics.available = (int)val_bool; - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", val_bool); - - /* - * Option: pool.statistics.alloc_ops - */ - conf_str = "pool.statistics.alloc_ops"; - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - em_shm->opt.pool.statistics.alloc_ops = (int)val_bool; - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", val_bool); - - /* - * Option: pool.statistics.alloc_fails - */ - conf_str = "pool.statistics.alloc_fails"; - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - em_shm->opt.pool.statistics.alloc_fails = (int)val_bool; - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", val_bool); - - /* - * Option: pool.statistics.free_ops - */ - conf_str = "pool.statistics.free_ops"; - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - em_shm->opt.pool.statistics.free_ops = (int)val_bool; - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? 
"true" : "false", val_bool); - - /* - * Option: pool.statistics.total_ops - */ - conf_str = "pool.statistics.total_ops"; - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - em_shm->opt.pool.statistics.total_ops = (int)val_bool; - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", val_bool); - - /* - * Option: pool.statistics.cache_available - */ - conf_str = "pool.statistics.cache_available"; - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - em_shm->opt.pool.statistics.cache_available = (int)val_bool; - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", val_bool); - - /* - * Option: pool.statistics.cache_alloc_ops - */ - conf_str = "pool.statistics.cache_alloc_ops"; - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - em_shm->opt.pool.statistics.cache_alloc_ops = (int)val_bool; - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", val_bool); - - /* - * Option: pool.statistics.cache_free_ops - */ - conf_str = "pool.statistics.cache_free_ops"; - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - em_shm->opt.pool.statistics.cache_free_ops = (int)val_bool; - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", val_bool); - - /* - * Option: pool.statistics.core_cache_available - */ - conf_str = "pool.statistics.core_cache_available"; - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - em_shm->opt.pool.statistics.core_cache_available = (int)val_bool; - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? 
"true" : "false", val_bool); - - /* - * Option: pool.align_offset - */ - conf_str = "pool.align_offset"; - ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); - return -1; - } - if (val < 0 || val > ALIGN_OFFSET_MAX || !POWEROF2(val)) { - EM_LOG(EM_LOG_ERR, - "Bad config value '%s = %d' (max: %d and value must be power of 2)\n", - conf_str, val, ALIGN_OFFSET_MAX); - return -1; - } - /* store & print the value */ - em_shm->opt.pool.align_offset = val; - EM_PRINT(" %s (default): %d (max: %d)\n", - conf_str, val, ALIGN_OFFSET_MAX); - - /* - * Option: pool.user_area_size - */ - conf_str = "pool.user_area_size"; - ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); - return -1; - } - if (val < 0 || (unsigned int)val > capa->pkt.max_uarea_size || - val > EM_EVENT_USER_AREA_MAX_SIZE) { - EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d'\n", - conf_str, val); - return -1; - } - /* store & print the value */ - em_shm->opt.pool.user_area_size = val; - EM_PRINT(" %s (default): %d (max: %d)\n", - conf_str, val, - MIN(EM_EVENT_USER_AREA_MAX_SIZE, capa->pkt.max_uarea_size)); - - /* - * Option: pool.pkt_headroom - */ - conf_str = "pool.pkt_headroom"; - ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); - return -1; - } - - if (val < 0 || (unsigned int)val > capa->pkt.max_headroom) { - EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d'\n", - conf_str, val); - return -1; - } - /* store & print the value */ - em_shm->opt.pool.pkt_headroom = val; - EM_PRINT(" %s (default): %d (max: %u)\n", - conf_str, val, capa->pkt.max_headroom); - - return 0; -} - -static int -read_config_file(void) -{ - /* Option: pool */ - if (read_config_pool() < 0) - return -1; - - /* Option: startup_pools */ - if (read_config_startup_pools() < 0) - return -1; - - return 0; -} - -/* We use following static asserts and function check_em_pool_subpool_stats() - * to verify at both compile time and runtime that, em_pool_subpool_stats_t is - * exactly the same as odp_pool_stats_t except the last struct member, namely, - * 'em_pool_subpool_stats_t::__internal_use', whose size must also be bigger - * than that of 'odp_pool_stats_t::thread'. This allows us to avoid exposing ODP - * type in EM-ODP API (at event_machine_pool.h in this case) and allows us to - * type cast 'em_pool_subpool_stats_t' to 'odp_pool_stats_t', ensuring high - * performance (see em_pool_stats() and em_pool_subpool_stats()). 
- */ - -ODP_STATIC_ASSERT(sizeof(odp_pool_stats_t) <= sizeof(em_pool_subpool_stats_t), - "Size of odp_pool_stats_t must be smaller than that of em_pool_subpool_stats_t"); - -ODP_STATIC_ASSERT(offsetof(odp_pool_stats_t, available) == - offsetof(em_pool_subpool_stats_t, available) && - sizeof_field(odp_pool_stats_t, available) == - sizeof_field(em_pool_subpool_stats_t, available), - "em_pool_subpool_stats_t.available differs from odp_pool_stats_t.available!"); - -ODP_STATIC_ASSERT(offsetof(odp_pool_stats_t, alloc_ops) == - offsetof(em_pool_subpool_stats_t, alloc_ops) && - sizeof_field(odp_pool_stats_t, alloc_ops) == - sizeof_field(em_pool_subpool_stats_t, alloc_ops), - "em_pool_subpool_stats_t.alloc_ops differs from odp_pool_stats_t.alloc_ops!"); - -ODP_STATIC_ASSERT(offsetof(odp_pool_stats_t, alloc_fails) == - offsetof(em_pool_subpool_stats_t, alloc_fails) && - sizeof_field(odp_pool_stats_t, alloc_fails) == - sizeof_field(em_pool_subpool_stats_t, alloc_fails), - "em_pool_subpool_stats_t.alloc_fails differs from odp_pool_stats_t.alloc_fails!"); - -ODP_STATIC_ASSERT(offsetof(odp_pool_stats_t, free_ops) == - offsetof(em_pool_subpool_stats_t, free_ops) && - sizeof_field(odp_pool_stats_t, free_ops) == - sizeof_field(em_pool_subpool_stats_t, free_ops), - "em_pool_subpool_stats_t.free_ops differs from odp_pool_stats_t.free_ops!"); - -ODP_STATIC_ASSERT(offsetof(odp_pool_stats_t, total_ops) == - offsetof(em_pool_subpool_stats_t, total_ops) && - sizeof_field(odp_pool_stats_t, total_ops) == - sizeof_field(em_pool_subpool_stats_t, total_ops), - "em_pool_subpool_stats_t.total_ops differs from odp_pool_stats_t.total_ops!"); - -ODP_STATIC_ASSERT(offsetof(odp_pool_stats_t, cache_available) == - offsetof(em_pool_subpool_stats_t, cache_available) && - sizeof_field(odp_pool_stats_t, cache_available) == - sizeof_field(em_pool_subpool_stats_t, cache_available), - "em_pool_subpool_stats_t.cache_available differs from that of odp_pool_stats_t!"); - -ODP_STATIC_ASSERT(offsetof(odp_pool_stats_t, cache_alloc_ops) == - offsetof(em_pool_subpool_stats_t, cache_alloc_ops) && - sizeof_field(odp_pool_stats_t, cache_alloc_ops) == - sizeof_field(em_pool_subpool_stats_t, cache_alloc_ops), - "em_pool_subpool_stats_t.cache_alloc_ops differs from that of odp_pool_stats_t!"); - -ODP_STATIC_ASSERT(offsetof(odp_pool_stats_t, cache_free_ops) == - offsetof(em_pool_subpool_stats_t, cache_free_ops) && - sizeof_field(odp_pool_stats_t, cache_free_ops) == - sizeof_field(em_pool_subpool_stats_t, cache_free_ops), - "em_pool_subpool_stats_t.cache_free_ops differs from that of odp_pool_stats_t!"); - -ODP_STATIC_ASSERT(offsetof(odp_pool_stats_t, thread) == - offsetof(em_pool_subpool_stats_t, __internal_use) && - sizeof_field(odp_pool_stats_t, thread) <= - sizeof_field(em_pool_subpool_stats_t, __internal_use), - "em_pool_subpool_stats_t.__internal_use differs from odp_pool_stats_t.thread"); - -#define STRUCT_ERR_STR \ -"em_pool_subpool_stats_t.%s differs from odp_pool_stats_t.%s either in size or in offset!\n" - -static int check_em_pool_subpool_stats(void) -{ - if (sizeof(odp_pool_stats_t) > sizeof(em_pool_subpool_stats_t)) { - EM_LOG(EM_LOG_ERR, - "Size of odp_pool_stats_t bigger than that of em_pool_subpool_stats_t\n"); - return -1; - } - - if (offsetof(odp_pool_stats_t, available) != - offsetof(em_pool_subpool_stats_t, available) || - sizeof_field(odp_pool_stats_t, available) != - sizeof_field(em_pool_subpool_stats_t, available)) { - EM_LOG(EM_LOG_ERR, STRUCT_ERR_STR, "available", "available"); - return -1; - } - - if 
(offsetof(odp_pool_stats_t, alloc_ops) != - offsetof(em_pool_subpool_stats_t, alloc_ops) || - sizeof_field(odp_pool_stats_t, alloc_ops) != - sizeof_field(em_pool_subpool_stats_t, alloc_ops)) { - EM_LOG(EM_LOG_ERR, STRUCT_ERR_STR, "alloc_ops", "alloc_ops"); - return -1; - } - - if (offsetof(odp_pool_stats_t, alloc_fails) != - offsetof(em_pool_subpool_stats_t, alloc_fails) || - sizeof_field(odp_pool_stats_t, alloc_fails) != - sizeof_field(em_pool_subpool_stats_t, alloc_fails)) { - EM_LOG(EM_LOG_ERR, STRUCT_ERR_STR, "alloc_fails", "alloc_fails"); - return -1; - } - - if (offsetof(odp_pool_stats_t, free_ops) != - offsetof(em_pool_subpool_stats_t, free_ops) || - sizeof_field(odp_pool_stats_t, free_ops) != - sizeof_field(em_pool_subpool_stats_t, free_ops)) { - EM_LOG(EM_LOG_ERR, STRUCT_ERR_STR, "free_ops", "free_ops"); - return -1; - } - - if (offsetof(odp_pool_stats_t, total_ops) != - offsetof(em_pool_subpool_stats_t, total_ops) || - sizeof_field(odp_pool_stats_t, total_ops) != - sizeof_field(em_pool_subpool_stats_t, total_ops)) { - EM_LOG(EM_LOG_ERR, STRUCT_ERR_STR, "total_ops", "total_ops"); - return -1; - } - - if (offsetof(odp_pool_stats_t, cache_available) != - offsetof(em_pool_subpool_stats_t, cache_available) || - sizeof_field(odp_pool_stats_t, cache_available) != - sizeof_field(em_pool_subpool_stats_t, cache_available)) { - EM_LOG(EM_LOG_ERR, STRUCT_ERR_STR, "cache_available", "cache_available"); - return -1; - } - - if (offsetof(odp_pool_stats_t, cache_alloc_ops) != - offsetof(em_pool_subpool_stats_t, cache_alloc_ops) || - sizeof_field(odp_pool_stats_t, cache_alloc_ops) != - sizeof_field(em_pool_subpool_stats_t, cache_alloc_ops)) { - EM_LOG(EM_LOG_ERR, STRUCT_ERR_STR, "cache_alloc_ops", "cache_alloc_ops"); - return -1; - } - - if (offsetof(odp_pool_stats_t, cache_free_ops) != - offsetof(em_pool_subpool_stats_t, cache_free_ops) || - sizeof_field(odp_pool_stats_t, cache_free_ops) != - sizeof_field(em_pool_subpool_stats_t, cache_free_ops)) { - EM_LOG(EM_LOG_ERR, STRUCT_ERR_STR, "cache_free_ops", "cache_free_ops"); - return -1; - } - - if (offsetof(odp_pool_stats_t, thread) != - offsetof(em_pool_subpool_stats_t, __internal_use) || - sizeof_field(odp_pool_stats_t, thread) > - sizeof_field(em_pool_subpool_stats_t, __internal_use)) { - EM_LOG(EM_LOG_ERR, STRUCT_ERR_STR, "__internal_use", "thread"); - return -1; - } - - return 0; -} - -/* We use following static asserts and function check_em_pool_subpool_stats_selected() - * to verify at both compile time and runtime that, em_pool_subpool_stats_selected_t - * is exactly the same as odp_pool_stats_selected_t This allows us to avoid exposing - * ODP type in EM-ODP API (at event_machine_pool.h in this case) and allows us to - * type cast 'em_pool_subpool_stats_selected_t' to 'odp_pool_stats_selected_t', ensuring - * high performance (see em_pool_stats_selected() and em_pool_subpool_stats_selected()). 
- */
-
-#define SIZE_NOT_EQUAL_ERR_STR \
-"Size of odp_pool_stats_selected_t must be equal to that of em_pool_subpool_stats_selected_t\n"
-
-ODP_STATIC_ASSERT(sizeof(odp_pool_stats_selected_t) == sizeof(em_pool_subpool_stats_selected_t),
- SIZE_NOT_EQUAL_ERR_STR);
-
-ODP_STATIC_ASSERT(offsetof(odp_pool_stats_selected_t, available) ==
- offsetof(em_pool_subpool_stats_selected_t, available) &&
- sizeof_field(odp_pool_stats_selected_t, available) ==
- sizeof_field(em_pool_subpool_stats_selected_t, available),
- "available in em_pool_subpool_stats_selected_t and odp_pool_stats_selected_t differ!");
-
-ODP_STATIC_ASSERT(offsetof(odp_pool_stats_selected_t, alloc_ops) ==
- offsetof(em_pool_subpool_stats_selected_t, alloc_ops) &&
- sizeof_field(odp_pool_stats_selected_t, alloc_ops) ==
- sizeof_field(em_pool_subpool_stats_selected_t, alloc_ops),
- "em_pool_subpool_stats_selected_t.alloc_ops differs from odp_pool_stats_selected_t.alloc_ops!");
-
-ODP_STATIC_ASSERT(offsetof(odp_pool_stats_selected_t, alloc_fails) ==
- offsetof(em_pool_subpool_stats_selected_t, alloc_fails) &&
- sizeof_field(odp_pool_stats_selected_t, alloc_fails) ==
- sizeof_field(em_pool_subpool_stats_selected_t, alloc_fails),
- "em_pool_subpool_stats_selected_t.alloc_fails differs from odp_pool_stats_selected_t.alloc_fails!");
-
-ODP_STATIC_ASSERT(offsetof(odp_pool_stats_selected_t, free_ops) ==
- offsetof(em_pool_subpool_stats_selected_t, free_ops) &&
- sizeof_field(odp_pool_stats_selected_t, free_ops) ==
- sizeof_field(em_pool_subpool_stats_selected_t, free_ops),
- "em_pool_subpool_stats_selected_t.free_ops differs from odp_pool_stats_selected_t.free_ops!");
-
-ODP_STATIC_ASSERT(offsetof(odp_pool_stats_selected_t, total_ops) ==
- offsetof(em_pool_subpool_stats_selected_t, total_ops) &&
- sizeof_field(odp_pool_stats_selected_t, total_ops) ==
- sizeof_field(em_pool_subpool_stats_selected_t, total_ops),
- "em_pool_subpool_stats_selected_t.total_ops differs from odp_pool_stats_selected_t.total_ops!");
-
-ODP_STATIC_ASSERT(offsetof(odp_pool_stats_selected_t, cache_available) ==
- offsetof(em_pool_subpool_stats_selected_t, cache_available) &&
- sizeof_field(odp_pool_stats_selected_t, cache_available) ==
- sizeof_field(em_pool_subpool_stats_selected_t, cache_available),
- "em_pool_subpool_stats_selected_t.cache_available differs from that of odp_pool_stats_selected_t!");
-
-ODP_STATIC_ASSERT(offsetof(odp_pool_stats_selected_t, cache_alloc_ops) ==
- offsetof(em_pool_subpool_stats_selected_t, cache_alloc_ops) &&
- sizeof_field(odp_pool_stats_selected_t, cache_alloc_ops) ==
- sizeof_field(em_pool_subpool_stats_selected_t, cache_alloc_ops),
- "em_pool_subpool_stats_selected_t.cache_alloc_ops differs from that of odp_pool_stats_selected_t!");
-
-ODP_STATIC_ASSERT(offsetof(odp_pool_stats_selected_t, cache_free_ops) ==
- offsetof(em_pool_subpool_stats_selected_t, cache_free_ops) &&
- sizeof_field(odp_pool_stats_selected_t, cache_free_ops) ==
- sizeof_field(em_pool_subpool_stats_selected_t, cache_free_ops),
- "em_pool_subpool_stats_selected_t.cache_free_ops differs from that of odp_pool_stats_selected_t!");
-
-#define SELECTED_TYPE_ERR_FMT \
-"em_pool_subpool_stats_selected_t.%s differs from odp_pool_stats_selected_t.%s\n"
-
-static int check_em_pool_subpool_stats_selected(void)
-{
- if (sizeof(odp_pool_stats_selected_t) != sizeof(em_pool_subpool_stats_selected_t)) {
- EM_LOG(EM_LOG_ERR,
- "odp_pool_stats_selected_t vs em_pool_subpool_stats_selected_t size diff\n");
- return -1;
- }
-
- if (offsetof(odp_pool_stats_selected_t, available) !=
-
offsetof(em_pool_subpool_stats_selected_t, available) || - sizeof_field(odp_pool_stats_selected_t, available) != - sizeof_field(em_pool_subpool_stats_selected_t, available)) { - EM_LOG(EM_LOG_ERR, SELECTED_TYPE_ERR_FMT, "available", "available"); - return -1; - } - - if (offsetof(odp_pool_stats_selected_t, alloc_ops) != - offsetof(em_pool_subpool_stats_selected_t, alloc_ops) || - sizeof_field(odp_pool_stats_selected_t, alloc_ops) != - sizeof_field(em_pool_subpool_stats_selected_t, alloc_ops)) { - EM_LOG(EM_LOG_ERR, SELECTED_TYPE_ERR_FMT, "alloc_ops", "alloc_ops"); - return -1; - } - - if (offsetof(odp_pool_stats_selected_t, alloc_fails) != - offsetof(em_pool_subpool_stats_selected_t, alloc_fails) || - sizeof_field(odp_pool_stats_selected_t, alloc_fails) != - sizeof_field(em_pool_subpool_stats_selected_t, alloc_fails)) { - EM_LOG(EM_LOG_ERR, SELECTED_TYPE_ERR_FMT, "alloc_fails", "alloc_fails"); - return -1; - } - - if (offsetof(odp_pool_stats_selected_t, free_ops) != - offsetof(em_pool_subpool_stats_selected_t, free_ops) || - sizeof_field(odp_pool_stats_selected_t, free_ops) != - sizeof_field(em_pool_subpool_stats_selected_t, free_ops)) { - EM_LOG(EM_LOG_ERR, SELECTED_TYPE_ERR_FMT, "free_ops", "free_ops"); - return -1; - } - - if (offsetof(odp_pool_stats_selected_t, total_ops) != - offsetof(em_pool_subpool_stats_selected_t, total_ops) || - sizeof_field(odp_pool_stats_selected_t, total_ops) != - sizeof_field(em_pool_subpool_stats_selected_t, total_ops)) { - EM_LOG(EM_LOG_ERR, SELECTED_TYPE_ERR_FMT, "total_ops", "total_ops"); - return -1; - } - - if (offsetof(odp_pool_stats_selected_t, cache_available) != - offsetof(em_pool_subpool_stats_selected_t, cache_available) || - sizeof_field(odp_pool_stats_selected_t, cache_available) != - sizeof_field(em_pool_subpool_stats_selected_t, cache_available)) { - EM_LOG(EM_LOG_ERR, SELECTED_TYPE_ERR_FMT, "cache_available", "cache_available"); - return -1; - } - - if (offsetof(odp_pool_stats_selected_t, cache_alloc_ops) != - offsetof(em_pool_subpool_stats_selected_t, cache_alloc_ops) || - sizeof_field(odp_pool_stats_selected_t, cache_alloc_ops) != - sizeof_field(em_pool_subpool_stats_selected_t, cache_alloc_ops)) { - EM_LOG(EM_LOG_ERR, SELECTED_TYPE_ERR_FMT, "cache_alloc_ops", "cache_alloc_ops"); - return -1; - } - - if (offsetof(odp_pool_stats_selected_t, cache_free_ops) != - offsetof(em_pool_subpool_stats_selected_t, cache_free_ops) || - sizeof_field(odp_pool_stats_selected_t, cache_free_ops) != - sizeof_field(em_pool_subpool_stats_selected_t, cache_free_ops)) { - EM_LOG(EM_LOG_ERR, SELECTED_TYPE_ERR_FMT, "cache_free_ops", "cache_free_ops"); - return -1; - } - - return 0; -} - -ODP_STATIC_ASSERT(sizeof(odp_pool_stats_opt_t) == sizeof(em_pool_stats_opt_t), - "Size of odp_pool_stats_opt_t differs from that of em_pool_stats_opt_t\n"); - -em_status_t -pool_init(mpool_tbl_t *const mpool_tbl, mpool_pool_t *const mpool_pool, - const em_pool_cfg_t *default_pool_cfg) -{ - int ret; - em_pool_t pool; - em_pool_t pool_default; - startup_pool_conf_t *startup_pool_conf; - bool default_pool_set = false; - const uint32_t objpool_subpools = MIN(4, OBJSUBPOOLS_MAX); - - /* Return error if em_pool_subpool_stats_t differs from odp_pool_stats_t */ - if (check_em_pool_subpool_stats()) - return EM_ERR; - - /*Return error if em_pool_subpool_stats_selected_t differs from odp_pool_stats_selected_t*/ - if (check_em_pool_subpool_stats_selected()) - return EM_ERR; - - memset(mpool_tbl, 0, sizeof(mpool_tbl_t)); - memset(mpool_pool, 0, sizeof(mpool_pool_t)); - 
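-
-/*
- * The layout checks performed above (the static asserts and the check_*()
- * helpers just called) repeat one pattern per field; as a condensed sketch
- * (hypothetical helper macro, not used in this file), the per-field test
- * could be written once and instantiated per field:
- *
- *   #define FIELD_LAYOUT_MATCHES(a_type, b_type, field)            \
- *           (offsetof(a_type, field) == offsetof(b_type, field) && \
- *            sizeof_field(a_type, field) == sizeof_field(b_type, field))
- */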
env_atomic32_init(&em_shm->pool_count); - - ret = objpool_init(&mpool_pool->objpool, objpool_subpools); - if (ret != 0) - return EM_ERR_OPERATION_FAILED; - - for (uint32_t i = 0; i < EM_CONFIG_POOLS; i++) { - pool = pool_idx2hdl(i); - mpool_elem_t *mpool_elem = pool_elem_get(pool); - - if (unlikely(!mpool_elem)) - return EM_ERR_BAD_POINTER; - - mpool_elem->em_pool = pool; - mpool_elem->event_type = EM_EVENT_TYPE_UNDEF; - for (int j = 0; j < EM_MAX_SUBPOOLS; j++) { - mpool_elem->odp_pool[j] = ODP_POOL_INVALID; - mpool_elem->size[j] = 0; - } - - objpool_add(&mpool_pool->objpool, i % objpool_subpools, - &mpool_elem->objpool_elem); - } - - /* Init the mapping tbl from odp-pool(=subpool) index to em-pool */ - if (odp_pool_max_index() >= POOL_ODP2EM_TBL_LEN) - return EM_ERR_TOO_LARGE; - for (int i = 0; i < POOL_ODP2EM_TBL_LEN; i++) - mpool_tbl->pool_subpool_odp2em[i].both = pool_subpool_undef.both; - - /* Store common ODP pool capabilities in the mpool_tbl for easy access*/ - if (odp_pool_capability(&mpool_tbl->odp_pool_capability) != 0) - return EM_ERR_LIB_FAILED; - - /* Read EM-pool and EM-startup_pools related runtime config options */ - if (read_config_file()) - return EM_ERR_LIB_FAILED; - - /* - * Create default and startup pools. - * - * If default pool configuration is given through 'startup_pools.conf' - * in em-odp.conf, use that instead. Otherwise use default_pool_cfg. - * - * Allocate/reserve default pool first here so when creating startup - * pools whose configuration does not provide pool handle, default pool - * handle EM_POOL_DEFAULT(1) won't be allocated to them. - */ - pool_default = pool_alloc(EM_POOL_DEFAULT); - - if (unlikely(pool_default == EM_POOL_UNDEF || - pool_default != EM_POOL_DEFAULT)) - return EM_ERR_ALLOC_FAILED; - - /* Create startup pools whose configuration is provided by the EM config file */ - for (uint32_t i = 0; i < em_shm->opt.startup_pools.num; i++) { - startup_pool_conf = &em_shm->opt.startup_pools.conf[i]; - - /* Default pool is provided by the EM config file */ - if (strstr(startup_pool_conf->name, EM_POOL_DEFAULT_NAME) || - startup_pool_conf->pool == EM_POOL_DEFAULT) { - default_pool_set = true; - pool_free(EM_POOL_DEFAULT); - pool = em_pool_create(EM_POOL_DEFAULT_NAME, - EM_POOL_DEFAULT, - &startup_pool_conf->cfg); - } else { - pool = em_pool_create(startup_pool_conf->name, - startup_pool_conf->pool, - &startup_pool_conf->cfg); - } - - if (pool == EM_POOL_UNDEF) - return EM_ERR_ALLOC_FAILED; - } - - /* Create the default pool if it is not provided by the EM config file */ - if (!default_pool_set) { - pool_free(EM_POOL_DEFAULT); - pool = em_pool_create(EM_POOL_DEFAULT_NAME, EM_POOL_DEFAULT, - default_pool_cfg); - if (pool == EM_POOL_UNDEF || pool != EM_POOL_DEFAULT) - return EM_ERR_ALLOC_FAILED; - } - - return EM_OK; -} - -em_status_t -pool_term(const mpool_tbl_t *mpool_tbl) -{ - em_status_t stat = EM_OK; - - (void)mpool_tbl; - - EM_PRINT("\n" - "Status before delete:\n"); - em_pool_info_print_all(); - - for (int i = 0; i < EM_CONFIG_POOLS; i++) { - em_pool_t pool = pool_idx2hdl(i); - const mpool_elem_t *mpool_elem = pool_elem_get(pool); - em_status_t ret; - - if (mpool_elem && pool_allocated(mpool_elem)) { - ret = pool_delete(pool); - if (ret != EM_OK) - stat = ret; /* save last error as return val */ - } - } - - return stat; -} - -/* Helper func to invalid_pool_cfg() */ -static int invalid_pool_cache_cfg(const em_pool_cfg_t *pool_cfg, - const char **err_str/*out*/) -{ - const odp_pool_capability_t *capa = - &em_shm->mpool_tbl.odp_pool_capability; - 
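-
-/*
- * Sketch (not part of this code): the pool_subpool_odp2em[] table
- * initialized in pool_init() above, and filled as subpools are created,
- * gives an O(1) reverse lookup from an ODP pool to the owning EM pool
- * and subpool:
- *
- *   int idx = odp_pool_index(odp_pool);
- *   pool_subpool_t ps = (idx < 0 || idx >= POOL_ODP2EM_TBL_LEN) ?
- *           pool_subpool_undef :
- *           em_shm->mpool_tbl.pool_subpool_odp2em[idx];
- */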
uint32_t min_cache_size; - uint32_t cache_size; - - if (pool_cfg->event_type == EM_EVENT_TYPE_SW) - min_cache_size = capa->buf.min_cache_size; - else if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) - min_cache_size = capa->pkt.min_cache_size; - else if (pool_cfg->event_type == EM_EVENT_TYPE_VECTOR) - min_cache_size = capa->vector.min_cache_size; - else - return -9; - - for (int i = 0; i < pool_cfg->num_subpools; i++) { - if (pool_cfg->subpool[i].size <= 0 || - pool_cfg->subpool[i].num <= 0) { - *err_str = "Invalid subpool size/num"; - return -(1 * 10 + i); /* -10, -11, ... */ - } - - cache_size = pool_cfg->subpool[i].cache_size; - if (unlikely(cache_size < min_cache_size)) { - *err_str = "Requested cache size too small"; - return -(2 * 10 + i); /* -20, -21, ... */ - } - /* - * If the given cache size is larger than odp-max, - * then use odp-max: - * if (cache_size > max_cache_size) - * cache_size = max_cache_size; - * This is done later in pool_create(); - */ - } - - return 0; -} - -int invalid_pool_cfg(const em_pool_cfg_t *pool_cfg, const char **err_str/*out*/) -{ - int ret = 0; - const odp_pool_capability_t *capa = &em_shm->mpool_tbl.odp_pool_capability; - - if (!pool_cfg) { - *err_str = "Pool config NULL"; - return -1; - } - if (pool_cfg->__internal_check != EM_CHECK_INIT_CALLED) { - *err_str = "Pool config not initialized"; - return -1; - } - - if (pool_cfg->num_subpools <= 0 || - pool_cfg->num_subpools > EM_MAX_SUBPOOLS) { - *err_str = "Invalid number of subpools"; - return -1; - } - - ret = is_pool_type_supported(pool_cfg->event_type, err_str/*out*/); - if (ret) - return ret; - - if (!is_align_offset_valid(pool_cfg)) { - *err_str = "Invalid align offset"; - return -1; - } - - ret = is_user_area_valid(pool_cfg, capa, err_str/*out*/); - if (ret) - return ret; - - if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET && - pool_cfg->pkt.headroom.in_use && - pool_cfg->pkt.headroom.value > capa->pkt.max_headroom) { - *err_str = "Requested pkt headroom size too large"; - return -1; - } - - ret = invalid_pool_cache_cfg(pool_cfg, err_str/*out*/); - - return ret; /* 0: success, <0: error */ -} - -int check_pool_uarea_persistence(const em_pool_cfg_t *pool_cfg, const char **err_str/*out*/) -{ -#if ODP_VERSION_API_NUM(1, 42, 0) <= ODP_VERSION_API - bool has_uarea_persistence; - const odp_pool_capability_t *capa = &em_shm->mpool_tbl.odp_pool_capability; - - switch (pool_cfg->event_type) { - case EM_EVENT_TYPE_SW: - has_uarea_persistence = capa->buf.uarea_persistence ? true : false; - *err_str = "buf-pool (EM_EVENT_TYPE_SW)"; - break; - case EM_EVENT_TYPE_PACKET: - has_uarea_persistence = capa->pkt.uarea_persistence ? true : false; - *err_str = "pkt-pool (EM_EVENT_TYPE_PACKET)"; - break; - case EM_EVENT_TYPE_VECTOR: - has_uarea_persistence = capa->vector.uarea_persistence ? true : false; - *err_str = "vector-pool (EM_EVENT_TYPE_VECTOR)"; - break; - default: - has_uarea_persistence = false; - *err_str = "unknown pool-type"; - break; - } - - return has_uarea_persistence ? 0 : -1; /* 0: success, <0: not supported */ -#else - return 0; -#endif -} - -/* - * Helper to pool_create() - preallocate all events in the pool for ESV to - * maintain event state over multiple alloc- and free-operations. 
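- *
- * The approach, in outline: allocate every event in the pool once so that
- * ESV records an initial state for each, keep them on a temporary list,
- * then free them all back to the pool:
- *
- *   while ((hdr = event_prealloc(pool_elem, size)) != NULL)
- *           list_add(&evlist, &hdr->list_node);      // drain the pool
- *   while (!list_is_empty(&evlist))                  // return each event
- *           em_free(list_node_to_prealloc_hdr(list_rem_first(&evlist))->ev_hdr.event);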
- */
-static void
-pool_prealloc(const mpool_elem_t *pool_elem)
-{
- event_prealloc_hdr_t *prealloc_hdr = NULL;
- uint64_t num_tot = 0;
- uint64_t num = 0;
- uint64_t num_free = 0;
- const uint32_t size = pool_elem->pool_cfg.subpool[0].size;
- list_node_t evlist;
- list_node_t *node;
-
- list_init(&evlist);
-
- for (int i = 0; i < pool_elem->num_subpools; i++)
- num_tot += pool_elem->pool_cfg.subpool[i].num;
-
- do {
- prealloc_hdr = event_prealloc(pool_elem, size);
- if (likely(prealloc_hdr)) {
- list_add(&evlist, &prealloc_hdr->list_node);
- num++;
- }
- } while (prealloc_hdr);
-
- if (unlikely(num < num_tot))
- INTERNAL_ERROR(EM_FATAL(EM_ERR_TOO_SMALL),
- EM_ESCOPE_POOL_CREATE,
- "alloc: events expected:%" PRIu64 " actual:%" PRIu64 "",
- num_tot, num);
-
- while (!list_is_empty(&evlist)) {
- node = list_rem_first(&evlist);
- prealloc_hdr = list_node_to_prealloc_hdr(node);
- em_free(prealloc_hdr->ev_hdr.event);
- num_free++;
- }
-
- if (unlikely(num_free > num))
- INTERNAL_ERROR(EM_FATAL(EM_ERR_TOO_LARGE),
- EM_ESCOPE_POOL_CREATE,
- "free: events expected:%" PRIu64 " actual:%" PRIu64 "",
- num, num_free);
-}
-
-/*
- * pool_create() helper: sort subpool cfg in ascending order based on buf size
- */
-static void
-sort_pool_cfg(const em_pool_cfg_t *pool_cfg, em_pool_cfg_t *sorted_cfg /*out*/)
-{
- const int num_subpools = pool_cfg->num_subpools;
-
- *sorted_cfg = *pool_cfg;
-
- for (int i = 0; i < num_subpools - 1; i++) {
- int idx = i; /* array index containing smallest size */
-
- for (int j = i + 1; j < num_subpools; j++) {
- if (sorted_cfg->subpool[j].size <
- sorted_cfg->subpool[idx].size)
- idx = j; /* store idx to smallest */
- }
-
- /* min size at [idx], swap with [i] */
- if (idx != i) {
- uint32_t size = sorted_cfg->subpool[i].size;
- uint32_t num = sorted_cfg->subpool[i].num;
- uint32_t cache_size = sorted_cfg->subpool[i].cache_size;
-
- sorted_cfg->subpool[i] = sorted_cfg->subpool[idx];
-
- sorted_cfg->subpool[idx].size = size;
- sorted_cfg->subpool[idx].num = num;
- sorted_cfg->subpool[idx].cache_size = cache_size;
- }
- }
-}
-
-/*
- * pool_create() helper: set pool event-cache size.
- *
- * Set the requested subpool cache-size based on the user-provided value and
- * the limit set by the odp-pool-capability.
- * The requested value can be larger than the odp-max, in which case the
- * odp-max is used instead.
- * Verification against the odp-min value is done in invalid_pool_cfg().
- */
-static void
-set_poolcache_size(em_pool_cfg_t *pool_cfg)
-{
- const odp_pool_capability_t *capa =
- &em_shm->mpool_tbl.odp_pool_capability;
- int num_subpools = pool_cfg->num_subpools;
- uint32_t max_cache_size;
-
- if (pool_cfg->event_type == EM_EVENT_TYPE_SW)
- max_cache_size = capa->buf.max_cache_size;
- else if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET)
- max_cache_size = capa->pkt.max_cache_size;
- else /* EM_EVENT_TYPE_VECTOR */
- max_cache_size = capa->vector.max_cache_size;
-
- for (int i = 0; i < num_subpools; i++) {
- if (max_cache_size < pool_cfg->subpool[i].cache_size)
- pool_cfg->subpool[i].cache_size = max_cache_size;
- }
-}
-
-/*
- * pool_create() helper: determine payload alignment.
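- *
- * For example (values illustrative): with odp_align = 64 and
- * align_offset = 8, events are laid out so that 'payload + 8' falls on a
- * 64-byte boundary, e.g. so that data following an 8-byte protocol header
- * starts cache-line aligned.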
- */ -static int -set_align(const em_pool_cfg_t *pool_cfg, - uint32_t *align_offset /*out*/, uint32_t *odp_align /*out*/) -{ - const odp_pool_capability_t *capa = - &em_shm->mpool_tbl.odp_pool_capability; - uint32_t offset = 0; - uint32_t align = ODP_CACHE_LINE_SIZE; - - /* Pool-specific param overrides config file 'align_offset' value */ - if (pool_cfg->align_offset.in_use) - offset = pool_cfg->align_offset.value; /* pool cfg */ - else - offset = em_shm->opt.pool.align_offset; /* cfg file */ - - /* Set subpool minimum alignment */ - if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) { - if (align > capa->pkt.max_align) - align = capa->pkt.max_align; - } else { - if (align > capa->buf.max_align) - align = capa->buf.max_align; - } - - *align_offset = offset; - *odp_align = align; - - /* verify alignment requirements */ - if (!POWEROF2(align) || align <= offset) - return -1; - - return 0; -} - -/* - * pool_create() helper: determine user area size. - */ -static int -set_uarea_size(const em_pool_cfg_t *pool_cfg, size_t *uarea_size/*out*/) -{ - size_t size = 0; - size_t max_size = 0; - const odp_pool_capability_t *capa = - &em_shm->mpool_tbl.odp_pool_capability; - - if (pool_cfg->user_area.in_use) /* use pool-cfg */ - size = pool_cfg->user_area.size; - else /* use cfg-file */ - size = em_shm->opt.pool.user_area_size; - - if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) - max_size = MIN(capa->pkt.max_uarea_size, EM_EVENT_USER_AREA_MAX_SIZE); - else if (pool_cfg->event_type == EM_EVENT_TYPE_VECTOR) - max_size = MIN(capa->vector.max_uarea_size, EM_EVENT_USER_AREA_MAX_SIZE); - else if (size > 0) /* EM_EVENT_TYPE_SW: bufs */ - max_size = MIN(capa->buf.max_uarea_size, EM_EVENT_USER_AREA_MAX_SIZE); - - if (size > max_size) - return -1; - - *uarea_size = size; - - return 0; -} - -/* - * pool_create() helper: set the pkt headroom - */ -static int -set_pkt_headroom(const em_pool_cfg_t *pool_cfg, - uint32_t *pkt_headroom /*out*/, - uint32_t *max_headroom /*out, for err print only*/) -{ - const odp_pool_capability_t *capa = - &em_shm->mpool_tbl.odp_pool_capability; - /* default value from cfg file */ - uint32_t headroom = em_shm->opt.pool.pkt_headroom; - - /* Pool-specific param overrides config file value */ - if (pool_cfg->pkt.headroom.in_use) - headroom = pool_cfg->pkt.headroom.value; - - *pkt_headroom = headroom; - *max_headroom = capa->pkt.max_headroom; - - if (unlikely(headroom > capa->pkt.max_headroom)) - return -1; - - return 0; -} - -/** Helper to create_subpools() */ -static void set_pool_params_stats(odp_pool_stats_opt_t *param_stats /*out*/, - const odp_pool_stats_opt_t *capa_stats, - const em_pool_stats_opt_t *stats_opt) -{ - param_stats->all = 0; - - if (capa_stats->bit.available) - param_stats->bit.available = stats_opt->available; - - if (capa_stats->bit.alloc_ops) - param_stats->bit.alloc_ops = stats_opt->alloc_ops; - - if (capa_stats->bit.alloc_fails) - param_stats->bit.alloc_fails = stats_opt->alloc_fails; - - if (capa_stats->bit.free_ops) - param_stats->bit.free_ops = stats_opt->free_ops; - - if (capa_stats->bit.total_ops) - param_stats->bit.total_ops = stats_opt->total_ops; - - if (capa_stats->bit.cache_alloc_ops) - param_stats->bit.cache_alloc_ops = stats_opt->cache_alloc_ops; - - if (capa_stats->bit.cache_available) - param_stats->bit.cache_available = stats_opt->cache_available; - - if (capa_stats->bit.cache_free_ops) - param_stats->bit.cache_free_ops = stats_opt->cache_free_ops; - - if (capa_stats->bit.thread_cache_available) - param_stats->bit.thread_cache_available = 
stats_opt->core_cache_available; -} - -/** Helper to create_subpools() */ -static void set_pool_params_pkt(odp_pool_param_t *pool_params /* out */, - const em_pool_cfg_t *pool_cfg, - uint32_t size, uint32_t num, uint32_t cache_size, - uint32_t align_offset, uint32_t odp_align, - uint32_t uarea_size, uint32_t pkt_headroom) -{ - const odp_pool_capability_t *capa = &em_shm->mpool_tbl.odp_pool_capability; - - odp_pool_param_init(pool_params); - - pool_params->type = ODP_POOL_PACKET; - /* num == max_num, helps pool-info stats calculation */ - pool_params->pkt.num = num; - pool_params->pkt.max_num = num; - - if (size > align_offset) - size = size - align_offset; - else - size = 1; /* 0:default, can be big => use 1 */ - /* len == max_len */ - pool_params->pkt.len = size; - pool_params->pkt.max_len = size; - pool_params->pkt.seg_len = size; - pool_params->pkt.align = odp_align; - /* - * Reserve space for the event header in each packet's - * ODP-user-area: - */ - pool_params->pkt.uarea_size = sizeof(event_hdr_t) + uarea_size; - /* - * Set the pkt headroom. - * Make sure the alloc-alignment fits into the headroom. - */ - pool_params->pkt.headroom = pkt_headroom; - if (pkt_headroom < align_offset) - pool_params->pkt.headroom = align_offset; - - pool_params->pkt.cache_size = cache_size; - - /* Pkt pool statistics */ - if (pool_cfg->stats_opt.in_use) { - set_pool_params_stats(&pool_params->stats, &capa->pkt.stats, - &pool_cfg->stats_opt.opt); - } else { - set_pool_params_stats(&pool_params->stats, &capa->pkt.stats, - &em_shm->opt.pool.statistics);/*from cnf file*/ - } -} - -static void set_pool_params_vector(odp_pool_param_t *pool_params /* out */, - const em_pool_cfg_t *pool_cfg, - uint32_t size, uint32_t num, - uint32_t cache_size, uint32_t uarea_size) -{ - const odp_pool_capability_t *capa = &em_shm->mpool_tbl.odp_pool_capability; - - odp_pool_param_init(pool_params); - - pool_params->type = ODP_POOL_VECTOR; - pool_params->vector.num = num; - pool_params->vector.max_size = size; - /* Reserve space for the EM event header in the vector's ODP-user-area */ - pool_params->vector.uarea_size = sizeof(event_hdr_t) + uarea_size; - pool_params->vector.cache_size = cache_size; - - /* Vector pool statistics */ - if (pool_cfg->stats_opt.in_use) - set_pool_params_stats(&pool_params->stats, &capa->vector.stats, - &pool_cfg->stats_opt.opt); - else - set_pool_params_stats(&pool_params->stats, &capa->vector.stats, - &em_shm->opt.pool.statistics); -} - -/** Helper to create_subpools() */ -static void set_pool_params_buf(odp_pool_param_t *pool_params /* out */, - const em_pool_cfg_t *pool_cfg, - uint32_t size, uint32_t num, uint32_t cache_size, - uint32_t align_offset, uint32_t odp_align, - uint32_t uarea_size) -{ - const odp_pool_capability_t *capa = &em_shm->mpool_tbl.odp_pool_capability; - - odp_pool_param_init(pool_params); - - pool_params->type = ODP_POOL_BUFFER; - pool_params->buf.num = num; - pool_params->buf.size = size; - if (align_offset) - pool_params->buf.size += 32 - align_offset; - pool_params->buf.align = odp_align; - pool_params->buf.uarea_size = sizeof(event_hdr_t) + uarea_size; - pool_params->buf.cache_size = cache_size; - - /* Buf pool statistics */ - if (pool_cfg->stats_opt.in_use) - set_pool_params_stats(&pool_params->stats, &capa->buf.stats, - &pool_cfg->stats_opt.opt); - else - set_pool_params_stats(&pool_params->stats, &capa->buf.stats, - &em_shm->opt.pool.statistics); -} - -static int -create_subpools(const em_pool_cfg_t *pool_cfg, - uint32_t align_offset, uint32_t odp_align, - uint32_t 
uarea_size, uint32_t pkt_headroom, - mpool_elem_t *mpool_elem /*out*/) -{ - const int num_subpools = pool_cfg->num_subpools; - mpool_tbl_t *const mpool_tbl = &em_shm->mpool_tbl; - - for (int i = 0; i < num_subpools; i++) { - char pool_name[ODP_POOL_NAME_LEN]; - odp_pool_param_t pool_params; - uint32_t size = pool_cfg->subpool[i].size; - uint32_t num = pool_cfg->subpool[i].num; - uint32_t cache_size = pool_cfg->subpool[i].cache_size; - - if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) { - set_pool_params_pkt(&pool_params /* out */, pool_cfg, - size, num, cache_size, - align_offset, odp_align, - uarea_size, pkt_headroom); - } else if (pool_cfg->event_type == EM_EVENT_TYPE_VECTOR) { - set_pool_params_vector(&pool_params /* out */, pool_cfg, - size, num, cache_size, - uarea_size); - } else { /* pool_cfg->event_type == EM_EVENT_TYPE_SW */ - set_pool_params_buf(&pool_params /* out */, pool_cfg, - size, num, cache_size, - align_offset, odp_align, uarea_size); - } - - mpool_elem->size[i] = pool_cfg->subpool[i].size; - mpool_elem->stats_opt = pool_params.stats; - - snprintf(pool_name, sizeof(pool_name), "%" PRI_POOL ":%d-%s", - mpool_elem->em_pool, i, mpool_elem->name); - pool_name[sizeof(pool_name) - 1] = '\0'; - - odp_pool_t odp_pool = odp_pool_create(pool_name, &pool_params); - - if (unlikely(odp_pool == ODP_POOL_INVALID)) - return -1; - - mpool_elem->odp_pool[i] = odp_pool; - mpool_elem->num_subpools++; /* created subpools for delete */ - - int odp_pool_idx = odp_pool_index(odp_pool); - - if (unlikely(odp_pool_idx < 0)) - return -2; - - /* Store mapping from odp-pool (idx) to em-pool & subpool */ - mpool_tbl->pool_subpool_odp2em[odp_pool_idx].pool = - (uint32_t)(uintptr_t)mpool_elem->em_pool; - mpool_tbl->pool_subpool_odp2em[odp_pool_idx].subpool = i; - - /* odp_pool_print(odp_pool); */ - } - - return 0; -} - -em_pool_t -pool_create(const char *name, em_pool_t req_pool, const em_pool_cfg_t *pool_cfg) -{ - const em_event_type_t pool_evtype = pool_cfg->event_type; - int err = 0; - - /* Allocate a free EM pool */ - const em_pool_t pool = pool_alloc(req_pool/* requested or undef*/); - - if (unlikely(pool == EM_POOL_UNDEF)) - return EM_POOL_UNDEF; - - mpool_elem_t *mpool_elem = pool_elem_get(pool); - - /* Sanity check */ - if (!mpool_elem || mpool_elem->em_pool != pool) - return EM_POOL_UNDEF; - - mpool_elem->event_type = pool_evtype; - /* Store successfully created subpools later */ - mpool_elem->num_subpools = 0; - /* Store the event pool name, if given */ - if (name && *name) { - strncpy(mpool_elem->name, name, sizeof(mpool_elem->name)); - mpool_elem->name[sizeof(mpool_elem->name) - 1] = '\0'; - } else { - mpool_elem->name[0] = '\0'; - } - - em_pool_cfg_t sorted_cfg; - - /* - * Sort the subpool cfg in ascending order based on the buffer size - */ - sort_pool_cfg(pool_cfg, &sorted_cfg/*out*/); - /* Use sorted_cfg instead of pool_cfg from here on */ - - /* - * Set the cache-size of each subpool in the EM-pool - */ - set_poolcache_size(&sorted_cfg); - - /* Store the sorted config */ - mpool_elem->pool_cfg = sorted_cfg; - - /* - * Event payload alignment requirement for the pool - */ - uint32_t align_offset = 0; - uint32_t odp_align = 0; - - /* align only valid for bufs and pkts */ - if (pool_evtype == EM_EVENT_TYPE_SW || - pool_evtype == EM_EVENT_TYPE_PACKET) { - err = set_align(&sorted_cfg, &align_offset/*out*/, - &odp_align/*out*/); - if (unlikely(err)) { - INTERNAL_ERROR(EM_ERR_TOO_LARGE, EM_ESCOPE_POOL_CREATE, - "EM-pool:\"%s\" align mismatch:\n" - "align:%u cfg:align_offset:%u", - name, 
odp_align, align_offset); - goto error; - } - } - /* store the align offset, needed in pkt-alloc */ - mpool_elem->align_offset = align_offset; - - /* - * Event user area size. - * Pool-specific param overrides config file 'user_area_size' value - */ - size_t uarea_size = 0; - - err = set_uarea_size(&sorted_cfg, &uarea_size/*out*/); - if (unlikely(err)) { - INTERNAL_ERROR(EM_ERR_TOO_LARGE, EM_ESCOPE_POOL_CREATE, - "EM-pool:\"%s\" invalid uarea config: req.size:%zu", - name, uarea_size); - goto error; - } - - /* store the user_area sizes, needed in alloc */ - mpool_elem->user_area.size = uarea_size & UINT16_MAX; - - /* - * Set the headroom for events in EM packet pools - */ - uint32_t pkt_headroom = 0; - uint32_t max_headroom = 0; - - if (pool_evtype == EM_EVENT_TYPE_PACKET) { - err = set_pkt_headroom(&sorted_cfg, &pkt_headroom/*out*/, - &max_headroom/*out*/); - if (unlikely(err)) { - INTERNAL_ERROR(EM_ERR_TOO_LARGE, EM_ESCOPE_POOL_CREATE, - "EM-pool:\"%s\" invalid pkt headroom:\n" - "headroom:%u vs. max:headroom:%u", - name, pkt_headroom, max_headroom); - goto error; - } - } - - /* - * Create the subpools for the EM event-pool. - * Each EM subpool is an ODP pool. - */ - err = create_subpools(&sorted_cfg, align_offset, odp_align, - (uint32_t)uarea_size, pkt_headroom, mpool_elem /*out*/); - if (unlikely(err)) { - INTERNAL_ERROR(EM_FATAL(EM_ERR_ALLOC_FAILED), - EM_ESCOPE_POOL_CREATE, - "EM-pool:\"%s\" create fails:%d\n" - "subpools req:%d vs. subpools created:%d", - name, err, sorted_cfg.num_subpools, - mpool_elem->num_subpools); - goto error; - } - - /* - * ESV: preallocate all events in the pool - */ - if (esv_enabled() && em_shm->opt.esv.prealloc_pools) - pool_prealloc(mpool_elem); - - /* Success! */ - return mpool_elem->em_pool; - -error: - (void)pool_delete(pool); - return EM_POOL_UNDEF; -} - -em_status_t -pool_delete(em_pool_t pool) -{ - mpool_tbl_t *const mpool_tbl = &em_shm->mpool_tbl; - mpool_elem_t *const mpool_elem = pool_elem_get(pool); - - if (unlikely(mpool_elem == NULL || !pool_allocated(mpool_elem))) - return EM_ERR_BAD_ARG; - - for (int i = 0; i < mpool_elem->num_subpools; i++) { - odp_pool_t odp_pool = mpool_elem->odp_pool[i]; - int odp_pool_idx; - int ret; - - if (odp_pool == ODP_POOL_INVALID) - return EM_ERR_NOT_FOUND; - - odp_pool_idx = odp_pool_index(odp_pool); - - ret = odp_pool_destroy(odp_pool); - if (unlikely(ret)) - return EM_ERR_LIB_FAILED; - - mpool_elem->odp_pool[i] = ODP_POOL_INVALID; - mpool_elem->size[i] = 0; - - /* Clear mapping from odp-pool (idx) to em-pool & subpool */ - if (unlikely(odp_pool_idx < 0)) - return EM_ERR_BAD_ID; - mpool_tbl->pool_subpool_odp2em[odp_pool_idx].both = pool_subpool_undef.both; - } - - mpool_elem->name[0] = '\0'; - mpool_elem->event_type = EM_EVENT_TYPE_UNDEF; - mpool_elem->num_subpools = 0; - - return pool_free(pool); -} - -em_pool_t -pool_find(const char *name) -{ - if (name && *name) { - for (int i = 0; i < EM_CONFIG_POOLS; i++) { - const mpool_elem_t *mpool_elem = - &em_shm->mpool_tbl.pool[i]; - - if (pool_allocated(mpool_elem) && - !strncmp(name, mpool_elem->name, EM_POOL_NAME_LEN)) - return mpool_elem->em_pool; - } - } - - return EM_POOL_UNDEF; -} - -unsigned int -pool_count(void) -{ - return env_atomic32_get(&em_shm->pool_count); -} - -#define POOL_INFO_HDR_STR \ -" id name type offset uarea sizes [size count(used/free) cache]\n" - -#define POOL_INFO_SUBSTR_FMT \ -"%d:[sz=%" PRIu32 " n=%" PRIu32 "(%" PRIu32 "/%" PRIu32 ") $=%" PRIu32 "]" - -#define POOL_INFO_SUBSTR_NO_STATS_FMT \ -"%d:[sz=%" PRIu32 " n=%" PRIu32 "(-/-) 
cache=%" PRIu32 "]" - -void pool_info_print_hdr(unsigned int num_pools) -{ - if (num_pools == 1) { - EM_PRINT("EM Event Pool\n" - "-------------\n" - POOL_INFO_HDR_STR); - } else { - EM_PRINT("EM Event Pools:%2u\n" - "-----------------\n" - POOL_INFO_HDR_STR, num_pools); - } -} - -void pool_info_print(em_pool_t pool) -{ - em_pool_info_t pool_info; - em_status_t stat; - const char *pool_type; - - stat = em_pool_info(pool, &pool_info/*out*/); - if (unlikely(stat != EM_OK)) { - EM_PRINT(" %-6" PRI_POOL " %-16s n/a n/a n/a n/a [n/a]\n", - pool, "err:n/a"); - return; - } - - if (pool_info.event_type == EM_EVENT_TYPE_VECTOR) - pool_type = "vec"; - else if (pool_info.event_type == EM_EVENT_TYPE_PACKET) - pool_type = "pkt"; - else - pool_type = "buf"; - - EM_PRINT(" %-6" PRI_POOL " %-16s %4s %02u %02zu %02u ", - pool, pool_info.name, pool_type, - pool_info.align_offset, pool_info.user_area_size, - pool_info.num_subpools); - - for (int i = 0; i < pool_info.num_subpools; i++) { - char subpool_str[42]; - - if (pool_info.subpool[i].used || pool_info.subpool[i].free) { - snprintf(subpool_str, sizeof(subpool_str), - POOL_INFO_SUBSTR_FMT, i, - pool_info.subpool[i].size, - pool_info.subpool[i].num, - pool_info.subpool[i].used, - pool_info.subpool[i].free, - pool_info.subpool[i].cache_size); - } else { - snprintf(subpool_str, sizeof(subpool_str), - POOL_INFO_SUBSTR_NO_STATS_FMT, i, - pool_info.subpool[i].size, - pool_info.subpool[i].num, - pool_info.subpool[i].cache_size); - } - subpool_str[sizeof(subpool_str) - 1] = '\0'; - EM_PRINT(" %-42s", subpool_str); - } - - EM_PRINT("\n"); -} - -#define POOL_STATS_HDR_STR \ -"EM pool statistics for pool %" PRI_POOL ":\n\n"\ -"Subpool Available Alloc_ops Alloc_fails Free_ops Total_ops Cache_available" \ -" Cache_alloc_ops Cache_free_ops\n"\ -"--------------------------------------------------------------------------" \ -"-------------------------------\n%s" - -#define POOL_STATS_LEN 107 -#define POOL_STATS_FMT "%-8u%-10lu%-10lu%-12lu%-9lu%-10lu%-16lu%-16lu%-15lu\n" - -void pool_stats_print(em_pool_t pool) -{ - em_status_t stat; - em_pool_stats_t pool_stats; - const em_pool_subpool_stats_t *subpool_stats; - int len = 0; - int n_print = 0; - const mpool_elem_t *pool_elem = pool_elem_get(pool); - const int stats_str_len = EM_MAX_SUBPOOLS * POOL_STATS_LEN + 1; - char stats_str[stats_str_len]; - - if (pool_elem == NULL || !pool_allocated(pool_elem)) { - EM_LOG(EM_LOG_ERR, "EM-pool:%" PRI_POOL " invalid\n", pool); - return; - } - - stat = em_pool_stats(pool, &pool_stats); - if (unlikely(stat != EM_OK)) { - EM_PRINT("Failed to fetch EM pool statistics\n"); - return; - } - - for (uint32_t i = 0; i < pool_stats.num_subpools; i++) { - subpool_stats = &pool_stats.subpool_stats[i]; - n_print = snprintf(stats_str + len, stats_str_len - len, - POOL_STATS_FMT, - i, subpool_stats->available, - subpool_stats->alloc_ops, - subpool_stats->alloc_fails, - subpool_stats->free_ops, - subpool_stats->total_ops, - subpool_stats->cache_available, - subpool_stats->cache_alloc_ops, - subpool_stats->cache_free_ops); - - /* Not enough space to hold more subpool stats */ - if (n_print >= stats_str_len - len) - break; - - len += n_print; - } - - stats_str[len] = '\0'; - EM_PRINT(POOL_STATS_HDR_STR, pool, stats_str); -} - -#define POOL_STATS_SELECTED_HDR_STR \ -"Selected EM pool statistics for pool %" PRI_POOL ":\n\n"\ -"Selected statistic counters: %s\n\n"\ -"Subpool Available Alloc_ops Alloc_fails Free_ops Total_ops Cache_available" \ -" Cache_alloc_ops Cache_free_ops\n"\ 
-"--------------------------------------------------------------------------" \ -"-------------------------------\n%s" - -#define OPT_STR_LEN 150 - -static void fill_opt_str(char *opt_str, const em_pool_stats_opt_t *opt) -{ - int n_print; - int len = 0; - - if (opt->available) { - n_print = snprintf(opt_str + len, 12, "%s", "available"); - len += n_print; - } - - if (opt->alloc_ops) { - n_print = snprintf(opt_str + len, 12, "%s", len ? ", alloc_ops" : "alloc_ops"); - len += n_print; - } - - if (opt->alloc_fails) { - n_print = snprintf(opt_str + len, 14, "%s", len ? ", alloc_fails" : "alloc_fails"); - len += n_print; - } - - if (opt->free_ops) { - n_print = snprintf(opt_str + len, 11, "%s", len ? ", free_ops" : "free_ops"); - len += n_print; - } - - if (opt->total_ops) { - n_print = snprintf(opt_str + len, 12, "%s", len ? ", total_ops" : "total_ops"); - len += n_print; - } - - if (opt->cache_available) { - n_print = snprintf(opt_str + len, 18, "%s", - len ? ", cache_available" : "cache_available"); - len += n_print; - } - - if (opt->cache_alloc_ops) { - n_print = snprintf(opt_str + len, 18, "%s", - len ? ", cache_alloc_ops" : "cache_alloc_ops"); - len += n_print; - } - - if (opt->cache_free_ops) - snprintf(opt_str + len, 17, "%s", len ? ", cache_free_ops" : "cache_free_ops"); -} - -void pool_stats_selected_print(em_pool_t pool, const em_pool_stats_opt_t *opt) -{ - em_status_t stat; - em_pool_stats_selected_t pool_stats = {0}; - const em_pool_subpool_stats_selected_t *subpool_stats; - int len = 0; - int n_print = 0; - const mpool_elem_t *pool_elem = pool_elem_get(pool); - char opt_str[OPT_STR_LEN]; - const int stats_str_len = EM_MAX_SUBPOOLS * POOL_STATS_LEN + 1; - char stats_str[stats_str_len]; - - if (pool_elem == NULL || !pool_allocated(pool_elem)) { - EM_LOG(EM_LOG_ERR, "EM-pool:%" PRI_POOL " invalid\n", pool); - return; - } - - stat = em_pool_stats_selected(pool, &pool_stats, opt); - if (unlikely(stat != EM_OK)) { - EM_PRINT("Failed to fetch EM selected pool statistics\n"); - return; - } - - for (uint32_t i = 0; i < pool_stats.num_subpools; i++) { - subpool_stats = &pool_stats.subpool_stats[i]; - - n_print = snprintf(stats_str + len, stats_str_len - len, - POOL_STATS_FMT, - i, - subpool_stats->available, - subpool_stats->alloc_ops, - subpool_stats->alloc_fails, - subpool_stats->free_ops, - subpool_stats->total_ops, - subpool_stats->cache_available, - subpool_stats->cache_alloc_ops, - subpool_stats->cache_free_ops); - - /* Not enough space to hold more subpool stats */ - if (n_print >= stats_str_len - len) - break; - - len += n_print; - } - stats_str[len] = '\0'; - - /* Fill selected statistic counters */ - fill_opt_str(opt_str, opt); - - EM_PRINT(POOL_STATS_SELECTED_HDR_STR, pool, opt_str, stats_str); -} - -#define SUBPOOL_STATS_HDR_STR \ -"EM subpool statistics for pool %" PRI_POOL ":\n\n"\ -"Subpool Available Alloc_ops Alloc_fails Free_ops Total_ops Cache_available" \ -" Cache_alloc_ops Cache_free_ops\n"\ -"--------------------------------------------------------------------------" \ -"-------------------------------\n%s" - -void subpools_stats_print(em_pool_t pool, const int subpools[], int num_subpools) -{ - int num_stats; - em_pool_subpool_stats_t stats[num_subpools]; - int len = 0; - int n_print = 0; - const mpool_elem_t *pool_elem = pool_elem_get(pool); - const int stats_str_len = num_subpools * POOL_STATS_LEN + 1; - char stats_str[stats_str_len]; - - if (pool_elem == NULL || !pool_allocated(pool_elem)) { - EM_LOG(EM_LOG_ERR, "EM-pool:%" PRI_POOL " invalid\n", pool); - return; - 
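-
-/*
- * Caller-side sketch (pool and subpool choices hypothetical): the stats
- * printed by this helper are fetched via the public API like this:
- *
- *   const int subpools[2] = {0, 1};
- *   em_pool_subpool_stats_t stats[2];
- *   int num = em_pool_subpool_stats(EM_POOL_DEFAULT, subpools, 2, stats);
- */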
} - - num_stats = em_pool_subpool_stats(pool, subpools, num_subpools, stats); - if (unlikely(!num_stats || num_stats > num_subpools)) { - EM_LOG(EM_LOG_ERR, "Failed to fetch subpool statistics\n"); - return; - } - - /* Print subpool stats */ - for (int i = 0; i < num_stats; i++) { - n_print = snprintf(stats_str + len, stats_str_len - len, - POOL_STATS_FMT, - subpools[i], stats[i].available, stats[i].alloc_ops, - stats[i].alloc_fails, stats[i].free_ops, - stats[i].total_ops, stats[i].cache_available, - stats[i].cache_alloc_ops, stats[i].cache_free_ops); - - /* Not enough space to hold more subpool stats */ - if (n_print >= stats_str_len - len) - break; - - len += n_print; - } - - stats_str[len] = '\0'; - EM_PRINT(SUBPOOL_STATS_HDR_STR, pool, stats_str); -} - -#define SUBPOOL_STATS_SELECTED_HDR_STR \ -"Selected EM subpool statistics for pool %" PRI_POOL ":\n\n"\ -"Selected statistic counters: %s\n\n"\ -"Subpool Available Alloc_ops Alloc_fails Free_ops Total_ops Cache_available" \ -" Cache_alloc_ops Cache_free_ops\n"\ -"--------------------------------------------------------------------------" \ -"-------------------------------\n%s" - -void subpools_stats_selected_print(em_pool_t pool, const int subpools[], - int num_subpools, const em_pool_stats_opt_t *opt) -{ - int num_stats; - char opt_str[OPT_STR_LEN]; - em_pool_subpool_stats_selected_t stats[num_subpools]; - int len = 0; - int n_print = 0; - const mpool_elem_t *pool_elem = pool_elem_get(pool); - const int stats_str_len = num_subpools * POOL_STATS_LEN + 1; - char stats_str[stats_str_len]; - - if (pool_elem == NULL || !pool_allocated(pool_elem)) { - EM_LOG(EM_LOG_ERR, "EM-pool:%" PRI_POOL " invalid\n", pool); - return; - } - - memset(stats, 0, sizeof(stats)); - num_stats = em_pool_subpool_stats_selected(pool, subpools, num_subpools, stats, opt); - if (unlikely(!num_stats || num_stats > num_subpools)) { - EM_LOG(EM_LOG_ERR, "Failed to fetch selected subpool statistics\n"); - return; - } - - /* Print subpool stats */ - for (int i = 0; i < num_stats; i++) { - n_print = snprintf(stats_str + len, stats_str_len - len, - POOL_STATS_FMT, - subpools[i], stats[i].available, stats[i].alloc_ops, - stats[i].alloc_fails, stats[i].free_ops, - stats[i].total_ops, stats[i].cache_available, - stats[i].cache_alloc_ops, stats[i].cache_free_ops); - - /* Not enough space to hold more subpool stats */ - if (n_print >= stats_str_len - len) - break; - - len += n_print; - } - stats_str[len] = '\0'; - - /* Fill selected statistic counters */ - fill_opt_str(opt_str, opt); - EM_PRINT(SUBPOOL_STATS_SELECTED_HDR_STR, pool, opt_str, stats_str); -} - -void print_pool_elem_info(void) -{ - EM_PRINT("\n" - "pool-elem size: %zu B\n", - sizeof(mpool_elem_t)); - - EM_DBG("\t\toffset\tsize\n" - "\t\t------\t-----\n" - "event_type:\t%3zu B\t%3zu B\n" - "align_offset:\t%3zu B\t%3zu B\n" - "user_area info:\t%3zu B\t%3zu B\n" - "num_subpools:\t%3zu B\t%3zu B\n" - "size[]:\t\t%3zu B\t%3zu B\n" - "odp_pool[]:\t%3zu B\t%3zu B\n" - "em_pool:\t%3zu B\t%3zu B\n" - "objpool_elem:\t%3zu B\t%3zu B\n" - "stats_opt:\t%3zu B\t%3zu B\n" - "pool_cfg:\t%3zu B\t%3zu B\n" - "name[]:\t\t%3zu B\t%3zu B\n", - offsetof(mpool_elem_t, event_type), sizeof_field(mpool_elem_t, event_type), - offsetof(mpool_elem_t, align_offset), sizeof_field(mpool_elem_t, align_offset), - offsetof(mpool_elem_t, user_area), sizeof_field(mpool_elem_t, user_area), - offsetof(mpool_elem_t, num_subpools), sizeof_field(mpool_elem_t, num_subpools), - offsetof(mpool_elem_t, size), sizeof_field(mpool_elem_t, size), - 
offsetof(mpool_elem_t, odp_pool), sizeof_field(mpool_elem_t, odp_pool), - offsetof(mpool_elem_t, em_pool), sizeof_field(mpool_elem_t, em_pool), - offsetof(mpool_elem_t, objpool_elem), sizeof_field(mpool_elem_t, objpool_elem), - offsetof(mpool_elem_t, stats_opt), sizeof_field(mpool_elem_t, stats_opt), - offsetof(mpool_elem_t, pool_cfg), sizeof_field(mpool_elem_t, pool_cfg), - offsetof(mpool_elem_t, name), sizeof_field(mpool_elem_t, name)); - - EM_PRINT("\n"); -} +/* + * Copyright (c) 2015-2023, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "em_include.h" + +#ifndef __clang__ +COMPILE_TIME_ASSERT(EM_POOL_DEFAULT > (em_pool_t)0 && + EM_POOL_DEFAULT < (em_pool_t)EM_CONFIG_POOLS, + EM_ODP_EM_DEFAULT_POOL_ERROR); +COMPILE_TIME_ASSERT(EM_POOL_UNDEF != EM_POOL_DEFAULT, + EM_ODP_EM_POOL_UNDEF_ERROR); +#endif +COMPILE_TIME_ASSERT(EM_EVENT_USER_AREA_MAX_SIZE < UINT16_MAX, + EM_ODP_EM_EVENT_USER_AREA_MAX_SIZE_ERROR); + +/** + * @def ALIGN_OFFSET_MAX + * + * Max supported value for the config file option 'pool.align_offset'. 
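+ *
+ * Illustrative snippet (value hypothetical) of how this option appears in
+ * the EM config file:
+ *
+ *   pool: {
+ *           align_offset = 8
+ *   }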
+ */ +#define ALIGN_OFFSET_MAX ((int)(16)) + +/* ALIGN_OFFSET_MAX <= 2^bits - 1, must fit into event_hdr_t::align_offset */ +COMPILE_TIME_ASSERT(ALIGN_OFFSET_MAX <= + ((1 << (sizeof_field(event_hdr_t, align_offset) * 8)) - 1), + ALIGN_OFFSET_MAX__TOO_LARGE); + +/** + * @brief Undef value for a pool_subpool_t + * pool_subpool_undef = {.pool = EM_POOL_UNDEF, .subpool = 0}; + */ +const pool_subpool_t pool_subpool_undef = {.pool = (uint32_t)(uintptr_t)EM_POOL_UNDEF, + .subpool = 0}; + +static inline mpool_elem_t * +mpool_poolelem2pool(objpool_elem_t *const objpool_elem) +{ + return (mpool_elem_t *)((uintptr_t)objpool_elem - + offsetof(mpool_elem_t, objpool_elem)); +} + +static em_pool_t +pool_alloc(em_pool_t pool) +{ + mpool_elem_t *mpool_elem; + + if (pool == EM_POOL_UNDEF) { + objpool_elem_t *objpool_elem = + objpool_rem(&em_shm->mpool_pool.objpool, em_core_id()); + + if (unlikely(objpool_elem == NULL)) + return EM_POOL_UNDEF; + + mpool_elem = mpool_poolelem2pool(objpool_elem); + } else { + int ret; + + mpool_elem = pool_elem_get(pool); + if (unlikely(mpool_elem == NULL)) + return EM_POOL_UNDEF; + + ret = objpool_rem_elem(&em_shm->mpool_pool.objpool, + &mpool_elem->objpool_elem); + if (unlikely(ret != 0)) + return EM_POOL_UNDEF; + } + + env_atomic32_inc(&em_shm->pool_count); + return mpool_elem->em_pool; +} + +static em_status_t +pool_free(em_pool_t pool) +{ + mpool_elem_t *mpool_elem = pool_elem_get(pool); + + if (unlikely(mpool_elem == NULL)) + return EM_ERR_BAD_ID; + + objpool_add(&em_shm->mpool_pool.objpool, + mpool_elem->objpool_elem.subpool_idx, + &mpool_elem->objpool_elem); + + env_atomic32_dec(&em_shm->pool_count); + return EM_OK; +} + +static int event_type_from_string(const char *str, em_event_type_t *event_type /*out*/) +{ + if (strstr(str, "EM_EVENT_TYPE_SW")) { + *event_type = EM_EVENT_TYPE_SW; + } else if (strstr(str, "EM_EVENT_TYPE_PACKET")) { + *event_type = EM_EVENT_TYPE_PACKET; + } else if (strstr(str, "EM_EVENT_TYPE_VECTOR")) { + *event_type = EM_EVENT_TYPE_VECTOR; + } else { + EM_LOG(EM_LOG_ERR, "Event type %s not supported.\n", str); + return -1; + } + + return 0; +} + +/* Read option: startup_pools.conf[i].pool_cfg.subpools[j] from the EM config file */ +static inline int read_config_subpool(const libconfig_list_t *subpool, int index, + const char *pool_cfg_str, em_pool_cfg_t *cfg/*out*/) +{ + int ret; + /* Option: subpools[index].size */ + ret = em_libconfig_list_lookup_int(subpool, index, "size", + (int *)&cfg->subpool[index].size); + if (unlikely(ret != 1)) { + EM_LOG(EM_LOG_ERR, + "Option '%s.subpools[%d].size' not found or wrong type.\n", + pool_cfg_str, index); + return -1; + } + + if (cfg->subpool[index].size <= 0) { + EM_LOG(EM_LOG_ERR, "Invalid '%s.subpools[%d].size'.\n", + pool_cfg_str, index); + return -1; + } + + /* Option: subpools[index].num */ + ret = em_libconfig_list_lookup_int(subpool, index, "num", + (int *)&cfg->subpool[index].num); + if (unlikely(ret != 1)) { + EM_LOG(EM_LOG_ERR, + "Option '%s.subpools[%d].num' not found or wrong type.\n", + pool_cfg_str, index); + return -1; + } + + if (cfg->subpool[index].num <= 0) { + EM_LOG(EM_LOG_ERR, "Invalid '%s.subpools[%d].num'.\n", + pool_cfg_str, index); + return -1; + } + + /* + * Option: subpools[index].cache_size + * Not mandatory + */ + ret = em_libconfig_list_lookup_int(subpool, index, "cache_size", + (int *)&cfg->subpool[index].cache_size); + + /* If cache_size is given, check if it is valid */ + if (ret == 1) { + uint32_t min_cache_size; + const odp_pool_capability_t *capa; + + capa = 
&em_shm->mpool_tbl.odp_pool_capability; + + min_cache_size = (cfg->event_type == EM_EVENT_TYPE_SW) ? + capa->buf.min_cache_size : capa->pkt.min_cache_size; + + if (unlikely(cfg->subpool[index].cache_size < min_cache_size)) { + EM_LOG(EM_LOG_ERR, + "'%s.subpools[%d].cache_size' too small.\n", + pool_cfg_str, index); + return -1; + } + } else if (ret == 0) {/*cache_size is given but with wrong data type*/ + EM_LOG(EM_LOG_ERR, + "'%s.subpools[%d].cache_size' wrong data type.\n", + pool_cfg_str, index); + return -1; + } + + /* No need to return fail -1 when cache_size not given (ret == -1) */ + return 0; +} + +static int is_pool_type_supported(em_event_type_t type, + const char **err_str/*out*/) +{ + const odp_pool_capability_t *capa = &em_shm->mpool_tbl.odp_pool_capability; + + if (type == EM_EVENT_TYPE_SW) { + if (capa->buf.max_pools == 0) { + *err_str = "SW (buf) pool type unsupported"; + return -1; + } + } else if (type == EM_EVENT_TYPE_PACKET) { + if (capa->pkt.max_pools == 0) { + *err_str = "PACKET pool type unsupported"; + return -1; + } + } else if (type == EM_EVENT_TYPE_VECTOR) { + if (capa->vector.max_pools == 0) { + *err_str = "VECTOR pool type unsupported"; + return -1; + } + } else { + *err_str = "Pool type unsupported, use _SW, _PACKET or _VECTOR"; + return -1; + } + + return 0; +} + +static inline bool is_align_offset_valid(const em_pool_cfg_t *pool_cfg) +{ + if (pool_cfg->align_offset.in_use && + (pool_cfg->align_offset.value > ALIGN_OFFSET_MAX || + !POWEROF2(pool_cfg->align_offset.value))) { + return false; + } + + return true; +} + +static inline int is_user_area_valid(const em_pool_cfg_t *pool_cfg, + const odp_pool_capability_t *capa, + const char **err_str/*out*/) +{ + /* No need to check when pool specific value is not used */ + if (!pool_cfg->user_area.in_use) + return 0; + + if (pool_cfg->user_area.size > EM_EVENT_USER_AREA_MAX_SIZE) { + *err_str = "Event user area too large"; + return -1; + } + + if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) { + size_t req_odp_uarea_sz = pool_cfg->user_area.size + + sizeof(event_hdr_t); + if (req_odp_uarea_sz > capa->pkt.max_uarea_size) { + *err_str = "ODP pkt max uarea not large enough"; + return -1; + } + } + if (pool_cfg->event_type == EM_EVENT_TYPE_VECTOR) { + size_t req_odp_uarea_sz = pool_cfg->user_area.size + + sizeof(event_hdr_t); + if (req_odp_uarea_sz > capa->vector.max_uarea_size) { + *err_str = "ODP pkt-vector max uarea not large enough"; + return -1; + } + } + + return 0; +} + +/* Read option: startup_pools.conf[index].pool_cfg.align_offset from the EM config file */ +static inline int read_config_align_offset(const libconfig_group_t *align_offset, + const char *pool_cfg_str, + em_pool_cfg_t *cfg/*out*/) +{ + int ret; + + /* Option: startup_pools.conf[index].pool_cfg.align_offset.in_use */ + ret = em_libconfig_group_lookup_bool(align_offset, "in_use", + &cfg->align_offset.in_use); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, + "'%s.align_offset.in_use' not found or wrong type\n", + pool_cfg_str); + return -1; + } + + /* Option: startup_pools.conf[index].pool_cfg.align_offset.value */ + ret = em_libconfig_group_lookup_int(align_offset, "value", + (int *)&cfg->align_offset.value); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, + "'%s.align_offset.value' not found or wrong type\n", + pool_cfg_str); + return -1; + } + + /* Check whether the given value is valid or not */ + if (!is_align_offset_valid(cfg)) { + EM_LOG(EM_LOG_ERR, "Invalid '%s.align_offset.value': %d\n" + "Max align_offset is %d and it must be power of 2\n", + 
pool_cfg_str, cfg->align_offset.value, ALIGN_OFFSET_MAX); + return -1; + } + + return 0; +} + +/* Read option: startup_pools.conf[index].pool_cfg.user_area from the EM config file */ +static inline int read_config_user_area(const libconfig_group_t *user_area, + const char *pool_cfg_str, + em_pool_cfg_t *cfg/*out*/) +{ + int ret; + const odp_pool_capability_t *capa; + const char *err_str = ""; + + /* Option: startup_pools.conf[index].pool_cfg.user_area.in_use */ + ret = em_libconfig_group_lookup_bool(user_area, "in_use", + &cfg->user_area.in_use); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, + "'%s.user_area.in_use' not found or wrong type\n", + pool_cfg_str); + return -1; + } + + /* Option: startup_pools.conf[index].pool_cfg.user_area.size */ + ret = em_libconfig_group_lookup_int(user_area, "size", + (int *)&cfg->user_area.size); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, + "'%s.user_area.size' not found or wrong type\n", + pool_cfg_str); + return -1; + } + + capa = &em_shm->mpool_tbl.odp_pool_capability; + /* Check whether the given value is valid or not */ + if (is_user_area_valid(cfg, capa, &err_str) < 0) { + EM_LOG(EM_LOG_ERR, "%s: %ld\n", err_str, cfg->user_area.size); + return -1; + } + + return 0; +} + +/* Read option: startup_pools.conf[index].pool_cfg.pkt.headroom from the EM config file */ +static inline int read_config_pkt_headroom(const libconfig_group_t *pkt_headroom, + const char *pool_cfg_str, + em_pool_cfg_t *cfg/*out*/) +{ + int ret; + const odp_pool_capability_t *capa; + + /*Option: startup_pools.conf[index].pool_cfg.pkt.headroom.in_use*/ + ret = em_libconfig_group_lookup_bool(pkt_headroom, "in_use", + &cfg->pkt.headroom.in_use); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, + "'%s.pkt.headroom.in_use' not found or wrong type\n", + pool_cfg_str); + return -1; + } + + /*Option: startup_pools.conf[index].pool_cfg.pkt.headroom.value*/ + ret = em_libconfig_group_lookup_int(pkt_headroom, "value", + (int *)&cfg->pkt.headroom.value); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, + "'%s.pkt.headroom.value' not found or wrong type\n", + pool_cfg_str); + return -1; + } + + /* Check whether the given value is valid or not */ + capa = &em_shm->mpool_tbl.odp_pool_capability; + if (cfg->pkt.headroom.in_use && + cfg->pkt.headroom.value > capa->pkt.max_headroom) { + EM_LOG(EM_LOG_ERR, + "'%s.pkt.headroom.value' %d too large (max=%d)\n", + pool_cfg_str, cfg->pkt.headroom.value, + capa->pkt.max_headroom); + return -1; + } + + return 0; +} + +/* Read option: startup_pools.conf[index] from the EM config file */ +static int read_config_startup_pools_conf(const libconfig_list_t *list, int index) +{ + int ret; + int pool; + int ret_pool; + int num_subpools; + const char *pool_name; + const char *event_type; + char pool_cfg_str[40]; + libconfig_group_t *pool_cfg; + const libconfig_list_t *subpool; + const libconfig_group_t *headroom; + const libconfig_group_t *user_area; + const libconfig_group_t *align_offset; + startup_pool_conf_t *conf = &em_shm->opt.startup_pools.conf[index]; + em_pool_cfg_t *cfg = &conf->cfg; + const char *err_str = ""; + + snprintf(pool_cfg_str, sizeof(pool_cfg_str), + "startup_pools.conf[%d].pool_cfg", index); + + pool_cfg = em_libconfig_list_lookup_group(list, index, "pool_cfg"); + if (!pool_cfg) { + EM_LOG(EM_LOG_ERR, "Conf option '%s' not found\n", pool_cfg_str); + return -1; + } + + em_pool_cfg_init(cfg); + + /* + * Read mandatory fields first, in case they are not provided, no need + * to proceed to read optional fields. 
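+ *
+ * Mandatory here: 'pool_cfg.event_type', 'pool_cfg.num_subpools' and
+ * 'pool_cfg.subpools'. The rest ('pool', 'name', 'align_offset',
+ * 'user_area' and 'pkt.headroom') are optional.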
+ */
+
+ /* Option: startup_pools.conf[index].pool_cfg.event_type */
+ ret = em_libconfig_group_lookup_string(pool_cfg, "event_type", &event_type);
+ if (unlikely(!ret)) {
+ EM_LOG(EM_LOG_ERR, "'%s.event_type' not found.\n", pool_cfg_str);
+ return -1;
+ }
+
+ ret = event_type_from_string(event_type, &cfg->event_type/*out*/);
+ if (unlikely(ret < 0))
+ return -1;
+
+ ret = is_pool_type_supported(cfg->event_type, &err_str/*out*/);
+ if (unlikely(ret)) {
+ EM_LOG(EM_LOG_ERR, "%s", err_str);
+ return -1;
+ }
+
+ /* Option: startup_pools.conf[index].pool_cfg.num_subpools */
+ ret = em_libconfig_group_lookup_int(pool_cfg, "num_subpools",
+ &cfg->num_subpools);
+ if (unlikely(!ret)) {
+ EM_LOG(EM_LOG_ERR, "'%s.num_subpools' not found.\n", pool_cfg_str);
+ return -1;
+ }
+
+ if (cfg->num_subpools <= 0 || cfg->num_subpools > EM_MAX_SUBPOOLS) {
+ EM_LOG(EM_LOG_ERR, "Invalid '%s.num_subpools'\n"
+ "Valid value range is [1, %d]\n", pool_cfg_str,
+ EM_MAX_SUBPOOLS);
+ return -1;
+ }
+
+ /* Option: startup_pools.conf[index].pool_cfg.subpools */
+ subpool = em_libconfig_group_lookup_list(pool_cfg, "subpools");
+ if (unlikely(!subpool)) {
+ EM_LOG(EM_LOG_ERR, "'%s.subpools' not found.\n", pool_cfg_str);
+ return -1;
+ }
+
+ num_subpools = em_libconfig_list_length(subpool);
+ if (unlikely(num_subpools != cfg->num_subpools)) {
+ EM_LOG(EM_LOG_ERR, "The number of subpool configurations given\n"
+ "in '%s.subpools' does not match '%s.num_subpools'.\n",
+ pool_cfg_str, pool_cfg_str);
+ return -1;
+ }
+
+ for (int j = 0; j < num_subpools; j++) {
+ ret = read_config_subpool(subpool, j, pool_cfg_str, cfg);
+
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ /* The following are optional configurations */
+
+ /* Option: startup_pools.conf[index].pool */
+ ret_pool = em_libconfig_list_lookup_int(list, index, "pool", &pool);
+ if (unlikely(ret_pool == 0)) {
+ EM_LOG(EM_LOG_ERR,
+ "'startup_pools.conf[%d].pool' has wrong data type (expected int)\n",
+ index);
+ return -1;
+ }
+
+ /* startup_pools.conf[index].pool is provided */
+ if (ret_pool == 1) {
+ if (pool < 0 || pool > EM_CONFIG_POOLS) {
+ EM_LOG(EM_LOG_ERR, "Invalid pool ID %d, valid IDs are within [0, %d]\n",
+ pool, EM_CONFIG_POOLS);
+ return -1;
+ }
+
+ conf->pool = (em_pool_t)(uintptr_t)pool;
+ }
+
+ /* Option: startup_pools.conf[index].name */
+ ret = em_libconfig_list_lookup_string(list, index, "name", &pool_name);
+ if (unlikely(ret == 0)) {
+ EM_LOG(EM_LOG_ERR,
+ "'startup_pools.conf[%d].name' has wrong data type (expected string)\n",
+ index);
+ return -1;
+ }
+
+ if (ret_pool == 1 && ret == 1) { /* Both pool and name have been given */
+ const char *is_default_name = strstr(pool_name, EM_POOL_DEFAULT_NAME);
+ bool is_default_id = (conf->pool == EM_POOL_DEFAULT);
+
+ if (is_default_name && !is_default_id) {
+ EM_LOG(EM_LOG_ERR,
+ "Default name \"%s\" with non-default ID %d\n",
+ EM_POOL_DEFAULT_NAME, (int)(uintptr_t)conf->pool);
+ return -1;
+ }
+
+ if (is_default_id && !is_default_name) {
+ EM_LOG(EM_LOG_ERR,
+ "Default pool ID 1 with non-default name \"%s\"\n",
+ pool_name);
+ return -1;
+ }
+ }
+
+ if (ret == 1) { /* Pool name is given and no conflict with pool ID */
+ strncpy(conf->name, pool_name, EM_POOL_NAME_LEN - 1);
+ conf->name[EM_POOL_NAME_LEN - 1] = '\0';
+ }
+
+ align_offset = em_libconfig_group_lookup_group(pool_cfg, "align_offset");
+ /* align_offset is provided */
+ if (align_offset && read_config_align_offset(align_offset, pool_cfg_str, cfg))
+ return -1;
+
+ user_area = em_libconfig_group_lookup_group(pool_cfg, "user_area");
+ if (user_area &&
read_config_user_area(user_area, pool_cfg_str, cfg)) + return -1; + + headroom = em_libconfig_group_lookup_group(pool_cfg, "pkt.headroom"); + if (headroom) { + if (read_config_pkt_headroom(headroom, pool_cfg_str, cfg)) + return -1; + + /* Ignore the given pkt.headroom for non packet event type */ + if (conf->cfg.event_type != EM_EVENT_TYPE_PACKET) + EM_PRINT("pkt.headroom will be ignored for non packet type!\n"); + } + + return 0; +} + +/* Print option: startup_pools from the EM config file */ +static void print_config_startup_pools(void) +{ + startup_pool_conf_t *conf; + char str_conf[32]; + const char *str = ""; + + EM_PRINT(" startup_pools.num: %u\n", em_shm->opt.startup_pools.num); + + for (uint32_t i = 0; i < em_shm->opt.startup_pools.num; i++) { + conf = &em_shm->opt.startup_pools.conf[i]; + + snprintf(str_conf, sizeof(str_conf), " startup_pools.conf[%d]", i); + + if (*conf->name) + EM_PRINT("%s.name: %s\n", str_conf, conf->name); + + if (conf->pool) + EM_PRINT("%s.pool: %d\n", str_conf, (int)(uintptr_t)conf->pool); + + /*event type*/ + if (conf->cfg.event_type == EM_EVENT_TYPE_SW) + str = "EM_EVENT_TYPE_SW"; + else if (conf->cfg.event_type == EM_EVENT_TYPE_PACKET) + str = "EM_EVENT_TYPE_PACKET"; + else if (conf->cfg.event_type == EM_EVENT_TYPE_VECTOR) + str = "EM_EVENT_TYPE_VECTOR"; + EM_PRINT("%s.pool_cfg.event_type: %s\n", str_conf, str); + + /*align_offset*/ + str = conf->cfg.align_offset.in_use ? "true" : "false"; + EM_PRINT("%s.pool_cfg.align_offset.in_use: %s\n", str_conf, str); + EM_PRINT("%s.pool_cfg.align_offset.value: %d\n", str_conf, + conf->cfg.align_offset.value); + + /*user area*/ + str = conf->cfg.user_area.in_use ? "true" : "false"; + EM_PRINT("%s.pool_cfg.user_area.in_use: %s\n", str_conf, str); + EM_PRINT("%s.pool_cfg.user_area.size: %ld\n", str_conf, + conf->cfg.user_area.size); + + /*pkt headroom*/ + str = conf->cfg.pkt.headroom.in_use ? "true" : "false"; + EM_PRINT("%s.pool_cfg.pkt.headroom.in_use: %s\n", str_conf, str); + EM_PRINT("%s.pool_cfg.pkt.headroom.value: %d\n", str_conf, + conf->cfg.pkt.headroom.value); + + /*number of subpools*/ + EM_PRINT("%s.pool_cfg.num_subpools: %u\n", str_conf, + conf->cfg.num_subpools); + + /*subpools*/ + for (int j = 0; j < conf->cfg.num_subpools; j++) { + EM_PRINT("%s.pool_cfg.subpools[%d].size: %u\n", str_conf, + j, conf->cfg.subpool[j].size); + + EM_PRINT("%s.pool_cfg.subpools[%d].num: %u\n", str_conf, + j, conf->cfg.subpool[j].num); + + EM_PRINT("%s.pool_cfg.subpools[%d].cache_size: %u\n", + str_conf, j, conf->cfg.subpool[j].cache_size); + } + } +} + +/* Read option: startup_pools from the EM config file */ +static int read_config_startup_pools(void) +{ + int ret; + int list_len; + int num_startup_pools; + const libconfig_list_t *conf_list; + libconfig_setting_t *default_setting; + libconfig_setting_t *runtime_setting; + libconfig_setting_t *startup_pools_setting; + + em_libconfig_lookup(&em_shm->libconfig, "startup_pools", + &default_setting, &runtime_setting); + + /* + * Option: startup_pools + * + * Optional. Thus, when runtime configuration is provided, and option + * "startup_pools" is given, use it. However, when option "startup_pools" + * is not specified in the given runtime configuration file, returns + * without giving error, which means no startup pools will be created. + * Note that it does not fall back to use the option "startup_pools" + * specified in the default configuration file. 
+ */ + if (em_shm->libconfig.has_cfg_runtime) { + if (runtime_setting) + startup_pools_setting = runtime_setting; + else + return 0; + } else { + if (default_setting) + startup_pools_setting = default_setting; + else + return 0; + } + + EM_PRINT("EM-startup_pools config:\n"); + /* + * Option: startup_pools.num + * Mandatory when startup_pools option is given + */ + ret = em_libconfig_setting_lookup_int(startup_pools_setting, "num", + &num_startup_pools); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Option 'startup_pools.num' not found\n"); + return -1; + } + + if (num_startup_pools <= 0 || num_startup_pools > EM_CONFIG_POOLS - 1) { + EM_LOG(EM_LOG_ERR, + "Number of startup_pools %d is too large or too small\n" + "Valid value range is [1, %d]\n", + num_startup_pools, EM_CONFIG_POOLS - 1); + return -1; + } + + conf_list = em_libconfig_setting_get_list(startup_pools_setting, "conf"); + if (!conf_list) { + EM_LOG(EM_LOG_ERR, "Conf option 'startup_pools.conf' not found\n"); + return -1; + } + + list_len = em_libconfig_list_length(conf_list); + if (list_len != num_startup_pools) { + EM_LOG(EM_LOG_ERR, + "The number of pool configuration(s) given in\n" + "'startup_pools.conf':%d does not match number of\n" + "startup_pools specified in 'startup_pools.num': %d\n", + list_len, num_startup_pools); + return -1; + } + + for (int i = 0; i < list_len; i++) { + if (read_config_startup_pools_conf(conf_list, i) < 0) + return -1; + } + + em_shm->opt.startup_pools.num = num_startup_pools; + + print_config_startup_pools(); + return 0; +} + +/* Read option: pool from the EM config file */ +static int read_config_pool(void) +{ + const char *conf_str; + bool val_bool = false; + int val = 0; + int ret; + + const odp_pool_capability_t *capa = + &em_shm->mpool_tbl.odp_pool_capability; + + EM_PRINT("EM-pool config:\n"); + + /* + * Option: pool.statistics.available + */ + conf_str = "pool.statistics.available"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + em_shm->opt.pool.statistics.available = (int)val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", val_bool); + + /* + * Option: pool.statistics.alloc_ops + */ + conf_str = "pool.statistics.alloc_ops"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + em_shm->opt.pool.statistics.alloc_ops = (int)val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", val_bool); + + /* + * Option: pool.statistics.alloc_fails + */ + conf_str = "pool.statistics.alloc_fails"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + em_shm->opt.pool.statistics.alloc_fails = (int)val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", val_bool); + + /* + * Option: pool.statistics.free_ops + */ + conf_str = "pool.statistics.free_ops"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + em_shm->opt.pool.statistics.free_ops = (int)val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? 
"true" : "false", val_bool); + + /* + * Option: pool.statistics.total_ops + */ + conf_str = "pool.statistics.total_ops"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + em_shm->opt.pool.statistics.total_ops = (int)val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", val_bool); + + /* + * Option: pool.statistics.cache_available + */ + conf_str = "pool.statistics.cache_available"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + em_shm->opt.pool.statistics.cache_available = (int)val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", val_bool); + + /* + * Option: pool.statistics.cache_alloc_ops + */ + conf_str = "pool.statistics.cache_alloc_ops"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + em_shm->opt.pool.statistics.cache_alloc_ops = (int)val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", val_bool); + + /* + * Option: pool.statistics.cache_free_ops + */ + conf_str = "pool.statistics.cache_free_ops"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + em_shm->opt.pool.statistics.cache_free_ops = (int)val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", val_bool); + + /* + * Option: pool.statistics.core_cache_available + */ + conf_str = "pool.statistics.core_cache_available"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + em_shm->opt.pool.statistics.core_cache_available = (int)val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? 
"true" : "false", val_bool); + + /* + * Option: pool.align_offset + */ + conf_str = "pool.align_offset"; + ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); + return -1; + } + if (val < 0 || val > ALIGN_OFFSET_MAX || !POWEROF2(val)) { + EM_LOG(EM_LOG_ERR, + "Bad config value '%s = %d' (max: %d and value must be power of 2)\n", + conf_str, val, ALIGN_OFFSET_MAX); + return -1; + } + /* store & print the value */ + em_shm->opt.pool.align_offset = val; + EM_PRINT(" %s (default): %d (max: %d)\n", + conf_str, val, ALIGN_OFFSET_MAX); + + /* + * Option: pool.user_area_size + */ + conf_str = "pool.user_area_size"; + ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); + return -1; + } + if (val < 0 || (unsigned int)val > capa->pkt.max_uarea_size || + val > EM_EVENT_USER_AREA_MAX_SIZE) { + EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d'\n", + conf_str, val); + return -1; + } + /* store & print the value */ + em_shm->opt.pool.user_area_size = val; + EM_PRINT(" %s (default): %d (max: %d)\n", + conf_str, val, + MIN(EM_EVENT_USER_AREA_MAX_SIZE, capa->pkt.max_uarea_size)); + + /* + * Option: pool.pkt_headroom + */ + conf_str = "pool.pkt_headroom"; + ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); + return -1; + } + + if (val < 0 || (unsigned int)val > capa->pkt.max_headroom) { + EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d'\n", + conf_str, val); + return -1; + } + /* store & print the value */ + em_shm->opt.pool.pkt_headroom = val; + EM_PRINT(" %s (default): %d (max: %u)\n", + conf_str, val, capa->pkt.max_headroom); + + return 0; +} + +static int +read_config_file(void) +{ + /* Option: pool */ + if (read_config_pool() < 0) + return -1; + + /* Option: startup_pools */ + if (read_config_startup_pools() < 0) + return -1; + + return 0; +} + +/* We use following static asserts and function check_em_pool_subpool_stats() + * to verify at both compile time and runtime that, em_pool_subpool_stats_t is + * exactly the same as odp_pool_stats_t except the last struct member, namely, + * 'em_pool_subpool_stats_t::__internal_use', whose size must also be bigger + * than that of 'odp_pool_stats_t::thread'. This allows us to avoid exposing ODP + * type in EM-ODP API (at event_machine_pool.h in this case) and allows us to + * type cast 'em_pool_subpool_stats_t' to 'odp_pool_stats_t', ensuring high + * performance (see em_pool_stats() and em_pool_subpool_stats()). 
+ */ + +ODP_STATIC_ASSERT(sizeof(odp_pool_stats_t) <= sizeof(em_pool_subpool_stats_t), + "Size of odp_pool_stats_t must be smaller than that of em_pool_subpool_stats_t"); + +ODP_STATIC_ASSERT(offsetof(odp_pool_stats_t, available) == + offsetof(em_pool_subpool_stats_t, available) && + sizeof_field(odp_pool_stats_t, available) == + sizeof_field(em_pool_subpool_stats_t, available), + "em_pool_subpool_stats_t.available differs from odp_pool_stats_t.available!"); + +ODP_STATIC_ASSERT(offsetof(odp_pool_stats_t, alloc_ops) == + offsetof(em_pool_subpool_stats_t, alloc_ops) && + sizeof_field(odp_pool_stats_t, alloc_ops) == + sizeof_field(em_pool_subpool_stats_t, alloc_ops), + "em_pool_subpool_stats_t.alloc_ops differs from odp_pool_stats_t.alloc_ops!"); + +ODP_STATIC_ASSERT(offsetof(odp_pool_stats_t, alloc_fails) == + offsetof(em_pool_subpool_stats_t, alloc_fails) && + sizeof_field(odp_pool_stats_t, alloc_fails) == + sizeof_field(em_pool_subpool_stats_t, alloc_fails), + "em_pool_subpool_stats_t.alloc_fails differs from odp_pool_stats_t.alloc_fails!"); + +ODP_STATIC_ASSERT(offsetof(odp_pool_stats_t, free_ops) == + offsetof(em_pool_subpool_stats_t, free_ops) && + sizeof_field(odp_pool_stats_t, free_ops) == + sizeof_field(em_pool_subpool_stats_t, free_ops), + "em_pool_subpool_stats_t.free_ops differs from odp_pool_stats_t.free_ops!"); + +ODP_STATIC_ASSERT(offsetof(odp_pool_stats_t, total_ops) == + offsetof(em_pool_subpool_stats_t, total_ops) && + sizeof_field(odp_pool_stats_t, total_ops) == + sizeof_field(em_pool_subpool_stats_t, total_ops), + "em_pool_subpool_stats_t.total_ops differs from odp_pool_stats_t.total_ops!"); + +ODP_STATIC_ASSERT(offsetof(odp_pool_stats_t, cache_available) == + offsetof(em_pool_subpool_stats_t, cache_available) && + sizeof_field(odp_pool_stats_t, cache_available) == + sizeof_field(em_pool_subpool_stats_t, cache_available), + "em_pool_subpool_stats_t.cache_available differs from that of odp_pool_stats_t!"); + +ODP_STATIC_ASSERT(offsetof(odp_pool_stats_t, cache_alloc_ops) == + offsetof(em_pool_subpool_stats_t, cache_alloc_ops) && + sizeof_field(odp_pool_stats_t, cache_alloc_ops) == + sizeof_field(em_pool_subpool_stats_t, cache_alloc_ops), + "em_pool_subpool_stats_t.cache_alloc_ops differs from that of odp_pool_stats_t!"); + +ODP_STATIC_ASSERT(offsetof(odp_pool_stats_t, cache_free_ops) == + offsetof(em_pool_subpool_stats_t, cache_free_ops) && + sizeof_field(odp_pool_stats_t, cache_free_ops) == + sizeof_field(em_pool_subpool_stats_t, cache_free_ops), + "em_pool_subpool_stats_t.cache_free_ops differs from that of odp_pool_stats_t!"); + +ODP_STATIC_ASSERT(offsetof(odp_pool_stats_t, thread) == + offsetof(em_pool_subpool_stats_t, __internal_use) && + sizeof_field(odp_pool_stats_t, thread) <= + sizeof_field(em_pool_subpool_stats_t, __internal_use), + "em_pool_subpool_stats_t.__internal_use differs from odp_pool_stats_t.thread"); + +#define STRUCT_ERR_STR \ +"em_pool_subpool_stats_t.%s differs from odp_pool_stats_t.%s either in size or in offset!\n" + +static int check_em_pool_subpool_stats(void) +{ + if (sizeof(odp_pool_stats_t) > sizeof(em_pool_subpool_stats_t)) { + EM_LOG(EM_LOG_ERR, + "Size of odp_pool_stats_t bigger than that of em_pool_subpool_stats_t\n"); + return -1; + } + + if (offsetof(odp_pool_stats_t, available) != + offsetof(em_pool_subpool_stats_t, available) || + sizeof_field(odp_pool_stats_t, available) != + sizeof_field(em_pool_subpool_stats_t, available)) { + EM_LOG(EM_LOG_ERR, STRUCT_ERR_STR, "available", "available"); + return -1; + } + + if 
(offsetof(odp_pool_stats_t, alloc_ops) != + offsetof(em_pool_subpool_stats_t, alloc_ops) || + sizeof_field(odp_pool_stats_t, alloc_ops) != + sizeof_field(em_pool_subpool_stats_t, alloc_ops)) { + EM_LOG(EM_LOG_ERR, STRUCT_ERR_STR, "alloc_ops", "alloc_ops"); + return -1; + } + + if (offsetof(odp_pool_stats_t, alloc_fails) != + offsetof(em_pool_subpool_stats_t, alloc_fails) || + sizeof_field(odp_pool_stats_t, alloc_fails) != + sizeof_field(em_pool_subpool_stats_t, alloc_fails)) { + EM_LOG(EM_LOG_ERR, STRUCT_ERR_STR, "alloc_fails", "alloc_fails"); + return -1; + } + + if (offsetof(odp_pool_stats_t, free_ops) != + offsetof(em_pool_subpool_stats_t, free_ops) || + sizeof_field(odp_pool_stats_t, free_ops) != + sizeof_field(em_pool_subpool_stats_t, free_ops)) { + EM_LOG(EM_LOG_ERR, STRUCT_ERR_STR, "free_ops", "free_ops"); + return -1; + } + + if (offsetof(odp_pool_stats_t, total_ops) != + offsetof(em_pool_subpool_stats_t, total_ops) || + sizeof_field(odp_pool_stats_t, total_ops) != + sizeof_field(em_pool_subpool_stats_t, total_ops)) { + EM_LOG(EM_LOG_ERR, STRUCT_ERR_STR, "total_ops", "total_ops"); + return -1; + } + + if (offsetof(odp_pool_stats_t, cache_available) != + offsetof(em_pool_subpool_stats_t, cache_available) || + sizeof_field(odp_pool_stats_t, cache_available) != + sizeof_field(em_pool_subpool_stats_t, cache_available)) { + EM_LOG(EM_LOG_ERR, STRUCT_ERR_STR, "cache_available", "cache_available"); + return -1; + } + + if (offsetof(odp_pool_stats_t, cache_alloc_ops) != + offsetof(em_pool_subpool_stats_t, cache_alloc_ops) || + sizeof_field(odp_pool_stats_t, cache_alloc_ops) != + sizeof_field(em_pool_subpool_stats_t, cache_alloc_ops)) { + EM_LOG(EM_LOG_ERR, STRUCT_ERR_STR, "cache_alloc_ops", "cache_alloc_ops"); + return -1; + } + + if (offsetof(odp_pool_stats_t, cache_free_ops) != + offsetof(em_pool_subpool_stats_t, cache_free_ops) || + sizeof_field(odp_pool_stats_t, cache_free_ops) != + sizeof_field(em_pool_subpool_stats_t, cache_free_ops)) { + EM_LOG(EM_LOG_ERR, STRUCT_ERR_STR, "cache_free_ops", "cache_free_ops"); + return -1; + } + + if (offsetof(odp_pool_stats_t, thread) != + offsetof(em_pool_subpool_stats_t, __internal_use) || + sizeof_field(odp_pool_stats_t, thread) > + sizeof_field(em_pool_subpool_stats_t, __internal_use)) { + EM_LOG(EM_LOG_ERR, STRUCT_ERR_STR, "__internal_use", "thread"); + return -1; + } + + return 0; +} + +/* We use following static asserts and function check_em_pool_subpool_stats_selected() + * to verify at both compile time and runtime that, em_pool_subpool_stats_selected_t + * is exactly the same as odp_pool_stats_selected_t This allows us to avoid exposing + * ODP type in EM-ODP API (at event_machine_pool.h in this case) and allows us to + * type cast 'em_pool_subpool_stats_selected_t' to 'odp_pool_stats_selected_t', ensuring + * high performance (see em_pool_stats_selected() and em_pool_subpool_stats_selected()). 
+ */ + +#define SIZE_NOT_EQUAL_ERR_STR \ +"Size of odp_pool_stats_selected_t must equal to that of em_pool_subpool_stats_selected_t\n" + +ODP_STATIC_ASSERT(sizeof(odp_pool_stats_selected_t) == sizeof(em_pool_subpool_stats_selected_t), + SIZE_NOT_EQUAL_ERR_STR); + +ODP_STATIC_ASSERT(offsetof(odp_pool_stats_selected_t, available) == + offsetof(em_pool_subpool_stats_selected_t, available) && + sizeof_field(odp_pool_stats_selected_t, available) == + sizeof_field(em_pool_subpool_stats_selected_t, available), + "available in em_pool_subpool_stats_selected_t and odp_pool_stats_selected_t differs!"); + +ODP_STATIC_ASSERT(offsetof(odp_pool_stats_selected_t, alloc_ops) == + offsetof(em_pool_subpool_stats_selected_t, alloc_ops) && + sizeof_field(odp_pool_stats_selected_t, alloc_ops) == + sizeof_field(em_pool_subpool_stats_selected_t, alloc_ops), + "em_pool_subpool_stats_selected_t.alloc_ops differs from odp_pool_stats_selected_t.alloc_ops!"); + +ODP_STATIC_ASSERT(offsetof(odp_pool_stats_selected_t, alloc_fails) == + offsetof(em_pool_subpool_stats_t, alloc_fails) && + sizeof_field(odp_pool_stats_selected_t, alloc_fails) == + sizeof_field(em_pool_subpool_stats_t, alloc_fails), + "em_pool_subpool_stats_selected_t.alloc_fails differs from odp_pool_stats_selected_t.alloc_fails!"); + +ODP_STATIC_ASSERT(offsetof(odp_pool_stats_selected_t, free_ops) == + offsetof(em_pool_subpool_stats_selected_t, free_ops) && + sizeof_field(odp_pool_stats_selected_t, free_ops) == + sizeof_field(em_pool_subpool_stats_selected_t, free_ops), + "em_pool_subpool_stats_selected_t.free_ops differs from odp_pool_stats_selected_t.free_ops!"); + +ODP_STATIC_ASSERT(offsetof(odp_pool_stats_selected_t, total_ops) == + offsetof(em_pool_subpool_stats_selected_t, total_ops) && + sizeof_field(odp_pool_stats_selected_t, total_ops) == + sizeof_field(em_pool_subpool_stats_selected_t, total_ops), + "em_pool_subpool_stats_selected_t.total_ops differs from odp_pool_stats_selected_t.total_ops!"); + +ODP_STATIC_ASSERT(offsetof(odp_pool_stats_selected_t, cache_available) == + offsetof(em_pool_subpool_stats_selected_t, cache_available) && + sizeof_field(odp_pool_stats_selected_t, cache_available) == + sizeof_field(em_pool_subpool_stats_selected_t, cache_available), + "em_pool_subpool_stats_selected_t.cache_available differs from that of odp_pool_stats_selected_t!"); + +ODP_STATIC_ASSERT(offsetof(odp_pool_stats_selected_t, cache_alloc_ops) == + offsetof(em_pool_subpool_stats_selected_t, cache_alloc_ops) && + sizeof_field(odp_pool_stats_selected_t, cache_alloc_ops) == + sizeof_field(em_pool_subpool_stats_selected_t, cache_alloc_ops), + "em_pool_subpool_stats_selected_t.cache_alloc_ops differs from that of odp_pool_stats_selected_t!"); + +ODP_STATIC_ASSERT(offsetof(odp_pool_stats_selected_t, cache_free_ops) == + offsetof(em_pool_subpool_stats_selected_t, cache_free_ops) && + sizeof_field(odp_pool_stats_selected_t, cache_free_ops) == + sizeof_field(em_pool_subpool_stats_selected_t, cache_free_ops), + "em_pool_subpool_stats_selected_t.cache_free_ops differs from that of odp_pool_stats_selected_t!"); + +#define SELECTED_TYPE_ERR_FMT \ +"em_pool_subpool_stats_selected_t.%s differs from odp_pool_stats_selected_t.%s\n" + +static int check_em_pool_subpool_stats_selected(void) +{ + if (sizeof(odp_pool_stats_selected_t) != sizeof(em_pool_subpool_stats_selected_t)) { + EM_LOG(EM_LOG_ERR, + "odp_pool_stats_selected_t vs em_pool_subpool_stats_selected_t size diff\n"); + return -1; + } + + if (offsetof(odp_pool_stats_selected_t, available) != + 
offsetof(em_pool_subpool_stats_selected_t, available) || + sizeof_field(odp_pool_stats_selected_t, available) != + sizeof_field(em_pool_subpool_stats_selected_t, available)) { + EM_LOG(EM_LOG_ERR, SELECTED_TYPE_ERR_FMT, "available", "available"); + return -1; + } + + if (offsetof(odp_pool_stats_selected_t, alloc_ops) != + offsetof(em_pool_subpool_stats_selected_t, alloc_ops) || + sizeof_field(odp_pool_stats_selected_t, alloc_ops) != + sizeof_field(em_pool_subpool_stats_selected_t, alloc_ops)) { + EM_LOG(EM_LOG_ERR, SELECTED_TYPE_ERR_FMT, "alloc_ops", "alloc_ops"); + return -1; + } + + if (offsetof(odp_pool_stats_selected_t, alloc_fails) != + offsetof(em_pool_subpool_stats_selected_t, alloc_fails) || + sizeof_field(odp_pool_stats_selected_t, alloc_fails) != + sizeof_field(em_pool_subpool_stats_selected_t, alloc_fails)) { + EM_LOG(EM_LOG_ERR, SELECTED_TYPE_ERR_FMT, "alloc_fails", "alloc_fails"); + return -1; + } + + if (offsetof(odp_pool_stats_selected_t, free_ops) != + offsetof(em_pool_subpool_stats_selected_t, free_ops) || + sizeof_field(odp_pool_stats_selected_t, free_ops) != + sizeof_field(em_pool_subpool_stats_selected_t, free_ops)) { + EM_LOG(EM_LOG_ERR, SELECTED_TYPE_ERR_FMT, "free_ops", "free_ops"); + return -1; + } + + if (offsetof(odp_pool_stats_selected_t, total_ops) != + offsetof(em_pool_subpool_stats_selected_t, total_ops) || + sizeof_field(odp_pool_stats_selected_t, total_ops) != + sizeof_field(em_pool_subpool_stats_selected_t, total_ops)) { + EM_LOG(EM_LOG_ERR, SELECTED_TYPE_ERR_FMT, "total_ops", "total_ops"); + return -1; + } + + if (offsetof(odp_pool_stats_selected_t, cache_available) != + offsetof(em_pool_subpool_stats_selected_t, cache_available) || + sizeof_field(odp_pool_stats_selected_t, cache_available) != + sizeof_field(em_pool_subpool_stats_selected_t, cache_available)) { + EM_LOG(EM_LOG_ERR, SELECTED_TYPE_ERR_FMT, "cache_available", "cache_available"); + return -1; + } + + if (offsetof(odp_pool_stats_selected_t, cache_alloc_ops) != + offsetof(em_pool_subpool_stats_selected_t, cache_alloc_ops) || + sizeof_field(odp_pool_stats_selected_t, cache_alloc_ops) != + sizeof_field(em_pool_subpool_stats_selected_t, cache_alloc_ops)) { + EM_LOG(EM_LOG_ERR, SELECTED_TYPE_ERR_FMT, "cache_alloc_ops", "cache_alloc_ops"); + return -1; + } + + if (offsetof(odp_pool_stats_selected_t, cache_free_ops) != + offsetof(em_pool_subpool_stats_selected_t, cache_free_ops) || + sizeof_field(odp_pool_stats_selected_t, cache_free_ops) != + sizeof_field(em_pool_subpool_stats_selected_t, cache_free_ops)) { + EM_LOG(EM_LOG_ERR, SELECTED_TYPE_ERR_FMT, "cache_free_ops", "cache_free_ops"); + return -1; + } + + return 0; +} + +ODP_STATIC_ASSERT(sizeof(odp_pool_stats_opt_t) == sizeof(em_pool_stats_opt_t), + "Size of odp_pool_stats_opt_t differs from that of em_pool_stats_opt_t\n"); + +em_status_t +pool_init(mpool_tbl_t *const mpool_tbl, mpool_pool_t *const mpool_pool, + const em_pool_cfg_t *default_pool_cfg) +{ + int ret; + em_pool_t pool; + em_pool_t pool_default; + startup_pool_conf_t *startup_pool_conf; + bool default_pool_set = false; + const uint32_t objpool_subpools = MIN(4, OBJSUBPOOLS_MAX); + + /* Return error if em_pool_subpool_stats_t differs from odp_pool_stats_t */ + if (check_em_pool_subpool_stats()) + return EM_ERR; + + /*Return error if em_pool_subpool_stats_selected_t differs from odp_pool_stats_selected_t*/ + if (check_em_pool_subpool_stats_selected()) + return EM_ERR; + + memset(mpool_tbl, 0, sizeof(mpool_tbl_t)); + memset(mpool_pool, 0, sizeof(mpool_pool_t)); + 
env_atomic32_init(&em_shm->pool_count); + + ret = objpool_init(&mpool_pool->objpool, objpool_subpools); + if (ret != 0) + return EM_ERR_OPERATION_FAILED; + + for (uint32_t i = 0; i < EM_CONFIG_POOLS; i++) { + pool = pool_idx2hdl(i); + mpool_elem_t *mpool_elem = pool_elem_get(pool); + + if (unlikely(!mpool_elem)) + return EM_ERR_BAD_POINTER; + + mpool_elem->em_pool = pool; + mpool_elem->event_type = EM_EVENT_TYPE_UNDEF; + for (int j = 0; j < EM_MAX_SUBPOOLS; j++) { + mpool_elem->odp_pool[j] = ODP_POOL_INVALID; + mpool_elem->size[j] = 0; + } + + objpool_add(&mpool_pool->objpool, i % objpool_subpools, + &mpool_elem->objpool_elem); + } + + /* Init the mapping tbl from odp-pool(=subpool) index to em-pool */ + if (odp_pool_max_index() >= POOL_ODP2EM_TBL_LEN) + return EM_ERR_TOO_LARGE; + for (int i = 0; i < POOL_ODP2EM_TBL_LEN; i++) + mpool_tbl->pool_subpool_odp2em[i].both = pool_subpool_undef.both; + + /* Store common ODP pool capabilities in the mpool_tbl for easy access*/ + if (odp_pool_capability(&mpool_tbl->odp_pool_capability) != 0) + return EM_ERR_LIB_FAILED; + + /* Read EM-pool and EM-startup_pools related runtime config options */ + if (read_config_file()) + return EM_ERR_LIB_FAILED; + + /* + * Create default and startup pools. + * + * If default pool configuration is given through 'startup_pools.conf' + * in em-odp.conf, use that instead. Otherwise use default_pool_cfg. + * + * Allocate/reserve default pool first here so when creating startup + * pools whose configuration does not provide pool handle, default pool + * handle EM_POOL_DEFAULT(1) won't be allocated to them. + */ + pool_default = pool_alloc(EM_POOL_DEFAULT); + + if (unlikely(pool_default == EM_POOL_UNDEF || + pool_default != EM_POOL_DEFAULT)) + return EM_ERR_ALLOC_FAILED; + + /* Create startup pools whose configuration is provided by the EM config file */ + for (uint32_t i = 0; i < em_shm->opt.startup_pools.num; i++) { + startup_pool_conf = &em_shm->opt.startup_pools.conf[i]; + + /* Default pool is provided by the EM config file */ + if (strstr(startup_pool_conf->name, EM_POOL_DEFAULT_NAME) || + startup_pool_conf->pool == EM_POOL_DEFAULT) { + default_pool_set = true; + pool_free(EM_POOL_DEFAULT); + pool = em_pool_create(EM_POOL_DEFAULT_NAME, + EM_POOL_DEFAULT, + &startup_pool_conf->cfg); + } else { + pool = em_pool_create(startup_pool_conf->name, + startup_pool_conf->pool, + &startup_pool_conf->cfg); + } + + if (pool == EM_POOL_UNDEF) + return EM_ERR_ALLOC_FAILED; + } + + /* Create the default pool if it is not provided by the EM config file */ + if (!default_pool_set) { + pool_free(EM_POOL_DEFAULT); + pool = em_pool_create(EM_POOL_DEFAULT_NAME, EM_POOL_DEFAULT, + default_pool_cfg); + if (pool == EM_POOL_UNDEF || pool != EM_POOL_DEFAULT) + return EM_ERR_ALLOC_FAILED; + } + + return EM_OK; +} + +em_status_t +pool_term(const mpool_tbl_t *mpool_tbl) +{ + em_status_t stat = EM_OK; + + (void)mpool_tbl; + + EM_PRINT("\n" + "Status before delete:\n"); + em_pool_info_print_all(); + + for (int i = 0; i < EM_CONFIG_POOLS; i++) { + em_pool_t pool = pool_idx2hdl(i); + const mpool_elem_t *mpool_elem = pool_elem_get(pool); + em_status_t ret; + + if (mpool_elem && pool_allocated(mpool_elem)) { + ret = pool_delete(pool); + if (ret != EM_OK) + stat = ret; /* save last error as return val */ + } + } + + return stat; +} + +/* Helper func to invalid_pool_cfg() */ +static int invalid_pool_cache_cfg(const em_pool_cfg_t *pool_cfg, + const char **err_str/*out*/) +{ + const odp_pool_capability_t *capa = + &em_shm->mpool_tbl.odp_pool_capability; + 
uint32_t min_cache_size; + uint32_t cache_size; + + if (pool_cfg->event_type == EM_EVENT_TYPE_SW) + min_cache_size = capa->buf.min_cache_size; + else if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) + min_cache_size = capa->pkt.min_cache_size; + else if (pool_cfg->event_type == EM_EVENT_TYPE_VECTOR) + min_cache_size = capa->vector.min_cache_size; + else + return -9; + + for (int i = 0; i < pool_cfg->num_subpools; i++) { + if (pool_cfg->subpool[i].size <= 0 || + pool_cfg->subpool[i].num <= 0) { + *err_str = "Invalid subpool size/num"; + return -(1 * 10 + i); /* -10, -11, ... */ + } + + cache_size = pool_cfg->subpool[i].cache_size; + if (unlikely(cache_size < min_cache_size)) { + *err_str = "Requested cache size too small"; + return -(2 * 10 + i); /* -20, -21, ... */ + } + /* + * If the given cache size is larger than odp-max, + * then use odp-max: + * if (cache_size > max_cache_size) + * cache_size = max_cache_size; + * This is done later in pool_create(); + */ + } + + return 0; +} + +int invalid_pool_cfg(const em_pool_cfg_t *pool_cfg, const char **err_str/*out*/) +{ + int ret = 0; + const odp_pool_capability_t *capa = &em_shm->mpool_tbl.odp_pool_capability; + + if (!pool_cfg) { + *err_str = "Pool config NULL"; + return -1; + } + if (pool_cfg->__internal_check != EM_CHECK_INIT_CALLED) { + *err_str = "Not initialized: em_pool_cfg_init(pool_cfg) not called"; + return -1; + } + + if (pool_cfg->num_subpools <= 0 || + pool_cfg->num_subpools > EM_MAX_SUBPOOLS) { + *err_str = "Invalid number of subpools"; + return -1; + } + + ret = is_pool_type_supported(pool_cfg->event_type, err_str/*out*/); + if (ret) + return ret; + + if (!is_align_offset_valid(pool_cfg)) { + *err_str = "Invalid align offset"; + return -1; + } + + ret = is_user_area_valid(pool_cfg, capa, err_str/*out*/); + if (ret) + return ret; + + if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET && + pool_cfg->pkt.headroom.in_use && + pool_cfg->pkt.headroom.value > capa->pkt.max_headroom) { + *err_str = "Requested pkt headroom size too large"; + return -1; + } + + ret = invalid_pool_cache_cfg(pool_cfg, err_str/*out*/); + + return ret; /* 0: success, <0: error */ +} + +int check_pool_uarea_persistence(const em_pool_cfg_t *pool_cfg, const char **err_str/*out*/) +{ +#if ODP_VERSION_API_NUM(1, 42, 0) <= ODP_VERSION_API + bool has_uarea_persistence; + const odp_pool_capability_t *capa = &em_shm->mpool_tbl.odp_pool_capability; + + switch (pool_cfg->event_type) { + case EM_EVENT_TYPE_SW: + has_uarea_persistence = capa->buf.uarea_persistence ? true : false; + *err_str = "buf-pool (EM_EVENT_TYPE_SW)"; + break; + case EM_EVENT_TYPE_PACKET: + has_uarea_persistence = capa->pkt.uarea_persistence ? true : false; + *err_str = "pkt-pool (EM_EVENT_TYPE_PACKET)"; + break; + case EM_EVENT_TYPE_VECTOR: + has_uarea_persistence = capa->vector.uarea_persistence ? true : false; + *err_str = "vector-pool (EM_EVENT_TYPE_VECTOR)"; + break; + default: + has_uarea_persistence = false; + *err_str = "unknown pool-type"; + break; + } + + return has_uarea_persistence ? 0 : -1; /* 0: success, <0: not supported */ +#else + return 0; +#endif +} + +/* + * Helper to pool_create() - preallocate all events in the pool for ESV to + * maintain event state over multiple alloc- and free-operations. 
+ */ +static void +pool_prealloc(const mpool_elem_t *pool_elem) +{ + event_prealloc_hdr_t *prealloc_hdr = NULL; + uint64_t num_tot = 0; + uint64_t num = 0; + uint64_t num_free = 0; + const uint32_t size = pool_elem->pool_cfg.subpool[0].size; + list_node_t evlist; + list_node_t *node; + + list_init(&evlist); + + for (int i = 0; i < pool_elem->num_subpools; i++) + num_tot += pool_elem->pool_cfg.subpool[i].num; + + do { + prealloc_hdr = event_prealloc(pool_elem, size); + if (likely(prealloc_hdr)) { + list_add(&evlist, &prealloc_hdr->list_node); + num++; + } + } while (prealloc_hdr); + + if (unlikely(num < num_tot)) + INTERNAL_ERROR(EM_FATAL(EM_ERR_TOO_SMALL), + EM_ESCOPE_POOL_CREATE, + "alloc: events expected:%" PRIu64 " actual:%" PRIu64 "", + num_tot, num); + + while (!list_is_empty(&evlist)) { + node = list_rem_first(&evlist); + prealloc_hdr = list_node_to_prealloc_hdr(node); + em_free(prealloc_hdr->ev_hdr.event); + num_free++; + } + + if (unlikely(num_free > num)) + INTERNAL_ERROR(EM_FATAL(EM_ERR_TOO_LARGE), + EM_ESCOPE_POOL_CREATE, + "free: events expected:%" PRIu64 " actual:%" PRIu64 "", + num, num_free); +} + +/* + * pool_create() helper: sort subpool cfg in ascending order based on buf size + */ +static void +sort_pool_cfg(const em_pool_cfg_t *pool_cfg, em_pool_cfg_t *sorted_cfg /*out*/) +{ + const int num_subpools = pool_cfg->num_subpools; + + *sorted_cfg = *pool_cfg; + + for (int i = 0; i < num_subpools - 1; i++) { + int idx = i; /* array index containing smallest size */ + + for (int j = i + 1; j < num_subpools; j++) { + if (sorted_cfg->subpool[j].size < + sorted_cfg->subpool[idx].size) + idx = j; /* store idx to smallest */ + } + + /* min size at [idx], swap with [i] */ + if (idx != i) { + uint32_t size = sorted_cfg->subpool[i].size; + uint32_t num = sorted_cfg->subpool[i].num; + uint32_t cache_size = sorted_cfg->subpool[i].cache_size; + + sorted_cfg->subpool[i] = sorted_cfg->subpool[idx]; + + sorted_cfg->subpool[idx].size = size; + sorted_cfg->subpool[idx].num = num; + sorted_cfg->subpool[idx].cache_size = cache_size; + } + } +} + +/* + * pool_create() helper: set pool event-cache size. + * + * Set the requested subpool cache-size based on user provided value and + * limit set by odp-pool-capability. + * Requested value can be larger than odp-max, use odp--max in this + * case. + * Verification against odp-min value done in invalid_pool_cfg(). + */ +static void +set_poolcache_size(em_pool_cfg_t *pool_cfg) +{ + const odp_pool_capability_t *capa = + &em_shm->mpool_tbl.odp_pool_capability; + int num_subpools = pool_cfg->num_subpools; + uint32_t max_cache_size; + + if (pool_cfg->event_type == EM_EVENT_TYPE_SW) + max_cache_size = capa->buf.max_cache_size; + else if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) + max_cache_size = capa->pkt.max_cache_size; + else /* EM_EVENT_TYPE_VECTOR */ + max_cache_size = capa->vector.max_cache_size; + + for (int i = 0; i < num_subpools; i++) { + if (max_cache_size < pool_cfg->subpool[i].cache_size) + pool_cfg->subpool[i].cache_size = max_cache_size; + } +} + +/* + * pool_create() helper: determine payload alignment. 
+ */ +static int +set_align(const em_pool_cfg_t *pool_cfg, + uint32_t *align_offset /*out*/, uint32_t *odp_align /*out*/) +{ + const odp_pool_capability_t *capa = + &em_shm->mpool_tbl.odp_pool_capability; + uint32_t offset = 0; + uint32_t align = ODP_CACHE_LINE_SIZE; + + /* Pool-specific param overrides config file 'align_offset' value */ + if (pool_cfg->align_offset.in_use) + offset = pool_cfg->align_offset.value; /* pool cfg */ + else + offset = em_shm->opt.pool.align_offset; /* cfg file */ + + /* Set subpool minimum alignment */ + if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) { + if (align > capa->pkt.max_align) + align = capa->pkt.max_align; + } else { + if (align > capa->buf.max_align) + align = capa->buf.max_align; + } + + *align_offset = offset; + *odp_align = align; + + /* verify alignment requirements */ + if (!POWEROF2(align) || align <= offset) + return -1; + + return 0; +} + +/* + * pool_create() helper: determine user area size. + */ +static int +set_uarea_size(const em_pool_cfg_t *pool_cfg, size_t *uarea_size/*out*/) +{ + size_t size = 0; + size_t max_size = 0; + const odp_pool_capability_t *capa = + &em_shm->mpool_tbl.odp_pool_capability; + + if (pool_cfg->user_area.in_use) /* use pool-cfg */ + size = pool_cfg->user_area.size; + else /* use cfg-file */ + size = em_shm->opt.pool.user_area_size; + + if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) + max_size = MIN(capa->pkt.max_uarea_size, EM_EVENT_USER_AREA_MAX_SIZE); + else if (pool_cfg->event_type == EM_EVENT_TYPE_VECTOR) + max_size = MIN(capa->vector.max_uarea_size, EM_EVENT_USER_AREA_MAX_SIZE); + else if (size > 0) /* EM_EVENT_TYPE_SW: bufs */ + max_size = MIN(capa->buf.max_uarea_size, EM_EVENT_USER_AREA_MAX_SIZE); + + if (size > max_size) + return -1; + + *uarea_size = size; + + return 0; +} + +/* + * pool_create() helper: set the pkt headroom + */ +static int +set_pkt_headroom(const em_pool_cfg_t *pool_cfg, + uint32_t *pkt_headroom /*out*/, + uint32_t *max_headroom /*out, for err print only*/) +{ + const odp_pool_capability_t *capa = + &em_shm->mpool_tbl.odp_pool_capability; + /* default value from cfg file */ + uint32_t headroom = em_shm->opt.pool.pkt_headroom; + + /* Pool-specific param overrides config file value */ + if (pool_cfg->pkt.headroom.in_use) + headroom = pool_cfg->pkt.headroom.value; + + *pkt_headroom = headroom; + *max_headroom = capa->pkt.max_headroom; + + if (unlikely(headroom > capa->pkt.max_headroom)) + return -1; + + return 0; +} + +/** Helper to create_subpools() */ +static void set_pool_params_stats(odp_pool_stats_opt_t *param_stats /*out*/, + const odp_pool_stats_opt_t *capa_stats, + const em_pool_stats_opt_t *stats_opt) +{ + param_stats->all = 0; + + if (capa_stats->bit.available) + param_stats->bit.available = stats_opt->available; + + if (capa_stats->bit.alloc_ops) + param_stats->bit.alloc_ops = stats_opt->alloc_ops; + + if (capa_stats->bit.alloc_fails) + param_stats->bit.alloc_fails = stats_opt->alloc_fails; + + if (capa_stats->bit.free_ops) + param_stats->bit.free_ops = stats_opt->free_ops; + + if (capa_stats->bit.total_ops) + param_stats->bit.total_ops = stats_opt->total_ops; + + if (capa_stats->bit.cache_alloc_ops) + param_stats->bit.cache_alloc_ops = stats_opt->cache_alloc_ops; + + if (capa_stats->bit.cache_available) + param_stats->bit.cache_available = stats_opt->cache_available; + + if (capa_stats->bit.cache_free_ops) + param_stats->bit.cache_free_ops = stats_opt->cache_free_ops; + + if (capa_stats->bit.thread_cache_available) + param_stats->bit.thread_cache_available = 
stats_opt->core_cache_available; +} + +/** Helper to create_subpools() */ +static void set_pool_params_pkt(odp_pool_param_t *pool_params /* out */, + const em_pool_cfg_t *pool_cfg, + uint32_t size, uint32_t num, uint32_t cache_size, + uint32_t align_offset, uint32_t odp_align, + uint32_t uarea_size, uint32_t pkt_headroom) +{ + const odp_pool_capability_t *capa = &em_shm->mpool_tbl.odp_pool_capability; + + odp_pool_param_init(pool_params); + + pool_params->type = ODP_POOL_PACKET; + /* num == max_num, helps pool-info stats calculation */ + pool_params->pkt.num = num; + pool_params->pkt.max_num = num; + + if (size > align_offset) + size = size - align_offset; + else + size = 1; /* 0:default, can be big => use 1 */ + /* len == max_len */ + pool_params->pkt.len = size; + pool_params->pkt.max_len = size; + pool_params->pkt.seg_len = size; + pool_params->pkt.align = odp_align; + /* + * Reserve space for the event header in each packet's + * ODP-user-area: + */ + pool_params->pkt.uarea_size = sizeof(event_hdr_t) + uarea_size; + /* + * Set the pkt headroom. + * Make sure the alloc-alignment fits into the headroom. + */ + pool_params->pkt.headroom = pkt_headroom; + if (pkt_headroom < align_offset) + pool_params->pkt.headroom = align_offset; + + pool_params->pkt.cache_size = cache_size; + + /* Pkt pool statistics */ + if (pool_cfg->stats_opt.in_use) { + set_pool_params_stats(&pool_params->stats, &capa->pkt.stats, + &pool_cfg->stats_opt.opt); + } else { + set_pool_params_stats(&pool_params->stats, &capa->pkt.stats, + &em_shm->opt.pool.statistics);/*from cnf file*/ + } +} + +static void set_pool_params_vector(odp_pool_param_t *pool_params /* out */, + const em_pool_cfg_t *pool_cfg, + uint32_t size, uint32_t num, + uint32_t cache_size, uint32_t uarea_size) +{ + const odp_pool_capability_t *capa = &em_shm->mpool_tbl.odp_pool_capability; + + odp_pool_param_init(pool_params); + + pool_params->type = ODP_POOL_VECTOR; + pool_params->vector.num = num; + pool_params->vector.max_size = size; + /* Reserve space for the EM event header in the vector's ODP-user-area */ + pool_params->vector.uarea_size = sizeof(event_hdr_t) + uarea_size; + pool_params->vector.cache_size = cache_size; + + /* Vector pool statistics */ + if (pool_cfg->stats_opt.in_use) + set_pool_params_stats(&pool_params->stats, &capa->vector.stats, + &pool_cfg->stats_opt.opt); + else + set_pool_params_stats(&pool_params->stats, &capa->vector.stats, + &em_shm->opt.pool.statistics); +} + +/** Helper to create_subpools() */ +static void set_pool_params_buf(odp_pool_param_t *pool_params /* out */, + const em_pool_cfg_t *pool_cfg, + uint32_t size, uint32_t num, uint32_t cache_size, + uint32_t align_offset, uint32_t odp_align, + uint32_t uarea_size) +{ + const odp_pool_capability_t *capa = &em_shm->mpool_tbl.odp_pool_capability; + + odp_pool_param_init(pool_params); + + pool_params->type = ODP_POOL_BUFFER; + pool_params->buf.num = num; + pool_params->buf.size = size; + if (align_offset) + pool_params->buf.size += 32 - align_offset; + pool_params->buf.align = odp_align; + pool_params->buf.uarea_size = sizeof(event_hdr_t) + uarea_size; + pool_params->buf.cache_size = cache_size; + + /* Buf pool statistics */ + if (pool_cfg->stats_opt.in_use) + set_pool_params_stats(&pool_params->stats, &capa->buf.stats, + &pool_cfg->stats_opt.opt); + else + set_pool_params_stats(&pool_params->stats, &capa->buf.stats, + &em_shm->opt.pool.statistics); +} + +static int +create_subpools(const em_pool_cfg_t *pool_cfg, + uint32_t align_offset, uint32_t odp_align, + uint32_t 
uarea_size, uint32_t pkt_headroom, + mpool_elem_t *mpool_elem /*out*/) +{ + const int num_subpools = pool_cfg->num_subpools; + mpool_tbl_t *const mpool_tbl = &em_shm->mpool_tbl; + + for (int i = 0; i < num_subpools; i++) { + char pool_name[ODP_POOL_NAME_LEN]; + odp_pool_param_t pool_params; + uint32_t size = pool_cfg->subpool[i].size; + uint32_t num = pool_cfg->subpool[i].num; + uint32_t cache_size = pool_cfg->subpool[i].cache_size; + + if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) { + set_pool_params_pkt(&pool_params /* out */, pool_cfg, + size, num, cache_size, + align_offset, odp_align, + uarea_size, pkt_headroom); + } else if (pool_cfg->event_type == EM_EVENT_TYPE_VECTOR) { + set_pool_params_vector(&pool_params /* out */, pool_cfg, + size, num, cache_size, + uarea_size); + } else { /* pool_cfg->event_type == EM_EVENT_TYPE_SW */ + set_pool_params_buf(&pool_params /* out */, pool_cfg, + size, num, cache_size, + align_offset, odp_align, uarea_size); + } + + mpool_elem->size[i] = pool_cfg->subpool[i].size; + mpool_elem->stats_opt = pool_params.stats; + + snprintf(pool_name, sizeof(pool_name), "%" PRI_POOL ":%d-%s", + mpool_elem->em_pool, i, mpool_elem->name); + pool_name[sizeof(pool_name) - 1] = '\0'; + + odp_pool_t odp_pool = odp_pool_create(pool_name, &pool_params); + + if (unlikely(odp_pool == ODP_POOL_INVALID)) + return -1; + + mpool_elem->odp_pool[i] = odp_pool; + mpool_elem->num_subpools++; /* created subpools for delete */ + + int odp_pool_idx = odp_pool_index(odp_pool); + + if (unlikely(odp_pool_idx < 0)) + return -2; + + /* Store mapping from odp-pool (idx) to em-pool & subpool */ + mpool_tbl->pool_subpool_odp2em[odp_pool_idx].pool = + (uint32_t)(uintptr_t)mpool_elem->em_pool; + mpool_tbl->pool_subpool_odp2em[odp_pool_idx].subpool = i; + + /* odp_pool_print(odp_pool); */ + } + + return 0; +} + +em_pool_t +pool_create(const char *name, em_pool_t req_pool, const em_pool_cfg_t *pool_cfg) +{ + const em_event_type_t pool_evtype = pool_cfg->event_type; + int err = 0; + + /* Allocate a free EM pool */ + const em_pool_t pool = pool_alloc(req_pool/* requested or undef*/); + + if (unlikely(pool == EM_POOL_UNDEF)) + return EM_POOL_UNDEF; + + mpool_elem_t *mpool_elem = pool_elem_get(pool); + + /* Sanity check */ + if (!mpool_elem || mpool_elem->em_pool != pool) + return EM_POOL_UNDEF; + + mpool_elem->event_type = pool_evtype; + /* Store successfully created subpools later */ + mpool_elem->num_subpools = 0; + /* Store the event pool name, if given */ + if (name && *name) { + strncpy(mpool_elem->name, name, sizeof(mpool_elem->name)); + mpool_elem->name[sizeof(mpool_elem->name) - 1] = '\0'; + } else { + mpool_elem->name[0] = '\0'; + } + + em_pool_cfg_t sorted_cfg; + + /* + * Sort the subpool cfg in ascending order based on the buffer size + */ + sort_pool_cfg(pool_cfg, &sorted_cfg/*out*/); + /* Use sorted_cfg instead of pool_cfg from here on */ + + /* + * Set the cache-size of each subpool in the EM-pool + */ + set_poolcache_size(&sorted_cfg); + + /* Store the sorted config */ + mpool_elem->pool_cfg = sorted_cfg; + + /* + * Event payload alignment requirement for the pool + */ + uint32_t align_offset = 0; + uint32_t odp_align = 0; + + /* align only valid for bufs and pkts */ + if (pool_evtype == EM_EVENT_TYPE_SW || + pool_evtype == EM_EVENT_TYPE_PACKET) { + err = set_align(&sorted_cfg, &align_offset/*out*/, + &odp_align/*out*/); + if (unlikely(err)) { + INTERNAL_ERROR(EM_ERR_TOO_LARGE, EM_ESCOPE_POOL_CREATE, + "EM-pool:\"%s\" align mismatch:\n" + "align:%u cfg:align_offset:%u", + name, 
odp_align, align_offset); + goto error; + } + } + /* store the align offset, needed in pkt-alloc */ + mpool_elem->align_offset = align_offset; + + /* + * Event user area size. + * Pool-specific param overrides config file 'user_area_size' value + */ + size_t uarea_size = 0; + + err = set_uarea_size(&sorted_cfg, &uarea_size/*out*/); + if (unlikely(err)) { + INTERNAL_ERROR(EM_ERR_TOO_LARGE, EM_ESCOPE_POOL_CREATE, + "EM-pool:\"%s\" invalid uarea config: req.size:%zu", + name, uarea_size); + goto error; + } + + /* store the user_area sizes, needed in alloc */ + mpool_elem->user_area.size = uarea_size & UINT16_MAX; + + /* + * Set the headroom for events in EM packet pools + */ + uint32_t pkt_headroom = 0; + uint32_t max_headroom = 0; + + if (pool_evtype == EM_EVENT_TYPE_PACKET) { + err = set_pkt_headroom(&sorted_cfg, &pkt_headroom/*out*/, + &max_headroom/*out*/); + if (unlikely(err)) { + INTERNAL_ERROR(EM_ERR_TOO_LARGE, EM_ESCOPE_POOL_CREATE, + "EM-pool:\"%s\" invalid pkt headroom:\n" + "headroom:%u vs. max:headroom:%u", + name, pkt_headroom, max_headroom); + goto error; + } + } + + /* + * Create the subpools for the EM event-pool. + * Each EM subpool is an ODP pool. + */ + err = create_subpools(&sorted_cfg, align_offset, odp_align, + (uint32_t)uarea_size, pkt_headroom, mpool_elem /*out*/); + if (unlikely(err)) { + INTERNAL_ERROR(EM_FATAL(EM_ERR_ALLOC_FAILED), + EM_ESCOPE_POOL_CREATE, + "EM-pool:\"%s\" create fails:%d\n" + "subpools req:%d vs. subpools created:%d", + name, err, sorted_cfg.num_subpools, + mpool_elem->num_subpools); + goto error; + } + + /* + * ESV: preallocate all events in the pool + */ + if (esv_enabled() && em_shm->opt.esv.prealloc_pools) + pool_prealloc(mpool_elem); + + /* Success! */ + return mpool_elem->em_pool; + +error: + (void)pool_delete(pool); + return EM_POOL_UNDEF; +} + +em_status_t +pool_delete(em_pool_t pool) +{ + mpool_tbl_t *const mpool_tbl = &em_shm->mpool_tbl; + mpool_elem_t *const mpool_elem = pool_elem_get(pool); + + if (unlikely(mpool_elem == NULL || !pool_allocated(mpool_elem))) + return EM_ERR_BAD_ARG; + + for (int i = 0; i < mpool_elem->num_subpools; i++) { + odp_pool_t odp_pool = mpool_elem->odp_pool[i]; + int odp_pool_idx; + int ret; + + if (odp_pool == ODP_POOL_INVALID) + return EM_ERR_NOT_FOUND; + + odp_pool_idx = odp_pool_index(odp_pool); + + ret = odp_pool_destroy(odp_pool); + if (unlikely(ret)) + return EM_ERR_LIB_FAILED; + + mpool_elem->odp_pool[i] = ODP_POOL_INVALID; + mpool_elem->size[i] = 0; + + /* Clear mapping from odp-pool (idx) to em-pool & subpool */ + if (unlikely(odp_pool_idx < 0)) + return EM_ERR_BAD_ID; + mpool_tbl->pool_subpool_odp2em[odp_pool_idx].both = pool_subpool_undef.both; + } + + mpool_elem->name[0] = '\0'; + mpool_elem->event_type = EM_EVENT_TYPE_UNDEF; + mpool_elem->num_subpools = 0; + + return pool_free(pool); +} + +em_pool_t +pool_find(const char *name) +{ + if (name && *name) { + for (int i = 0; i < EM_CONFIG_POOLS; i++) { + const mpool_elem_t *mpool_elem = + &em_shm->mpool_tbl.pool[i]; + + if (pool_allocated(mpool_elem) && + !strncmp(name, mpool_elem->name, EM_POOL_NAME_LEN)) + return mpool_elem->em_pool; + } + } + + return EM_POOL_UNDEF; +} + +unsigned int +pool_count(void) +{ + return env_atomic32_get(&em_shm->pool_count); +} + +#define POOL_INFO_HDR_STR \ +" id name type offset uarea sizes [size count(used/free) cache]\n" + +#define POOL_INFO_SUBSTR_FMT \ +"%d:[sz=%" PRIu32 " n=%" PRIu32 "(%" PRIu32 "/%" PRIu32 ") $=%" PRIu32 "]" + +#define POOL_INFO_SUBSTR_NO_STATS_FMT \ +"%d:[sz=%" PRIu32 " n=%" PRIu32 "(-/-) 
cache=%" PRIu32 "]" + +void pool_info_print_hdr(unsigned int num_pools) +{ + if (num_pools == 1) { + EM_PRINT("EM Event Pool\n" + "-------------\n" + POOL_INFO_HDR_STR); + } else { + EM_PRINT("EM Event Pools:%2u\n" + "-----------------\n" + POOL_INFO_HDR_STR, num_pools); + } +} + +void pool_info_print(em_pool_t pool) +{ + em_pool_info_t pool_info; + em_status_t stat; + const char *pool_type; + + stat = em_pool_info(pool, &pool_info/*out*/); + if (unlikely(stat != EM_OK)) { + EM_PRINT(" %-6" PRI_POOL " %-16s n/a n/a n/a n/a [n/a]\n", + pool, "err:n/a"); + return; + } + + if (pool_info.event_type == EM_EVENT_TYPE_VECTOR) + pool_type = "vec"; + else if (pool_info.event_type == EM_EVENT_TYPE_PACKET) + pool_type = "pkt"; + else + pool_type = "buf"; + + EM_PRINT(" %-6" PRI_POOL " %-16s %4s %02u %02zu %02u ", + pool, pool_info.name, pool_type, + pool_info.align_offset, pool_info.user_area_size, + pool_info.num_subpools); + + for (int i = 0; i < pool_info.num_subpools; i++) { + char subpool_str[42]; + + if (pool_info.subpool[i].used || pool_info.subpool[i].free) { + snprintf(subpool_str, sizeof(subpool_str), + POOL_INFO_SUBSTR_FMT, i, + pool_info.subpool[i].size, + pool_info.subpool[i].num, + pool_info.subpool[i].used, + pool_info.subpool[i].free, + pool_info.subpool[i].cache_size); + } else { + snprintf(subpool_str, sizeof(subpool_str), + POOL_INFO_SUBSTR_NO_STATS_FMT, i, + pool_info.subpool[i].size, + pool_info.subpool[i].num, + pool_info.subpool[i].cache_size); + } + subpool_str[sizeof(subpool_str) - 1] = '\0'; + EM_PRINT(" %-42s", subpool_str); + } + + EM_PRINT("\n"); +} + +#define POOL_STATS_HDR_STR \ +"EM pool statistics for pool %" PRI_POOL ":\n\n"\ +"Subpool Available Alloc_ops Alloc_fails Free_ops Total_ops Cache_available" \ +" Cache_alloc_ops Cache_free_ops\n"\ +"--------------------------------------------------------------------------" \ +"-------------------------------\n%s" + +#define POOL_STATS_LEN 107 +#define POOL_STATS_FMT "%-8u%-10lu%-10lu%-12lu%-9lu%-10lu%-16lu%-16lu%-15lu\n" + +void pool_stats_print(em_pool_t pool) +{ + em_status_t stat; + em_pool_stats_t pool_stats; + const em_pool_subpool_stats_t *subpool_stats; + int len = 0; + int n_print = 0; + const mpool_elem_t *pool_elem = pool_elem_get(pool); + const int stats_str_len = EM_MAX_SUBPOOLS * POOL_STATS_LEN + 1; + char stats_str[stats_str_len]; + + if (pool_elem == NULL || !pool_allocated(pool_elem)) { + EM_LOG(EM_LOG_ERR, "EM-pool:%" PRI_POOL " invalid\n", pool); + return; + } + + stat = em_pool_stats(pool, &pool_stats); + if (unlikely(stat != EM_OK)) { + EM_PRINT("Failed to fetch EM pool statistics\n"); + return; + } + + for (uint32_t i = 0; i < pool_stats.num_subpools; i++) { + subpool_stats = &pool_stats.subpool_stats[i]; + n_print = snprintf(stats_str + len, stats_str_len - len, + POOL_STATS_FMT, + i, subpool_stats->available, + subpool_stats->alloc_ops, + subpool_stats->alloc_fails, + subpool_stats->free_ops, + subpool_stats->total_ops, + subpool_stats->cache_available, + subpool_stats->cache_alloc_ops, + subpool_stats->cache_free_ops); + + /* Not enough space to hold more subpool stats */ + if (n_print >= stats_str_len - len) + break; + + len += n_print; + } + + stats_str[len] = '\0'; + EM_PRINT(POOL_STATS_HDR_STR, pool, stats_str); +} + +#define POOL_STATS_SELECTED_HDR_STR \ +"Selected EM pool statistics for pool %" PRI_POOL ":\n\n"\ +"Selected statistic counters: %s\n\n"\ +"Subpool Available Alloc_ops Alloc_fails Free_ops Total_ops Cache_available" \ +" Cache_alloc_ops Cache_free_ops\n"\ 
+"--------------------------------------------------------------------------" \ +"-------------------------------\n%s" + +#define OPT_STR_LEN 150 + +static void fill_opt_str(char *opt_str, const em_pool_stats_opt_t *opt) +{ + int n_print; + int len = 0; + + if (opt->available) { + n_print = snprintf(opt_str + len, 12, "%s", "available"); + len += n_print; + } + + if (opt->alloc_ops) { + n_print = snprintf(opt_str + len, 12, "%s", len ? ", alloc_ops" : "alloc_ops"); + len += n_print; + } + + if (opt->alloc_fails) { + n_print = snprintf(opt_str + len, 14, "%s", len ? ", alloc_fails" : "alloc_fails"); + len += n_print; + } + + if (opt->free_ops) { + n_print = snprintf(opt_str + len, 11, "%s", len ? ", free_ops" : "free_ops"); + len += n_print; + } + + if (opt->total_ops) { + n_print = snprintf(opt_str + len, 12, "%s", len ? ", total_ops" : "total_ops"); + len += n_print; + } + + if (opt->cache_available) { + n_print = snprintf(opt_str + len, 18, "%s", + len ? ", cache_available" : "cache_available"); + len += n_print; + } + + if (opt->cache_alloc_ops) { + n_print = snprintf(opt_str + len, 18, "%s", + len ? ", cache_alloc_ops" : "cache_alloc_ops"); + len += n_print; + } + + if (opt->cache_free_ops) + snprintf(opt_str + len, 17, "%s", len ? ", cache_free_ops" : "cache_free_ops"); +} + +void pool_stats_selected_print(em_pool_t pool, const em_pool_stats_opt_t *opt) +{ + em_status_t stat; + em_pool_stats_selected_t pool_stats = {0}; + const em_pool_subpool_stats_selected_t *subpool_stats; + int len = 0; + int n_print = 0; + const mpool_elem_t *pool_elem = pool_elem_get(pool); + char opt_str[OPT_STR_LEN]; + const int stats_str_len = EM_MAX_SUBPOOLS * POOL_STATS_LEN + 1; + char stats_str[stats_str_len]; + + if (pool_elem == NULL || !pool_allocated(pool_elem)) { + EM_LOG(EM_LOG_ERR, "EM-pool:%" PRI_POOL " invalid\n", pool); + return; + } + + stat = em_pool_stats_selected(pool, &pool_stats, opt); + if (unlikely(stat != EM_OK)) { + EM_PRINT("Failed to fetch EM selected pool statistics\n"); + return; + } + + for (uint32_t i = 0; i < pool_stats.num_subpools; i++) { + subpool_stats = &pool_stats.subpool_stats[i]; + + n_print = snprintf(stats_str + len, stats_str_len - len, + POOL_STATS_FMT, + i, + subpool_stats->available, + subpool_stats->alloc_ops, + subpool_stats->alloc_fails, + subpool_stats->free_ops, + subpool_stats->total_ops, + subpool_stats->cache_available, + subpool_stats->cache_alloc_ops, + subpool_stats->cache_free_ops); + + /* Not enough space to hold more subpool stats */ + if (n_print >= stats_str_len - len) + break; + + len += n_print; + } + stats_str[len] = '\0'; + + /* Fill selected statistic counters */ + fill_opt_str(opt_str, opt); + + EM_PRINT(POOL_STATS_SELECTED_HDR_STR, pool, opt_str, stats_str); +} + +#define SUBPOOL_STATS_HDR_STR \ +"EM subpool statistics for pool %" PRI_POOL ":\n\n"\ +"Subpool Available Alloc_ops Alloc_fails Free_ops Total_ops Cache_available" \ +" Cache_alloc_ops Cache_free_ops\n"\ +"--------------------------------------------------------------------------" \ +"-------------------------------\n%s" + +void subpools_stats_print(em_pool_t pool, const int subpools[], int num_subpools) +{ + int num_stats; + em_pool_subpool_stats_t stats[num_subpools]; + int len = 0; + int n_print = 0; + const mpool_elem_t *pool_elem = pool_elem_get(pool); + const int stats_str_len = num_subpools * POOL_STATS_LEN + 1; + char stats_str[stats_str_len]; + + if (pool_elem == NULL || !pool_allocated(pool_elem)) { + EM_LOG(EM_LOG_ERR, "EM-pool:%" PRI_POOL " invalid\n", pool); + return; + 
} + + num_stats = em_pool_subpool_stats(pool, subpools, num_subpools, stats); + if (unlikely(!num_stats || num_stats > num_subpools)) { + EM_LOG(EM_LOG_ERR, "Failed to fetch subpool statistics\n"); + return; + } + + /* Print subpool stats */ + for (int i = 0; i < num_stats; i++) { + n_print = snprintf(stats_str + len, stats_str_len - len, + POOL_STATS_FMT, + subpools[i], stats[i].available, stats[i].alloc_ops, + stats[i].alloc_fails, stats[i].free_ops, + stats[i].total_ops, stats[i].cache_available, + stats[i].cache_alloc_ops, stats[i].cache_free_ops); + + /* Not enough space to hold more subpool stats */ + if (n_print >= stats_str_len - len) + break; + + len += n_print; + } + + stats_str[len] = '\0'; + EM_PRINT(SUBPOOL_STATS_HDR_STR, pool, stats_str); +} + +#define SUBPOOL_STATS_SELECTED_HDR_STR \ +"Selected EM subpool statistics for pool %" PRI_POOL ":\n\n"\ +"Selected statistic counters: %s\n\n"\ +"Subpool Available Alloc_ops Alloc_fails Free_ops Total_ops Cache_available" \ +" Cache_alloc_ops Cache_free_ops\n"\ +"--------------------------------------------------------------------------" \ +"-------------------------------\n%s" + +void subpools_stats_selected_print(em_pool_t pool, const int subpools[], + int num_subpools, const em_pool_stats_opt_t *opt) +{ + int num_stats; + char opt_str[OPT_STR_LEN]; + em_pool_subpool_stats_selected_t stats[num_subpools]; + int len = 0; + int n_print = 0; + const mpool_elem_t *pool_elem = pool_elem_get(pool); + const int stats_str_len = num_subpools * POOL_STATS_LEN + 1; + char stats_str[stats_str_len]; + + if (pool_elem == NULL || !pool_allocated(pool_elem)) { + EM_LOG(EM_LOG_ERR, "EM-pool:%" PRI_POOL " invalid\n", pool); + return; + } + + memset(stats, 0, sizeof(stats)); + num_stats = em_pool_subpool_stats_selected(pool, subpools, num_subpools, stats, opt); + if (unlikely(!num_stats || num_stats > num_subpools)) { + EM_LOG(EM_LOG_ERR, "Failed to fetch selected subpool statistics\n"); + return; + } + + /* Print subpool stats */ + for (int i = 0; i < num_stats; i++) { + n_print = snprintf(stats_str + len, stats_str_len - len, + POOL_STATS_FMT, + subpools[i], stats[i].available, stats[i].alloc_ops, + stats[i].alloc_fails, stats[i].free_ops, + stats[i].total_ops, stats[i].cache_available, + stats[i].cache_alloc_ops, stats[i].cache_free_ops); + + /* Not enough space to hold more subpool stats */ + if (n_print >= stats_str_len - len) + break; + + len += n_print; + } + stats_str[len] = '\0'; + + /* Fill selected statistic counters */ + fill_opt_str(opt_str, opt); + EM_PRINT(SUBPOOL_STATS_SELECTED_HDR_STR, pool, opt_str, stats_str); +} + +void print_pool_elem_info(void) +{ + EM_PRINT("\n" + "pool-elem size: %zu B\n", + sizeof(mpool_elem_t)); + + EM_DBG("\t\toffset\tsize\n" + "\t\t------\t-----\n" + "event_type:\t%3zu B\t%3zu B\n" + "align_offset:\t%3zu B\t%3zu B\n" + "user_area info:\t%3zu B\t%3zu B\n" + "num_subpools:\t%3zu B\t%3zu B\n" + "size[]:\t\t%3zu B\t%3zu B\n" + "odp_pool[]:\t%3zu B\t%3zu B\n" + "em_pool:\t%3zu B\t%3zu B\n" + "objpool_elem:\t%3zu B\t%3zu B\n" + "stats_opt:\t%3zu B\t%3zu B\n" + "pool_cfg:\t%3zu B\t%3zu B\n" + "name[]:\t\t%3zu B\t%3zu B\n", + offsetof(mpool_elem_t, event_type), sizeof_field(mpool_elem_t, event_type), + offsetof(mpool_elem_t, align_offset), sizeof_field(mpool_elem_t, align_offset), + offsetof(mpool_elem_t, user_area), sizeof_field(mpool_elem_t, user_area), + offsetof(mpool_elem_t, num_subpools), sizeof_field(mpool_elem_t, num_subpools), + offsetof(mpool_elem_t, size), sizeof_field(mpool_elem_t, size), + 
offsetof(mpool_elem_t, odp_pool), sizeof_field(mpool_elem_t, odp_pool), + offsetof(mpool_elem_t, em_pool), sizeof_field(mpool_elem_t, em_pool), + offsetof(mpool_elem_t, objpool_elem), sizeof_field(mpool_elem_t, objpool_elem), + offsetof(mpool_elem_t, stats_opt), sizeof_field(mpool_elem_t, stats_opt), + offsetof(mpool_elem_t, pool_cfg), sizeof_field(mpool_elem_t, pool_cfg), + offsetof(mpool_elem_t, name), sizeof_field(mpool_elem_t, name)); + + EM_PRINT("\n"); +} diff --git a/src/event_machine_dispatcher.c b/src/event_machine_dispatcher.c index 1e6984b1..f5c4d775 100644 --- a/src/event_machine_dispatcher.c +++ b/src/event_machine_dispatcher.c @@ -1,345 +1,345 @@ -/* - * Copyright (c) 2015-2023, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "em_include.h" -#include "em_dispatcher_inline.h" - -static const em_dispatch_opt_t dispatch_opt_default = { - .burst_size = EM_SCHED_MULTI_MAX_BURST, - .__internal_check = EM_CHECK_INIT_CALLED - /* other members initialized to 0 or NULL as per C standard */ -}; - -uint64_t em_dispatch(uint64_t rounds /* 0 = forever */) -{ - uint64_t events; - - em_locm_t *const locm = &em_locm; - const bool do_input_poll = locm->do_input_poll; - const bool do_output_drain = locm->do_output_drain; - const bool do_schedule_pause = em_shm->opt.dispatch.sched_pause; - - if (locm->is_sched_paused) { - odp_schedule_resume(); - locm->is_sched_paused = false; - } - - if (do_input_poll || do_output_drain) - events = dispatch_with_userfn(rounds, do_input_poll, do_output_drain); - else - events = dispatch_no_userfn(rounds); - - if (do_schedule_pause) { - /* pause scheduling before exiting the dispatch loop */ - int round_events; - - odp_schedule_pause(); - locm->is_sched_paused = true; - - /* empty the locally pre-scheduled events (if any) */ - do { - round_events = dispatch_round(ODP_SCHED_NO_WAIT, - EM_SCHED_MULTI_MAX_BURST, NULL); - events += round_events; - } while (round_events > 0); - } - - return events; -} - -void em_dispatch_opt_init(em_dispatch_opt_t *opt) -{ - if (unlikely(!opt)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_OPT_INIT, - "Bad argument, opt=NULL"); - return; - } - - *opt = dispatch_opt_default; -} - -static inline em_status_t -dispatch_duration(const em_dispatch_duration_t *duration, - const em_dispatch_opt_t *opt, - em_dispatch_results_t *results /*out, optional*/) -{ - em_locm_t *const locm = &em_locm; - const bool do_input_poll = locm->do_input_poll && !opt->skip_input_poll; - const bool do_output_drain = locm->do_output_drain && !opt->skip_output_drain; - const bool do_sched_pause = opt->sched_pause; - uint64_t events; - - if (locm->is_sched_paused) { - odp_schedule_resume(); - locm->is_sched_paused = false; - } - - if (do_input_poll || do_output_drain) - events = dispatch_duration_with_userfn(duration, opt, results, - do_input_poll, do_output_drain); - else - events = dispatch_duration_no_userfn(duration, opt, results); - - /* pause scheduling before exiting the dispatch loop */ - if (do_sched_pause) { - odp_schedule_pause(); - locm->is_sched_paused = true; - - int round_events; - uint64_t rounds = 0; - uint16_t burst_size = opt->burst_size; - - /* empty the locally pre-scheduled events (if any) */ - do { - round_events = dispatch_round(ODP_SCHED_NO_WAIT, - burst_size, opt); - events += round_events; - rounds++; - } while (round_events > 0); - - if (results) { - results->rounds += rounds; - results->events = events; - } - } - - return EM_OK; -} - -em_status_t em_dispatch_duration(const em_dispatch_duration_t *duration, - const em_dispatch_opt_t *opt /* optional */, - em_dispatch_results_t *results /*out, optional*/) -{ - RETURN_ERROR_IF(!duration, EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_DURATION, - "Bad argument: duration=NULL"); - - if (!opt) { - opt = &dispatch_opt_default; - } else { - RETURN_ERROR_IF(opt->__internal_check != EM_CHECK_INIT_CALLED, - EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_DURATION, - "Bad argument: em_dispatch_opt_init(opt) not called"); - } - - if (EM_CHECK_LEVEL > 0) { - RETURN_ERROR_IF(opt->burst_size == 0 || opt->burst_size > EM_SCHED_MULTI_MAX_BURST, - EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_DURATION, - "Bad option: 0 < opt.burst_size (%" PRIu64 ") <= %u (max)", - opt->burst_size, EM_SCHED_MULTI_MAX_BURST); - } - - if (EM_CHECK_LEVEL > 1) { - /* 
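- * Sanity check for duration->select: every set flag bit must lie
- * below the next power of two above the highest duration flag,
- * computed below as (EM_DISPATCH_DURATION_LAST >> 1) << 2 since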
_FLAG_LAST is 'pow2 + 1' */ - const em_dispatch_duration_select_t next_pow2 = - (EM_DISPATCH_DURATION_LAST >> 1) << 2; - - RETURN_ERROR_IF(duration->select >= next_pow2, - EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_DURATION, - "Bad option: duration->select=0x%x invalid", duration->select); - RETURN_ERROR_IF(((duration->select & EM_DISPATCH_DURATION_ROUNDS && - duration->rounds == 0) || - (duration->select & EM_DISPATCH_DURATION_NS && - duration->ns == 0) || - (duration->select & EM_DISPATCH_DURATION_EVENTS && - duration->events == 0) || - (duration->select & EM_DISPATCH_DURATION_NO_EVENTS_ROUNDS && - duration->no_events.rounds == 0) || - (duration->select & EM_DISPATCH_DURATION_NO_EVENTS_NS && - duration->no_events.ns == 0)), - EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_DURATION, - "Bad option: opt.duration is zero(0)."); - } - - return dispatch_duration(duration, opt, results); -} - -em_status_t em_dispatch_ns(uint64_t ns, - const em_dispatch_opt_t *opt, - em_dispatch_results_t *results /*out*/) -{ - RETURN_ERROR_IF(ns == 0, EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_NS, - "Bad argument: ns=0"); - - if (!opt) { - opt = &dispatch_opt_default; - } else { - RETURN_ERROR_IF(opt->__internal_check != EM_CHECK_INIT_CALLED, - EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_NS, - "Bad argument: em_dispatch_opt_init(opt) not called"); - } - - if (EM_CHECK_LEVEL > 0) { - RETURN_ERROR_IF(opt->burst_size == 0 || opt->burst_size > EM_SCHED_MULTI_MAX_BURST, - EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_NS, - "Bad option: 0 < opt.burst_size (%" PRIu64 ") <= %u (max)", - opt->burst_size, EM_SCHED_MULTI_MAX_BURST); - } - - const em_dispatch_duration_t duration = { - .select = EM_DISPATCH_DURATION_NS, - .ns = ns - }; - - return dispatch_duration(&duration, opt, results); -} - -em_status_t em_dispatch_events(uint64_t events, - const em_dispatch_opt_t *opt, - em_dispatch_results_t *results /*out*/) -{ - RETURN_ERROR_IF(events == 0, EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_EVENTS, - "Bad argument: events=0"); - - if (!opt) { - opt = &dispatch_opt_default; - } else { - RETURN_ERROR_IF(opt->__internal_check != EM_CHECK_INIT_CALLED, - EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_EVENTS, - "Bad argument: em_dispatch_opt_init(opt) not called"); - } - - if (EM_CHECK_LEVEL > 0) { - RETURN_ERROR_IF(opt->burst_size == 0 || opt->burst_size > EM_SCHED_MULTI_MAX_BURST, - EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_EVENTS, - "Bad option: 0 < opt.burst_size (%" PRIu64 ") <= %u (max)", - opt->burst_size, EM_SCHED_MULTI_MAX_BURST); - } - - const em_dispatch_duration_t duration = { - .select = EM_DISPATCH_DURATION_EVENTS, - .events = events - }; - - return dispatch_duration(&duration, opt, results); -} - -em_status_t em_dispatch_rounds(uint64_t rounds, - const em_dispatch_opt_t *opt, - em_dispatch_results_t *results /*out*/) -{ - RETURN_ERROR_IF(rounds == 0, EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_ROUNDS, - "Bad argument: rounds=0"); - - if (!opt) { - opt = &dispatch_opt_default; - } else { - RETURN_ERROR_IF(opt->__internal_check != EM_CHECK_INIT_CALLED, - EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_ROUNDS, - "Bad argument: em_dispatch_opt_init(opt) not called"); - } - - if (EM_CHECK_LEVEL > 0) { - RETURN_ERROR_IF(opt->burst_size == 0 || opt->burst_size > EM_SCHED_MULTI_MAX_BURST, - EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_ROUNDS, - "Bad option: 0 < opt.burst_size (%" PRIu64 ") <= %u (max)", - opt->burst_size, EM_SCHED_MULTI_MAX_BURST); - } - - const em_dispatch_duration_t duration = { - .select = EM_DISPATCH_DURATION_ROUNDS, - .rounds = rounds - }; - - return dispatch_duration(&duration, opt, results); -} - 
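
/*
 * Illustrative usage sketch (hypothetical helper, not part of the patched
 * sources): driving the duration-based dispatch APIs above from an
 * application thread. Assumes an initialized EM instance on the calling
 * core and that <event_machine.h> has been included; the 10 ms duration
 * and the burst size of 16 are arbitrary example values.
 */
static void dispatch_for_10ms_sketch(void)
{
	em_dispatch_opt_t opt;
	em_dispatch_results_t results;
	em_status_t stat;

	em_dispatch_opt_init(&opt); /* mandatory before first use of 'opt' */
	opt.burst_size = 16;    /* 0 < burst_size <= EM_SCHED_MULTI_MAX_BURST */
	opt.sched_pause = true; /* pause ODP scheduling when returning */

	/* Dispatch for 10 ms (10 000 000 ns), then inspect the results */
	stat = em_dispatch_ns(10000000, &opt, &results);
	if (stat == EM_OK)
		EM_PRINT("Dispatched %" PRIu64 " events in %" PRIu64 " rounds\n",
			 results.events, results.rounds);
}
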
-em_status_t -em_dispatch_register_enter_cb(em_dispatch_enter_func_t func) -{ - hook_fn_t hook_fn; - em_status_t stat; - - RETURN_ERROR_IF(!EM_DISPATCH_CALLBACKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, - EM_ESCOPE_DISPATCH_REGISTER_ENTER_CB, - "EM dispatch callbacks disabled"); - - hook_fn.disp_enter = func; - stat = hook_register(DISPATCH_CALLBACK_ENTER, hook_fn); - RETURN_ERROR_IF(stat != EM_OK, stat, - EM_ESCOPE_DISPATCH_REGISTER_ENTER_CB, - "Dispatch callback register failed"); - - return EM_OK; -} - -em_status_t -em_dispatch_unregister_enter_cb(em_dispatch_enter_func_t func) -{ - hook_fn_t hook_fn; - em_status_t stat; - - RETURN_ERROR_IF(!EM_DISPATCH_CALLBACKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, - EM_ESCOPE_DISPATCH_UNREGISTER_ENTER_CB, - "EM dispatch callbacks disabled"); - - hook_fn.disp_enter = func; - stat = hook_unregister(DISPATCH_CALLBACK_ENTER, hook_fn); - RETURN_ERROR_IF(stat != EM_OK, stat, - EM_ESCOPE_DISPATCH_UNREGISTER_ENTER_CB, - "Dispatch callback unregister failed"); - - return EM_OK; -} - -em_status_t -em_dispatch_register_exit_cb(em_dispatch_exit_func_t func) -{ - hook_fn_t hook_fn; - em_status_t stat; - - RETURN_ERROR_IF(!EM_DISPATCH_CALLBACKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, - EM_ESCOPE_DISPATCH_REGISTER_EXIT_CB, - "EM dispatch callbacks disabled"); - - hook_fn.disp_exit = func; - stat = hook_register(DISPATCH_CALLBACK_EXIT, hook_fn); - RETURN_ERROR_IF(stat != EM_OK, stat, - EM_ESCOPE_DISPATCH_REGISTER_EXIT_CB, - "Dispatch callback register failed"); - return EM_OK; -} - -em_status_t -em_dispatch_unregister_exit_cb(em_dispatch_exit_func_t func) -{ - hook_fn_t hook_fn; - em_status_t stat; - - RETURN_ERROR_IF(!EM_DISPATCH_CALLBACKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, - EM_ESCOPE_DISPATCH_UNREGISTER_EXIT_CB, - "EM dispatch callbacks disabled"); - - hook_fn.disp_exit = func; - stat = hook_unregister(DISPATCH_CALLBACK_EXIT, hook_fn); - RETURN_ERROR_IF(stat != EM_OK, stat, - EM_ESCOPE_DISPATCH_UNREGISTER_EXIT_CB, - "Dispatch callback unregister failed"); - return EM_OK; -} +/* + * Copyright (c) 2015-2023, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "em_include.h" +#include "em_dispatcher_inline.h" + +static const em_dispatch_opt_t dispatch_opt_default = { + .burst_size = EM_SCHED_MULTI_MAX_BURST, + .__internal_check = EM_CHECK_INIT_CALLED + /* other members initialized to 0 or NULL as per C standard */ +}; + +uint64_t em_dispatch(uint64_t rounds /* 0 = forever */) +{ + uint64_t events; + + em_locm_t *const locm = &em_locm; + const bool do_input_poll = locm->do_input_poll; + const bool do_output_drain = locm->do_output_drain; + const bool do_schedule_pause = em_shm->opt.dispatch.sched_pause; + + if (locm->is_sched_paused) { + odp_schedule_resume(); + locm->is_sched_paused = false; + } + + if (do_input_poll || do_output_drain) + events = dispatch_with_userfn(rounds, do_input_poll, do_output_drain); + else + events = dispatch_no_userfn(rounds); + + if (do_schedule_pause) { + /* pause scheduling before exiting the dispatch loop */ + int round_events; + + odp_schedule_pause(); + locm->is_sched_paused = true; + + /* empty the locally pre-scheduled events (if any) */ + do { + round_events = dispatch_round(ODP_SCHED_NO_WAIT, + EM_SCHED_MULTI_MAX_BURST, NULL); + events += round_events; + } while (round_events > 0); + } + + return events; +} + +void em_dispatch_opt_init(em_dispatch_opt_t *opt) +{ + if (unlikely(!opt)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_OPT_INIT, + "Bad argument, opt=NULL"); + return; + } + + *opt = dispatch_opt_default; +} + +static inline em_status_t +dispatch_duration(const em_dispatch_duration_t *duration, + const em_dispatch_opt_t *opt, + em_dispatch_results_t *results /*out, optional*/) +{ + em_locm_t *const locm = &em_locm; + const bool do_input_poll = locm->do_input_poll && !opt->skip_input_poll; + const bool do_output_drain = locm->do_output_drain && !opt->skip_output_drain; + const bool do_sched_pause = opt->sched_pause; + uint64_t events; + + if (locm->is_sched_paused) { + odp_schedule_resume(); + locm->is_sched_paused = false; + } + + if (do_input_poll || do_output_drain) + events = dispatch_duration_with_userfn(duration, opt, results, + do_input_poll, do_output_drain); + else + events = dispatch_duration_no_userfn(duration, opt, results); + + /* pause scheduling before exiting the dispatch loop */ + if (do_sched_pause) { + odp_schedule_pause(); + locm->is_sched_paused = true; + + int round_events; + uint64_t rounds = 0; + uint16_t burst_size = opt->burst_size; + + /* empty the locally pre-scheduled events (if any) */ + do { + round_events = dispatch_round(ODP_SCHED_NO_WAIT, + burst_size, opt); + events += round_events; + rounds++; + } while (round_events > 0); + + if (results) { + results->rounds += rounds; + results->events = events; + } + } + + return EM_OK; +} + +em_status_t em_dispatch_duration(const em_dispatch_duration_t *duration, + const em_dispatch_opt_t *opt /* optional */, + em_dispatch_results_t *results /*out, optional*/) +{ + RETURN_ERROR_IF(!duration, EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_DURATION, + "Bad argument: duration=NULL"); + + 
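+	/*
+	 * A NULL 'opt' selects the defaults; a caller-provided 'opt' must
+	 * first be initialized with em_dispatch_opt_init(), which stamps
+	 * '__internal_check', otherwise EM_ERR_NOT_INITIALIZED is returned.
+	 */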
if (!opt) { + opt = &dispatch_opt_default; + } else { + RETURN_ERROR_IF(opt->__internal_check != EM_CHECK_INIT_CALLED, + EM_ERR_NOT_INITIALIZED, EM_ESCOPE_DISPATCH_DURATION, + "Not initialized: em_dispatch_opt_init(opt) not called"); + } + + if (EM_CHECK_LEVEL > 0) { + RETURN_ERROR_IF(opt->burst_size == 0 || opt->burst_size > EM_SCHED_MULTI_MAX_BURST, + EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_DURATION, + "Bad option: 0 < opt.burst_size (%" PRIu64 ") <= %u (max)", + opt->burst_size, EM_SCHED_MULTI_MAX_BURST); + } + + if (EM_CHECK_LEVEL > 1) { + /* _FLAG_LAST is 'pow2 + 1' */ + const em_dispatch_duration_select_t next_pow2 = + (EM_DISPATCH_DURATION_LAST >> 1) << 2; + + RETURN_ERROR_IF(duration->select >= next_pow2, + EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_DURATION, + "Bad option: duration->select=0x%x invalid", duration->select); + RETURN_ERROR_IF(((duration->select & EM_DISPATCH_DURATION_ROUNDS && + duration->rounds == 0) || + (duration->select & EM_DISPATCH_DURATION_NS && + duration->ns == 0) || + (duration->select & EM_DISPATCH_DURATION_EVENTS && + duration->events == 0) || + (duration->select & EM_DISPATCH_DURATION_NO_EVENTS_ROUNDS && + duration->no_events.rounds == 0) || + (duration->select & EM_DISPATCH_DURATION_NO_EVENTS_NS && + duration->no_events.ns == 0)), + EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_DURATION, + "Bad option: opt.duration is zero(0)."); + } + + return dispatch_duration(duration, opt, results); +} + +em_status_t em_dispatch_ns(uint64_t ns, + const em_dispatch_opt_t *opt, + em_dispatch_results_t *results /*out*/) +{ + RETURN_ERROR_IF(ns == 0, EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_NS, + "Bad argument: ns=0"); + + if (!opt) { + opt = &dispatch_opt_default; + } else { + RETURN_ERROR_IF(opt->__internal_check != EM_CHECK_INIT_CALLED, + EM_ERR_NOT_INITIALIZED, EM_ESCOPE_DISPATCH_NS, + "Not initialized: em_dispatch_opt_init(opt) not called"); + } + + if (EM_CHECK_LEVEL > 0) { + RETURN_ERROR_IF(opt->burst_size == 0 || opt->burst_size > EM_SCHED_MULTI_MAX_BURST, + EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_NS, + "Bad option: 0 < opt.burst_size (%" PRIu64 ") <= %u (max)", + opt->burst_size, EM_SCHED_MULTI_MAX_BURST); + } + + const em_dispatch_duration_t duration = { + .select = EM_DISPATCH_DURATION_NS, + .ns = ns + }; + + return dispatch_duration(&duration, opt, results); +} + +em_status_t em_dispatch_events(uint64_t events, + const em_dispatch_opt_t *opt, + em_dispatch_results_t *results /*out*/) +{ + RETURN_ERROR_IF(events == 0, EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_EVENTS, + "Bad argument: events=0"); + + if (!opt) { + opt = &dispatch_opt_default; + } else { + RETURN_ERROR_IF(opt->__internal_check != EM_CHECK_INIT_CALLED, + EM_ERR_NOT_INITIALIZED, EM_ESCOPE_DISPATCH_EVENTS, + "Not initialized: em_dispatch_opt_init(opt) not called"); + } + + if (EM_CHECK_LEVEL > 0) { + RETURN_ERROR_IF(opt->burst_size == 0 || opt->burst_size > EM_SCHED_MULTI_MAX_BURST, + EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_EVENTS, + "Bad option: 0 < opt.burst_size (%" PRIu64 ") <= %u (max)", + opt->burst_size, EM_SCHED_MULTI_MAX_BURST); + } + + const em_dispatch_duration_t duration = { + .select = EM_DISPATCH_DURATION_EVENTS, + .events = events + }; + + return dispatch_duration(&duration, opt, results); +} + +em_status_t em_dispatch_rounds(uint64_t rounds, + const em_dispatch_opt_t *opt, + em_dispatch_results_t *results /*out*/) +{ + RETURN_ERROR_IF(rounds == 0, EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_ROUNDS, + "Bad argument: rounds=0"); + + if (!opt) { + opt = &dispatch_opt_default; + } else { + RETURN_ERROR_IF(opt->__internal_check != 
EM_CHECK_INIT_CALLED, + EM_ERR_NOT_INITIALIZED, EM_ESCOPE_DISPATCH_ROUNDS, + "Not initialized: em_dispatch_opt_init(opt) not called"); + } + + if (EM_CHECK_LEVEL > 0) { + RETURN_ERROR_IF(opt->burst_size == 0 || opt->burst_size > EM_SCHED_MULTI_MAX_BURST, + EM_ERR_BAD_ARG, EM_ESCOPE_DISPATCH_ROUNDS, + "Bad option: 0 < opt.burst_size (%" PRIu64 ") <= %u (max)", + opt->burst_size, EM_SCHED_MULTI_MAX_BURST); + } + + const em_dispatch_duration_t duration = { + .select = EM_DISPATCH_DURATION_ROUNDS, + .rounds = rounds + }; + + return dispatch_duration(&duration, opt, results); +} + +em_status_t +em_dispatch_register_enter_cb(em_dispatch_enter_func_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_DISPATCH_CALLBACKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_DISPATCH_REGISTER_ENTER_CB, + "EM dispatch callbacks disabled"); + + hook_fn.disp_enter = func; + stat = hook_register(DISPATCH_CALLBACK_ENTER, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, + EM_ESCOPE_DISPATCH_REGISTER_ENTER_CB, + "Dispatch callback register failed"); + + return EM_OK; +} + +em_status_t +em_dispatch_unregister_enter_cb(em_dispatch_enter_func_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_DISPATCH_CALLBACKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_DISPATCH_UNREGISTER_ENTER_CB, + "EM dispatch callbacks disabled"); + + hook_fn.disp_enter = func; + stat = hook_unregister(DISPATCH_CALLBACK_ENTER, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, + EM_ESCOPE_DISPATCH_UNREGISTER_ENTER_CB, + "Dispatch callback unregister failed"); + + return EM_OK; +} + +em_status_t +em_dispatch_register_exit_cb(em_dispatch_exit_func_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_DISPATCH_CALLBACKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_DISPATCH_REGISTER_EXIT_CB, + "EM dispatch callbacks disabled"); + + hook_fn.disp_exit = func; + stat = hook_register(DISPATCH_CALLBACK_EXIT, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, + EM_ESCOPE_DISPATCH_REGISTER_EXIT_CB, + "Dispatch callback register failed"); + return EM_OK; +} + +em_status_t +em_dispatch_unregister_exit_cb(em_dispatch_exit_func_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_DISPATCH_CALLBACKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_DISPATCH_UNREGISTER_EXIT_CB, + "EM dispatch callbacks disabled"); + + hook_fn.disp_exit = func; + stat = hook_unregister(DISPATCH_CALLBACK_EXIT, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, + EM_ESCOPE_DISPATCH_UNREGISTER_EXIT_CB, + "Dispatch callback unregister failed"); + return EM_OK; +} diff --git a/src/event_machine_eo.c b/src/event_machine_eo.c old mode 100644 new mode 100755 index 6267c94c..f987e1d9 --- a/src/event_machine_eo.c +++ b/src/event_machine_eo.c @@ -1,1165 +1,1165 @@ -/* - * Copyright (c) 2015-2023, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "em_include.h" - -/* Per core (thread) state of em_eo_get_next() */ -static ENV_LOCAL unsigned int _eo_tbl_iter_idx; -/* Per core (thread) state of em_eo_queue_get_next() */ -static ENV_LOCAL unsigned int _eo_q_iter_idx; -static ENV_LOCAL em_eo_t _eo_q_iter_eo; - -em_eo_t -em_eo_create(const char *name, - em_start_func_t start, - em_start_local_func_t local_start, - em_stop_func_t stop, - em_stop_local_func_t local_stop, - em_receive_func_t receive, - const void *eo_ctx) -{ - em_eo_t eo; - eo_elem_t *eo_elem; - - if (unlikely(start == NULL || stop == NULL || receive == NULL)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EO_CREATE, - "Mandatory EO function pointer(s) NULL!"); - return EM_EO_UNDEF; - } - - eo = eo_alloc(); - if (unlikely(eo == EM_EO_UNDEF)) { - INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_EO_CREATE, - "EO alloc failed!"); - return EM_EO_UNDEF; - } - - eo_elem = eo_elem_get(eo); - if (unlikely(eo_elem == NULL)) { - /* Fatal since eo_alloc() returned 'ok', should never happen */ - INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_ID), EM_ESCOPE_EO_CREATE, - "Invalid EO:%" PRI_EO "", eo); - return EM_EO_UNDEF; - } - - env_spinlock_lock(&eo_elem->lock); - - /* Store the name */ - if (name != NULL) { - strncpy(eo_elem->name, name, sizeof(eo_elem->name)); - eo_elem->name[sizeof(eo_elem->name) - 1] = '\0'; - } else { - eo_elem->name[0] = '\0'; - } - - /* EO's queue list init */ - list_init(&eo_elem->queue_list); - /* EO start: event buffering init */ - eo_elem->stash = ODP_STASH_INVALID; - - eo_elem->state = EM_EO_STATE_CREATED; - eo_elem->start_func = start; - eo_elem->start_local_func = local_start; - eo_elem->stop_func = stop; - eo_elem->stop_local_func = local_stop; - - eo_elem->use_multi_rcv = EM_FALSE; - eo_elem->max_events = 1; - eo_elem->receive_func = receive; - eo_elem->receive_multi_func = NULL; - - eo_elem->error_handler_func = NULL; - eo_elem->eo_ctx = (void *)(uintptr_t)eo_ctx; - eo_elem->eo = eo; - env_atomic32_init(&eo_elem->num_queues); - - env_spinlock_unlock(&eo_elem->lock); - - return eo; -} - -void em_eo_multircv_param_init(em_eo_multircv_param_t *param) -{ - if (unlikely(!param)) { - INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_ARG), - EM_ESCOPE_EO_MULTIRCV_PARAM_INIT, - "Param pointer NULL!"); - return; - } - memset(param, 0, sizeof(em_eo_multircv_param_t)); - param->max_events = EM_EO_MULTIRCV_MAX_EVENTS; - param->__internal_check = EM_CHECK_INIT_CALLED; -} - -em_eo_t -em_eo_create_multircv(const char *name, const em_eo_multircv_param_t *param) -{ - em_eo_t eo; - eo_elem_t 
*eo_elem; - int max_events; - - if (unlikely(!param || - param->__internal_check != EM_CHECK_INIT_CALLED)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EO_CREATE_MULTIRCV, - "Invalid param ptr:\n" - "Use em_eo_multircv_param_init() before create"); - return EM_EO_UNDEF; - } - - if (unlikely(!param->start || !param->stop || !param->receive_multi)) { - INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_EO_CREATE_MULTIRCV, - "Mandatory EO function pointer(s) NULL!"); - return EM_EO_UNDEF; - } - - if (unlikely(param->max_events < 0)) { - INTERNAL_ERROR(EM_ERR_TOO_SMALL, EM_ESCOPE_EO_CREATE_MULTIRCV, - "Max number of events too small:%d", - param->max_events); - return EM_EO_UNDEF; - } - max_events = param->max_events; - if (max_events == 0) /* user requests default value */ - max_events = EM_EO_MULTIRCV_MAX_EVENTS; - - eo = eo_alloc(); - if (unlikely(eo == EM_EO_UNDEF)) { - INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_EO_CREATE_MULTIRCV, - "EO alloc failed!"); - return EM_EO_UNDEF; - } - - eo_elem = eo_elem_get(eo); - if (unlikely(eo_elem == NULL)) { - /* Fatal since eo_alloc() returned 'ok', should never happen */ - INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_ID), - EM_ESCOPE_EO_CREATE_MULTIRCV, - "Invalid EO:%" PRI_EO "", eo); - return EM_EO_UNDEF; - } - - env_spinlock_lock(&eo_elem->lock); - - /* Store the name */ - if (name) { - strncpy(eo_elem->name, name, sizeof(eo_elem->name)); - eo_elem->name[sizeof(eo_elem->name) - 1] = '\0'; - } else { - eo_elem->name[0] = '\0'; - } - - /* EO's queue list init */ - list_init(&eo_elem->queue_list); - /* EO start: event buffering init */ - eo_elem->stash = ODP_STASH_INVALID; - - eo_elem->state = EM_EO_STATE_CREATED; - eo_elem->start_func = param->start; - eo_elem->start_local_func = param->local_start; - eo_elem->stop_func = param->stop; - eo_elem->stop_local_func = param->local_stop; - - eo_elem->use_multi_rcv = EM_TRUE; - eo_elem->max_events = max_events; - eo_elem->receive_func = NULL; - eo_elem->receive_multi_func = param->receive_multi; - - eo_elem->error_handler_func = NULL; - eo_elem->eo_ctx = (void *)(uintptr_t)param->eo_ctx; - eo_elem->eo = eo; - env_atomic32_init(&eo_elem->num_queues); - - env_spinlock_unlock(&eo_elem->lock); - - return eo; -} - -em_status_t -em_eo_delete(em_eo_t eo) -{ - eo_elem_t *const eo_elem = eo_elem_get(eo); - em_status_t status; - - RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ARG, EM_ESCOPE_EO_DELETE, - "Invalid EO:%" PRI_EO "!", eo); - - RETURN_ERROR_IF(!eo_allocated(eo_elem), - EM_ERR_NOT_CREATED, EM_ESCOPE_EO_DELETE, - "EO not allocated:%" PRI_EO "", eo); - - RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_CREATED && - eo_elem->state != EM_EO_STATE_ERROR, - EM_ERR_BAD_STATE, EM_ESCOPE_EO_DELETE, - "EO invalid state, cannot delete:%d", eo_elem->state); - - status = eo_delete_queue_all(eo_elem); - - RETURN_ERROR_IF(status != EM_OK, status, EM_ESCOPE_EO_DELETE, - "EO delete: delete queues failed!"); - - /* Free EO back into the eo-pool and mark state=EO_STATE_UNDEF */ - status = eo_free(eo); - RETURN_ERROR_IF(status != EM_OK, status, EM_ESCOPE_EO_DELETE, - "EO delete failed!"); - - return status; -} - -size_t -em_eo_get_name(em_eo_t eo, char *name, size_t maxlen) -{ - const eo_elem_t *eo_elem = eo_elem_get(eo); - - if (name == NULL || maxlen == 0) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EO_GET_NAME, - "Invalid ptr or maxlen (name=0x%" PRIx64 ", maxlen=%zu)", - name, maxlen); - return 0; - } - - name[0] = '\0'; - - if (unlikely(eo_elem == NULL)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EO_GET_NAME, - "Invalid EO%" PRI_EO "", 
eo); - return 0; - } - - if (unlikely(!eo_allocated(eo_elem))) { - INTERNAL_ERROR(EM_ERR_NOT_CREATED, EM_ESCOPE_EO_GET_NAME, - "EO not created:%" PRI_EO "", eo); - return 0; - } - - return eo_get_name(eo_elem, name, maxlen); -} - -em_eo_t -em_eo_find(const char *name) -{ - if (name && *name) { - for (int i = 0; i < EM_MAX_EOS; i++) { - const eo_elem_t *eo_elem = &em_shm->eo_tbl.eo_elem[i]; - - if (eo_elem->state != EM_EO_STATE_UNDEF && - !strncmp(name, eo_elem->name, EM_EO_NAME_LEN - 1)) - return eo_elem->eo; - } - } - return EM_EO_UNDEF; -} - -/** - * @brief Helper for em_eo_add_queue/_sync() - */ -static em_status_t -eo_add_queue_escope(em_eo_t eo, em_queue_t queue, - int num_notif, const em_notif_t notif_tbl[], - em_escope_t escope) -{ eo_elem_t *const eo_elem = eo_elem_get(eo); - queue_elem_t *const q_elem = queue_elem_get(queue); - em_queue_type_t q_type; - em_status_t err; - int valid; - - RETURN_ERROR_IF(eo_elem == NULL || q_elem == NULL, - EM_ERR_BAD_ARG, escope, - "Invalid args: EO:%" PRI_EO " Q:%" PRI_QUEUE "", - eo, queue); - RETURN_ERROR_IF(!eo_allocated(eo_elem) || !queue_allocated(q_elem), - EM_ERR_NOT_CREATED, escope, - "Not created: EO:%" PRI_EO " Q:%" PRI_QUEUE "", - eo, queue); - - q_type = em_queue_get_type(queue); - valid = q_type == EM_QUEUE_TYPE_ATOMIC || - q_type == EM_QUEUE_TYPE_PARALLEL || - q_type == EM_QUEUE_TYPE_PARALLEL_ORDERED || - q_type == EM_QUEUE_TYPE_LOCAL; - RETURN_ERROR_IF(!valid, EM_ERR_BAD_TYPE, escope, - "Invalid queue type: %" PRI_QTYPE "", q_type); - - if (num_notif > 0) { - err = check_notif_tbl(num_notif, notif_tbl); - RETURN_ERROR_IF(err != EM_OK, err, escope, - "Invalid notif cfg given!"); - } - - err = eo_add_queue(eo_elem, q_elem); - RETURN_ERROR_IF(err != EM_OK, err, escope, - "eo_add_queue(Q:%" PRI_QUEUE ") fails", queue); - - if (eo_elem->state == EM_EO_STATE_RUNNING) { - err = queue_enable(q_elem); /* otherwise enabled in eo-start */ - RETURN_ERROR_IF(err != EM_OK, err, escope, - "queue_enable(Q:%" PRI_QUEUE ") fails", queue); - } - - if (num_notif > 0) { - /* Send notifications if requested */ - err = send_notifs(num_notif, notif_tbl); - RETURN_ERROR_IF(err != EM_OK, err, escope, - "EO:%" PRI_EO " send notif fails", eo); - } - - return EM_OK; -} - -em_status_t -em_eo_add_queue(em_eo_t eo, em_queue_t queue, - int num_notif, const em_notif_t notif_tbl[]) -{ - return eo_add_queue_escope(eo, queue, num_notif, notif_tbl, - EM_ESCOPE_EO_ADD_QUEUE); -} - -em_status_t -em_eo_add_queue_sync(em_eo_t eo, em_queue_t queue) -{ - /* No sync blocking needed when adding a queue to an EO */ - return eo_add_queue_escope(eo, queue, 0, NULL, - EM_ESCOPE_EO_ADD_QUEUE_SYNC); -} - -em_status_t -em_eo_remove_queue(em_eo_t eo, em_queue_t queue, - int num_notif, const em_notif_t notif_tbl[]) -{ - eo_elem_t *const eo_elem = eo_elem_get(eo); - queue_elem_t *const q_elem = queue_elem_get(queue); - em_queue_type_t q_type; - em_status_t ret; - int valid; - - RETURN_ERROR_IF(eo_elem == NULL || q_elem == NULL, - EM_ERR_BAD_ARG, EM_ESCOPE_EO_REMOVE_QUEUE, - "Invalid args: EO:%" PRI_EO " Q:%" PRI_QUEUE "", - eo, queue); - RETURN_ERROR_IF(!eo_allocated(eo_elem) || !queue_allocated(q_elem), - EM_ERR_NOT_CREATED, EM_ESCOPE_EO_REMOVE_QUEUE, - "Not created: EO:%" PRI_EO " Q:%" PRI_QUEUE "", - eo, queue); - - q_type = em_queue_get_type(queue); - valid = q_type == EM_QUEUE_TYPE_ATOMIC || - q_type == EM_QUEUE_TYPE_PARALLEL || - q_type == EM_QUEUE_TYPE_PARALLEL_ORDERED || - q_type == EM_QUEUE_TYPE_LOCAL; - RETURN_ERROR_IF(!valid, EM_ERR_BAD_TYPE, EM_ESCOPE_EO_REMOVE_QUEUE, - 
"Invalid queue type: %" PRI_QTYPE "", q_type); - - ret = check_notif_tbl(num_notif, notif_tbl); - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_REMOVE_QUEUE, - "Invalid notif cfg given!"); - RETURN_ERROR_IF(eo_elem != q_elem->eo_elem, - EM_ERR_BAD_POINTER, EM_ESCOPE_EO_REMOVE_QUEUE, - "Can't remove Q:%" PRI_QUEUE ", not added to this EO", - queue); - - /* - * Disable the queue if not already done, dispatcher will drop any - * further events. Need to handle events from the queue being processed - * in an EO receive function properly still. - */ - if (q_elem->state == EM_QUEUE_STATE_READY) { - ret = queue_disable(q_elem); - - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_REMOVE_QUEUE, - "queue_disable(Q:%" PRI_QUEUE ") fails", - queue); - } - - /* - * Request each core to run locally the eo_remove_queue_local() function - * and when all are done call eo_remove_queue_done_callback(). - * The callback will finally remove the queue from the EO when it's - * known that no core is anymore processing events from that EO/queue. - */ - return eo_remove_queue_local_req(eo_elem, q_elem, num_notif, notif_tbl); -} - -em_status_t -em_eo_remove_queue_sync(em_eo_t eo, em_queue_t queue) -{ - em_locm_t *const locm = &em_locm; - eo_elem_t *const eo_elem = eo_elem_get(eo); - queue_elem_t *const q_elem = queue_elem_get(queue); - em_queue_type_t q_type; - em_status_t ret; - int valid; - - RETURN_ERROR_IF(eo_elem == NULL || q_elem == NULL, - EM_ERR_BAD_ARG, EM_ESCOPE_EO_REMOVE_QUEUE_SYNC, - "Invalid args: EO:%" PRI_EO " Q:%" PRI_QUEUE "", - eo, queue); - RETURN_ERROR_IF(!eo_allocated(eo_elem) || !queue_allocated(q_elem), - EM_ERR_NOT_CREATED, EM_ESCOPE_EO_REMOVE_QUEUE_SYNC, - "Not created: EO:%" PRI_EO " Q:%" PRI_QUEUE "", - eo, queue); - - q_type = em_queue_get_type(queue); - valid = q_type == EM_QUEUE_TYPE_ATOMIC || - q_type == EM_QUEUE_TYPE_PARALLEL || - q_type == EM_QUEUE_TYPE_PARALLEL_ORDERED || - q_type == EM_QUEUE_TYPE_LOCAL; - RETURN_ERROR_IF(!valid, EM_ERR_BAD_TYPE, - EM_ESCOPE_EO_REMOVE_QUEUE_SYNC, - "Invalid queue type: %" PRI_QTYPE "", q_type); - - RETURN_ERROR_IF(eo_elem != q_elem->eo_elem, - EM_ERR_BAD_POINTER, EM_ESCOPE_EO_REMOVE_QUEUE_SYNC, - "Can't remove Q:%" PRI_QUEUE ", not added to this EO", - queue); - - /* Mark that a sync-API call is in progress */ - locm->sync_api.in_progress = true; - - /* - * Disable the queue if not already done, dispatcher will drop any - * further events. Need to handle events from the queue being processed - * in an EO receive function properly still. - */ - if (q_elem->state == EM_QUEUE_STATE_READY) { - ret = queue_disable(q_elem); - - if (unlikely(ret != EM_OK)) - goto eo_remove_queue_sync_error; - } - - /* - * Request each core to run locally the eo_remove_queue_sync_local() function - * and when all are done call eo_remove_queue_sync_done_callback. - * The callback will finally remove the queue from the EO when it's - * known that no core is anymore processing events from that EO/queue. - */ - ret = eo_remove_queue_sync_local_req(eo_elem, q_elem); - if (unlikely(ret != EM_OK)) - goto eo_remove_queue_sync_error; - - /* - * Poll the core-local unscheduled control-queue for events. - * These events request the core to do a core-local operation (or nop). - * Poll and handle events until 'locm->sync_api.in_progress == false' - * indicating that this sync-API is 'done' on all concerned cores. 
- */ - while (locm->sync_api.in_progress) - poll_unsched_ctrl_queue(); - - return EM_OK; - -eo_remove_queue_sync_error: - locm->sync_api.in_progress = false; - - return INTERNAL_ERROR(ret, EM_ESCOPE_EO_REMOVE_QUEUE_SYNC, - "Failure: EO:%" PRI_EO " Q:%" PRI_QUEUE "", - eo, queue); -} - -em_status_t -em_eo_remove_queue_all(em_eo_t eo, int delete_queues, - int num_notif, const em_notif_t notif_tbl[]) -{ - eo_elem_t *const eo_elem = eo_elem_get(eo); - em_status_t ret; - - RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ARG, - EM_ESCOPE_EO_REMOVE_QUEUE_ALL, - "Invalid EO:%" PRI_EO "", eo); - RETURN_ERROR_IF(!eo_allocated(eo_elem), EM_ERR_NOT_CREATED, - EM_ESCOPE_EO_REMOVE_QUEUE_ALL, - "EO:%" PRI_EO " not created", eo); - ret = check_notif_tbl(num_notif, notif_tbl); - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_REMOVE_QUEUE_ALL, - "Invalid notif cfg given!"); - - ret = queue_disable_all(eo_elem); - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_REMOVE_QUEUE_ALL, - "queue_disable_all() failed!"); - - /* - * Request each core to run locally the eo_remove_queue_all_local() function - * and when all are done call eo_remove_queue_all_done_callback(). - * The callback will finally remove the queue from the EO when it's - * known that no core is anymore processing events from that EO/queue. - */ - return eo_remove_queue_all_local_req(eo_elem, delete_queues, - num_notif, notif_tbl); -} - -em_status_t -em_eo_remove_queue_all_sync(em_eo_t eo, int delete_queues) -{ - em_locm_t *const locm = &em_locm; - eo_elem_t *const eo_elem = eo_elem_get(eo); - em_status_t ret; - - RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ARG, - EM_ESCOPE_EO_REMOVE_QUEUE_ALL_SYNC, - "Invalid EO:%" PRI_EO "", eo); - RETURN_ERROR_IF(!eo_allocated(eo_elem), EM_ERR_NOT_CREATED, - EM_ESCOPE_EO_REMOVE_QUEUE_ALL_SYNC, - "EO:%" PRI_EO " not created", eo); - - /* Mark that a sync-API call is in progress */ - locm->sync_api.in_progress = true; - - ret = queue_disable_all(eo_elem); - if (unlikely(ret != EM_OK)) - goto eo_remove_queue_all_sync_error; - - /* - * Request each core to run locally the eo_remove_queue_all_sync_local() function - * and when all are done call eo_remove_queue_all_sync_done_callback(). - * The callback will finally remove the queue from the EO when it's - * known that no core is anymore processing events from that EO/queue. - */ - ret = eo_remove_queue_all_sync_local_req(eo_elem, delete_queues); - if (unlikely(ret != EM_OK)) - goto eo_remove_queue_all_sync_error; - - /* - * Poll the core-local unscheduled control-queue for events. - * These events request the core to do a core-local operation (or nop). - * Poll and handle events until 'locm->sync_api.in_progress == false' - * indicating that this sync-API is 'done' on all concerned cores. 
- */ - while (locm->sync_api.in_progress) - poll_unsched_ctrl_queue(); - - return EM_OK; - -eo_remove_queue_all_sync_error: - locm->sync_api.in_progress = false; - - return INTERNAL_ERROR(ret, EM_ESCOPE_EO_REMOVE_QUEUE_SYNC, - "Failure: EO:%" PRI_EO "", eo); -} - -em_status_t -em_eo_register_error_handler(em_eo_t eo, em_error_handler_t handler) -{ - eo_elem_t *const eo_elem = eo_elem_get(eo); - - RETURN_ERROR_IF(eo_elem == NULL || handler == NULL, - EM_ERR_BAD_ARG, EM_ESCOPE_EO_REGISTER_ERROR_HANDLER, - "Invalid args: EO:%" PRI_EO " handler:%p", eo, handler); - RETURN_ERROR_IF(!eo_allocated(eo_elem), - EM_ERR_NOT_CREATED, EM_ESCOPE_EO_REGISTER_ERROR_HANDLER, - "EO:%" PRI_EO " not created", eo); - - env_spinlock_lock(&eo_elem->lock); - eo_elem->error_handler_func = handler; - env_spinlock_unlock(&eo_elem->lock); - - return EM_OK; -} - -em_status_t -em_eo_unregister_error_handler(em_eo_t eo) -{ - eo_elem_t *const eo_elem = eo_elem_get(eo); - - RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ARG, - EM_ESCOPE_EO_UNREGISTER_ERROR_HANDLER, - "Invalid EO id %" PRI_EO "", eo); - RETURN_ERROR_IF(!eo_allocated(eo_elem), EM_ERR_NOT_CREATED, - EM_ESCOPE_EO_UNREGISTER_ERROR_HANDLER, - "EO not created:%" PRI_EO "", eo); - - env_spinlock_lock(&eo_elem->lock); - eo_elem->error_handler_func = NULL; - env_spinlock_unlock(&eo_elem->lock); - - return EM_OK; -} - -em_status_t -em_eo_start(em_eo_t eo, em_status_t *result, const em_eo_conf_t *conf, - int num_notif, const em_notif_t notif_tbl[]) -{ - em_locm_t *const locm = &em_locm; - eo_elem_t *const eo_elem = eo_elem_get(eo); - queue_elem_t *const save_q_elem = locm->current.q_elem; - queue_elem_t tmp_q_elem; - em_status_t ret; - - RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ARG, EM_ESCOPE_EO_START, - "Invalid EO id %" PRI_EO "", eo); - RETURN_ERROR_IF(!eo_allocated(eo_elem), - EM_ERR_NOT_CREATED, EM_ESCOPE_EO_START, - "EO not created:%" PRI_EO "", eo); - RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_CREATED, - EM_ERR_BAD_STATE, EM_ESCOPE_EO_START, - "EO invalid state, cannot start:%d", eo_elem->state); - ret = check_notif_tbl(num_notif, notif_tbl); - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_START, - "Invalid notif cfg given!"); - - eo_elem->state = EM_EO_STATE_STARTING; - - /* Create a stash to buffer events sent during EO-start */ - eo_elem->stash = eo_start_stash_create(); - if (unlikely(eo_elem->stash == ODP_STASH_INVALID)) { - ret = INTERNAL_ERROR(EM_ERR, EM_ESCOPE_EO_START, - "EO:%" PRI_EO " start stash creation fails", eo); - goto eo_start_error; - } - /* This core is in the EO start function: buffer all sent events */ - locm->start_eo_elem = eo_elem; - /* - * Use a tmp q_elem as the 'current q_elem' to enable calling - * em_eo_current() from the EO start functions. - * Before returning, restore the original 'current q_elem' from - * 'save_q_elem'. 
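- * Note: tmp_q_elem is zeroed and only its 'eo' field is filled in;
- * that is sufficient for em_eo_current() to resolve the current EO.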
- */ - memset(&tmp_q_elem, 0, sizeof(tmp_q_elem)); - tmp_q_elem.eo = (uint16_t)(uintptr_t)eo; - - locm->current.q_elem = &tmp_q_elem; - /* Call the global EO start function */ - ret = eo_elem->start_func(eo_elem->eo_ctx, eo, conf); - /* Restore the original 'current q_elem' */ - locm->current.q_elem = save_q_elem; - locm->start_eo_elem = NULL; - - /* Store the return value of the actual EO global start function */ - if (result != NULL) - *result = ret; - - if (unlikely(ret != EM_OK)) { - ret = INTERNAL_ERROR(EM_ERR, EM_ESCOPE_EO_START, - "EO:%" PRI_EO " start func fails:0x%08x", - eo, ret); - /* user error handler might change error from own eo-start */ - if (ret != EM_OK) - goto eo_start_error; - } - - if (eo_elem->start_local_func != NULL) { - /* - * Notifications sent when the local start functions - * have completed. - */ - ret = eo_start_local_req(eo_elem, num_notif, notif_tbl); - - if (unlikely(ret != EM_OK)) { - INTERNAL_ERROR(ret, EM_ESCOPE_EO_START, - "EO:%" PRI_EO " local start func fails", - eo); - /* Can't allow user err handler to change error here */ - goto eo_start_error; - } - /* - * Note: Return here, queues will be enabled after the local - * start funcs complete. - * EO state changed to 'EM_EO_STATE_RUNNING' after successful - * completion of EO local starts on all cores. - */ - return EM_OK; - } - - /* - * Enable all the EO's queues. - * Note: if local start functions are given then enable can be done only - * after they have been run on each core. - */ - ret = queue_enable_all(eo_elem); - if (unlikely(ret != EM_OK)) - goto eo_start_error; - - eo_elem->state = EM_EO_STATE_RUNNING; - - /* Send events buffered during the EO-start/local-start functions */ - eo_start_send_buffered_events(eo_elem); - - if (num_notif > 0) { - /* Send notifications if requested */ - ret = send_notifs(num_notif, notif_tbl); - - if (unlikely(ret != EM_OK)) { - ret = INTERNAL_ERROR(ret, EM_ESCOPE_EO_START, - "EO:%" PRI_EO " send notif fails", - eo); - /* user error handler might change error */ - if (ret != EM_OK) - goto eo_start_error; - } - } - - return EM_OK; - -eo_start_error: - /* roll back state to allow EO delete */ - eo_elem->state = EM_EO_STATE_ERROR; - return ret; -} - -em_status_t -em_eo_start_sync(em_eo_t eo, em_status_t *result, const em_eo_conf_t *conf) -{ - em_locm_t *const locm = &em_locm; - eo_elem_t *const eo_elem = eo_elem_get(eo); - queue_elem_t *const save_q_elem = locm->current.q_elem; - queue_elem_t tmp_q_elem; - em_status_t ret; - - RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ARG, EM_ESCOPE_EO_START_SYNC, - "Invalid EO id %" PRI_EO "", eo); - RETURN_ERROR_IF(!eo_allocated(eo_elem), - EM_ERR_NOT_CREATED, EM_ESCOPE_EO_START_SYNC, - "EO not created:%" PRI_EO "", eo); - RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_CREATED, - EM_ERR_BAD_STATE, EM_ESCOPE_EO_START_SYNC, - "EO invalid state, cannot start:%d", eo_elem->state); - - eo_elem->state = EM_EO_STATE_STARTING; - - /* Create a stash to buffer events sent during EO-start */ - eo_elem->stash = eo_start_stash_create(); - if (unlikely(eo_elem->stash == ODP_STASH_INVALID)) { - ret = INTERNAL_ERROR(EM_ERR, EM_ESCOPE_EO_START, - "EO:%" PRI_EO " start stash creation fails", eo); - /* roll back state to allow EO delete */ - eo_elem->state = EM_EO_STATE_ERROR; - return ret; - } - /* This core is in the EO start function: buffer all sent events */ - locm->start_eo_elem = eo_elem; - /* - * Use a tmp q_elem as the 'current q_elem' to enable calling - * em_eo_current() from the EO start functions. 
- * Before returning, restore the original 'current q_elem' from - * 'save_q_elem'. - */ - memset(&tmp_q_elem, 0, sizeof(tmp_q_elem)); - tmp_q_elem.eo = (uint16_t)(uintptr_t)eo; - locm->current.q_elem = &tmp_q_elem; - /* Call the global EO start function */ - ret = eo_elem->start_func(eo_elem->eo_ctx, eo, conf); - /* Restore the original 'current q_elem' */ - locm->current.q_elem = save_q_elem; - locm->start_eo_elem = NULL; - - /* Store the return value of the actual EO global start function */ - if (result != NULL) - *result = ret; - - if (unlikely(ret != EM_OK)) { - ret = INTERNAL_ERROR(EM_ERR, EM_ESCOPE_EO_START_SYNC, - "EO:%" PRI_EO " start func fails:0x%08x", - eo, ret); - /* user error handler might change error from own eo-start */ - if (ret != EM_OK) { - /* roll back state to allow EO delete */ - eo_elem->state = EM_EO_STATE_ERROR; - return ret; - } - } - - if (eo_elem->start_local_func != NULL) { - /* Mark that a sync-API call is in progress */ - locm->sync_api.in_progress = true; - - locm->start_eo_elem = eo_elem; - locm->current.q_elem = &tmp_q_elem; - /* Call the local start on this core */ - ret = eo_elem->start_local_func(eo_elem->eo_ctx, eo); - /* Restore the original 'current q_elem' */ - locm->current.q_elem = save_q_elem; - locm->start_eo_elem = NULL; - - if (unlikely(ret != EM_OK)) { - INTERNAL_ERROR(ret, EM_ESCOPE_EO_START_SYNC, - "EO:%" PRI_EO " local start func fails", eo); - /* Can't allow user err handler to change error here */ - goto eo_start_sync_error; - } - - ret = eo_start_sync_local_req(eo_elem); - if (unlikely(ret != EM_OK)) { - INTERNAL_ERROR(ret, EM_ESCOPE_EO_START_SYNC, - "EO:%" PRI_EO " eo_start_sync_local_req", eo); - /* Can't allow user err handler to change error here */ - goto eo_start_sync_error; - } - - /* - * Poll the core-local unscheduled control-queue for events. - * These events request the core to do a core-local operation (or nop). - * Poll and handle events until 'locm->sync_api.in_progress == false' - * indicating that this sync-API is 'done' on all concerned cores. - */ - while (locm->sync_api.in_progress) - poll_unsched_ctrl_queue(); - - /* Send events buffered during the EO-start/local-start funcs */ - eo_start_send_buffered_events(eo_elem); - /* - * EO state changed to 'EO_STATE_RUNNING' after successful - * completion of EO local starts on all cores. - */ - return EM_OK; - } - - /* - * Enable all the EO's queues. - * Note: if local start functions are given then enable can be done only - * after they have been run on each core. 
- */ - ret = queue_enable_all(eo_elem); - if (unlikely(ret != EM_OK)) - goto eo_start_sync_error; - - eo_elem->state = EM_EO_STATE_RUNNING; - - /* Send events buffered during the EO-start/local-start functions */ - eo_start_send_buffered_events(eo_elem); - return EM_OK; - -eo_start_sync_error: - locm->sync_api.in_progress = false; - /* roll back state to allow EO delete */ - eo_elem->state = EM_EO_STATE_ERROR; - return ret; -} - -em_status_t -em_eo_stop(em_eo_t eo, int num_notif, const em_notif_t notif_tbl[]) -{ - eo_elem_t *const eo_elem = eo_elem_get(eo); - em_status_t ret; - - RETURN_ERROR_IF(eo_elem == NULL || !eo_allocated(eo_elem), - EM_ERR_BAD_ARG, EM_ESCOPE_EO_STOP, - "Invalid EO:%" PRI_EO "", eo); - RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_RUNNING, - EM_ERR_BAD_STATE, EM_ESCOPE_EO_STOP, - "EO invalid state, cannot stop:%d", eo_elem->state); - ret = check_notif_tbl(num_notif, notif_tbl); - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_STOP, - "Invalid notif cfg given!"); - - eo_elem->state = EM_EO_STATE_STOPPING; - - /* - * Disable all queues. - * It doesn't matter if some of the queues are already disabled. - */ - queue_disable_all(eo_elem); - - /* - * Notifications sent when the local stop functions - * have completed. EO global stop called when all local stops have - * been completed. EO state changed to 'stopped' only after completing - * the EO global stop function. - */ - ret = eo_stop_local_req(eo_elem, num_notif, notif_tbl); - - if (unlikely(ret != EM_OK)) { - eo_elem->state = EM_EO_STATE_ERROR; - INTERNAL_ERROR(ret, EM_ESCOPE_EO_STOP, - "EO:%" PRI_EO " local stop func fails", eo); - /* Can't allow user err handler to change error here */ - return ret; - } - - return EM_OK; -} - -em_status_t -em_eo_stop_sync(em_eo_t eo) -{ - em_locm_t *const locm = &em_locm; - eo_elem_t *const eo_elem = eo_elem_get(eo); - queue_elem_t *const save_q_elem = locm->current.q_elem; - queue_elem_t tmp_q_elem; - em_status_t ret; - - RETURN_ERROR_IF(eo_elem == NULL || !eo_allocated(eo_elem), - EM_ERR_BAD_ARG, EM_ESCOPE_EO_STOP_SYNC, - "Invalid EO:%" PRI_EO "", eo); - RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_RUNNING, - EM_ERR_BAD_STATE, EM_ESCOPE_EO_STOP_SYNC, - "EO invalid state, cannot stop:%d", eo_elem->state); - - /* Mark that a sync-API call is in progress */ - locm->sync_api.in_progress = true; - - eo_elem->state = EM_EO_STATE_STOPPING; - - /* - * Disable all queues. - * It doesn't matter if some of the queues are already disabled. - */ - ret = queue_disable_all(eo_elem); - if (unlikely(ret != EM_OK)) - goto eo_stop_sync_error; - - /* - * Use a tmp q_elem as the 'current q_elem' to enable calling - * em_eo_current() from the EO stop functions. - * Before returning, restore the original 'current q_elem' from - * 'save_q_elem'. - */ - memset(&tmp_q_elem, 0, sizeof(tmp_q_elem)); - tmp_q_elem.eo = (uint16_t)(uintptr_t)eo; - - if (eo_elem->stop_local_func != NULL) { - locm->current.q_elem = &tmp_q_elem; - /* Call the local stop on this core */ - ret = eo_elem->stop_local_func(eo_elem->eo_ctx, eo_elem->eo); - /* Restore the original 'current q_elem' */ - locm->current.q_elem = save_q_elem; - if (unlikely(ret != EM_OK)) - goto eo_stop_sync_error; - } - - /* - * Notifications sent when the local stop functions have completed. - * EO global stop called when all local stops have been completed. - * EO state changed to 'stopped' only after completing the EO global - * stop function. 
- */ - ret = eo_stop_sync_local_req(eo_elem); - - if (unlikely(ret != EM_OK)) { - eo_elem->state = EM_EO_STATE_ERROR; - INTERNAL_ERROR(ret, EM_ESCOPE_EO_STOP_SYNC, - "EO:%" PRI_EO " local stop func fails", eo); - /* Can't allow user err handler to change error here */ - goto eo_stop_sync_error; - } - - /* - * Poll the core-local unscheduled control-queue for events. - * These events request the core to do a core-local operation (or nop). - * Poll and handle events until 'locm->sync_api.in_progress == false' - * indicating that this sync-API is 'done' on all concerned cores. - */ - while (locm->sync_api.in_progress) - poll_unsched_ctrl_queue(); - - /* Change state here to allow em_eo_delete() from EO global stop */ - eo_elem->state = EM_EO_STATE_CREATED; /* == stopped */ - - locm->current.q_elem = &tmp_q_elem; - /* - * Call the Global EO stop function now that all - * EO local stop functions are done. - */ - ret = eo_elem->stop_func(eo_elem->eo_ctx, eo); - /* Restore the original 'current q_elem' */ - locm->current.q_elem = save_q_elem; - - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_STOP_SYNC, - "EO:%" PRI_EO " stop-func failed", eo); - /* - * Note: the EO might not be available after this if the EO global stop - * called em_eo_delete()! - */ - return EM_OK; - -eo_stop_sync_error: - locm->sync_api.in_progress = false; - return INTERNAL_ERROR(ret, EM_ESCOPE_EO_STOP_SYNC, - "Failure: EO:%" PRI_EO "", eo); -} - -em_eo_t -em_eo_current(void) -{ - return eo_current(); -} - -void * -em_eo_get_context(em_eo_t eo) -{ - const eo_elem_t *eo_elem = eo_elem_get(eo); - em_eo_state_t eo_state; - - if (unlikely(EM_CHECK_LEVEL > 0 && eo_elem == NULL)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EO_GET_CONTEXT, - "Invalid EO:%" PRI_EO "", eo); - return NULL; - } - - if (unlikely(EM_CHECK_LEVEL >= 2 && !eo_allocated(eo_elem))) { - INTERNAL_ERROR(EM_ERR_NOT_CREATED, EM_ESCOPE_EO_GET_CONTEXT, - "EO:%" PRI_EO " not created!", eo); - return NULL; - } - - eo_state = eo_elem->state; - if (unlikely(EM_CHECK_LEVEL > 0 && eo_state < EM_EO_STATE_CREATED)) { - INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_EO_GET_CONTEXT, - "Invalid EO state: EO:%" PRI_EO " state:%d", - eo, eo_state); - return NULL; - } - - return eo_elem->eo_ctx; -} - -em_eo_state_t -em_eo_get_state(em_eo_t eo) -{ - const eo_elem_t *eo_elem = eo_elem_get(eo); - - if (unlikely(EM_CHECK_LEVEL > 0 && eo_elem == NULL)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EO_GET_STATE, - "Invalid EO:%" PRI_EO "", eo); - return EM_EO_STATE_UNDEF; - } - - if (unlikely(EM_CHECK_LEVEL >= 2 && !eo_allocated(eo_elem))) { - INTERNAL_ERROR(EM_ERR_NOT_CREATED, EM_ESCOPE_EO_GET_STATE, - "EO:%" PRI_EO " not created", eo); - return EM_EO_STATE_UNDEF; - } - - return eo_elem->state; -} - -em_eo_t -em_eo_get_first(unsigned int *num) -{ - _eo_tbl_iter_idx = 0; /* reset iteration */ - const unsigned int eo_cnt = eo_count(); - - if (num) - *num = eo_cnt; - - if (eo_cnt == 0) { - _eo_tbl_iter_idx = EM_MAX_EOS; /* UNDEF = _get_next() */ - return EM_EO_UNDEF; - } - - /* find first */ - while (!eo_allocated(&em_shm->eo_tbl.eo_elem[_eo_tbl_iter_idx])) { - _eo_tbl_iter_idx++; - if (_eo_tbl_iter_idx >= EM_MAX_EOS) - return EM_EO_UNDEF; - } - - return eo_idx2hdl(_eo_tbl_iter_idx); -} - -em_eo_t -em_eo_get_next(void) -{ - if (_eo_tbl_iter_idx >= EM_MAX_EOS - 1) - return EM_EO_UNDEF; - - _eo_tbl_iter_idx++; - - /* find next */ - while (!eo_allocated(&em_shm->eo_tbl.eo_elem[_eo_tbl_iter_idx])) { - _eo_tbl_iter_idx++; - if (_eo_tbl_iter_idx >= EM_MAX_EOS) - return EM_EO_UNDEF; - } - - return 
eo_idx2hdl(_eo_tbl_iter_idx); -} - -em_queue_t -em_eo_queue_get_first(unsigned int *num, em_eo_t eo) -{ - const eo_elem_t *eo_elem = eo_elem_get(eo); - const unsigned int max_queues = em_shm->opt.queue.max_num; - - if (unlikely(eo_elem == NULL || !eo_allocated(eo_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EO_QUEUE_GET_FIRST, - "Invalid EO:%" PRI_EO "", eo); - if (num) - *num = 0; - return EM_QUEUE_UNDEF; - } - - const unsigned int num_queues = env_atomic32_get(&eo_elem->num_queues); - - if (num) - *num = num_queues; - - if (num_queues == 0) { - _eo_q_iter_idx = max_queues; /* UNDEF = _get_next() */ - return EM_QUEUE_UNDEF; - } - - /* - * An 'eo_elem' contains a linked list with all it's queues. That list - * might be modified while processing this iteration, so instead we just - * go through the whole queue table. - * This is potentially a slow implementation and perhaps worth - * re-thinking? - */ - const queue_tbl_t *const queue_tbl = &em_shm->queue_tbl; - - _eo_q_iter_idx = 0; /* reset list */ - _eo_q_iter_eo = eo; - - /* find first */ - while (!queue_allocated(&queue_tbl->queue_elem[_eo_q_iter_idx]) || - queue_tbl->queue_elem[_eo_q_iter_idx].eo != (uint16_t)(uintptr_t)_eo_q_iter_eo) { - _eo_q_iter_idx++; - if (_eo_q_iter_idx >= max_queues) - return EM_QUEUE_UNDEF; - } - - return queue_idx2hdl(_eo_q_iter_idx); -} - -em_queue_t -em_eo_queue_get_next(void) -{ - const unsigned int max_queues = em_shm->opt.queue.max_num; - - if (_eo_q_iter_idx >= max_queues - 1) - return EM_QUEUE_UNDEF; - - _eo_q_iter_idx++; - - const queue_tbl_t *const queue_tbl = &em_shm->queue_tbl; - - /* find next */ - while (!queue_allocated(&queue_tbl->queue_elem[_eo_q_iter_idx]) || - queue_tbl->queue_elem[_eo_q_iter_idx].eo != (uint16_t)(uintptr_t)_eo_q_iter_eo) { - _eo_q_iter_idx++; - if (_eo_q_iter_idx >= max_queues) - return EM_QUEUE_UNDEF; - } - - return queue_idx2hdl(_eo_q_iter_idx); -} - -uint64_t em_eo_to_u64(em_eo_t eo) -{ - return (uint64_t)eo; -} +/* + * Copyright (c) 2015-2023, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "em_include.h" + +/* Per core (thread) state of em_eo_get_next() */ +static ENV_LOCAL unsigned int _eo_tbl_iter_idx; +/* Per core (thread) state of em_eo_queue_get_next() */ +static ENV_LOCAL unsigned int _eo_q_iter_idx; +static ENV_LOCAL em_eo_t _eo_q_iter_eo; + +em_eo_t +em_eo_create(const char *name, + em_start_func_t start, + em_start_local_func_t local_start, + em_stop_func_t stop, + em_stop_local_func_t local_stop, + em_receive_func_t receive, + const void *eo_ctx) +{ + em_eo_t eo; + eo_elem_t *eo_elem; + + if (unlikely(start == NULL || stop == NULL || receive == NULL)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EO_CREATE, + "Mandatory EO function pointer(s) NULL!"); + return EM_EO_UNDEF; + } + + eo = eo_alloc(); + if (unlikely(eo == EM_EO_UNDEF)) { + INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_EO_CREATE, + "EO alloc failed!"); + return EM_EO_UNDEF; + } + + eo_elem = eo_elem_get(eo); + if (unlikely(eo_elem == NULL)) { + /* Fatal since eo_alloc() returned 'ok', should never happen */ + INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_ID), EM_ESCOPE_EO_CREATE, + "Invalid EO:%" PRI_EO "", eo); + return EM_EO_UNDEF; + } + + env_spinlock_lock(&eo_elem->lock); + + /* Store the name */ + if (name != NULL) { + strncpy(eo_elem->name, name, sizeof(eo_elem->name)); + eo_elem->name[sizeof(eo_elem->name) - 1] = '\0'; + } else { + eo_elem->name[0] = '\0'; + } + + /* EO's queue list init */ + list_init(&eo_elem->queue_list); + /* EO start: event buffering init */ + eo_elem->stash = ODP_STASH_INVALID; + + eo_elem->state = EM_EO_STATE_CREATED; + eo_elem->start_func = start; + eo_elem->start_local_func = local_start; + eo_elem->stop_func = stop; + eo_elem->stop_local_func = local_stop; + + eo_elem->use_multi_rcv = EM_FALSE; + eo_elem->max_events = 1; + eo_elem->receive_func = receive; + eo_elem->receive_multi_func = NULL; + + eo_elem->error_handler_func = NULL; + eo_elem->eo_ctx = (void *)(uintptr_t)eo_ctx; + eo_elem->eo = eo; + env_atomic32_init(&eo_elem->num_queues); + + env_spinlock_unlock(&eo_elem->lock); + + return eo; +} + +void em_eo_multircv_param_init(em_eo_multircv_param_t *param) +{ + if (unlikely(!param)) { + INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_ARG), + EM_ESCOPE_EO_MULTIRCV_PARAM_INIT, + "Param pointer NULL!"); + return; + } + memset(param, 0, sizeof(em_eo_multircv_param_t)); + param->max_events = EM_EO_MULTIRCV_MAX_EVENTS; + param->__internal_check = EM_CHECK_INIT_CALLED; +} + +em_eo_t +em_eo_create_multircv(const char *name, const em_eo_multircv_param_t *param) +{ + em_eo_t eo; + eo_elem_t *eo_elem; + int max_events; + + if (unlikely(!param || + param->__internal_check != EM_CHECK_INIT_CALLED)) { + INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_EO_CREATE_MULTIRCV, + "Invalid param ptr:\n" + "Use em_eo_multircv_param_init() before create"); + return EM_EO_UNDEF; + } + + if (unlikely(!param->start || !param->stop || !param->receive_multi)) { + INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_EO_CREATE_MULTIRCV, + "Mandatory EO function pointer(s) 
NULL!"); + return EM_EO_UNDEF; + } + + if (unlikely(param->max_events < 0)) { + INTERNAL_ERROR(EM_ERR_TOO_SMALL, EM_ESCOPE_EO_CREATE_MULTIRCV, + "Max number of events too small:%d", + param->max_events); + return EM_EO_UNDEF; + } + max_events = param->max_events; + if (max_events == 0) /* user requests default value */ + max_events = EM_EO_MULTIRCV_MAX_EVENTS; + + eo = eo_alloc(); + if (unlikely(eo == EM_EO_UNDEF)) { + INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_EO_CREATE_MULTIRCV, + "EO alloc failed!"); + return EM_EO_UNDEF; + } + + eo_elem = eo_elem_get(eo); + if (unlikely(eo_elem == NULL)) { + /* Fatal since eo_alloc() returned 'ok', should never happen */ + INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_ID), + EM_ESCOPE_EO_CREATE_MULTIRCV, + "Invalid EO:%" PRI_EO "", eo); + return EM_EO_UNDEF; + } + + env_spinlock_lock(&eo_elem->lock); + + /* Store the name */ + if (name) { + strncpy(eo_elem->name, name, sizeof(eo_elem->name)); + eo_elem->name[sizeof(eo_elem->name) - 1] = '\0'; + } else { + eo_elem->name[0] = '\0'; + } + + /* EO's queue list init */ + list_init(&eo_elem->queue_list); + /* EO start: event buffering init */ + eo_elem->stash = ODP_STASH_INVALID; + + eo_elem->state = EM_EO_STATE_CREATED; + eo_elem->start_func = param->start; + eo_elem->start_local_func = param->local_start; + eo_elem->stop_func = param->stop; + eo_elem->stop_local_func = param->local_stop; + + eo_elem->use_multi_rcv = EM_TRUE; + eo_elem->max_events = max_events; + eo_elem->receive_func = NULL; + eo_elem->receive_multi_func = param->receive_multi; + + eo_elem->error_handler_func = NULL; + eo_elem->eo_ctx = (void *)(uintptr_t)param->eo_ctx; + eo_elem->eo = eo; + env_atomic32_init(&eo_elem->num_queues); + + env_spinlock_unlock(&eo_elem->lock); + + return eo; +} + +em_status_t +em_eo_delete(em_eo_t eo) +{ + eo_elem_t *const eo_elem = eo_elem_get(eo); + em_status_t status; + + RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ARG, EM_ESCOPE_EO_DELETE, + "Invalid EO:%" PRI_EO "!", eo); + + RETURN_ERROR_IF(!eo_allocated(eo_elem), + EM_ERR_NOT_CREATED, EM_ESCOPE_EO_DELETE, + "EO not allocated:%" PRI_EO "", eo); + + RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_CREATED && + eo_elem->state != EM_EO_STATE_ERROR, + EM_ERR_BAD_STATE, EM_ESCOPE_EO_DELETE, + "EO invalid state, cannot delete:%d", eo_elem->state); + + status = eo_delete_queue_all(eo_elem); + + RETURN_ERROR_IF(status != EM_OK, status, EM_ESCOPE_EO_DELETE, + "EO delete: delete queues failed!"); + + /* Free EO back into the eo-pool and mark state=EO_STATE_UNDEF */ + status = eo_free(eo); + RETURN_ERROR_IF(status != EM_OK, status, EM_ESCOPE_EO_DELETE, + "EO delete failed!"); + + return status; +} + +size_t +em_eo_get_name(em_eo_t eo, char *name, size_t maxlen) +{ + const eo_elem_t *eo_elem = eo_elem_get(eo); + + if (name == NULL || maxlen == 0) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EO_GET_NAME, + "Invalid ptr or maxlen (name=0x%" PRIx64 ", maxlen=%zu)", + name, maxlen); + return 0; + } + + name[0] = '\0'; + + if (unlikely(eo_elem == NULL)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EO_GET_NAME, + "Invalid EO%" PRI_EO "", eo); + return 0; + } + + if (unlikely(!eo_allocated(eo_elem))) { + INTERNAL_ERROR(EM_ERR_NOT_CREATED, EM_ESCOPE_EO_GET_NAME, + "EO not created:%" PRI_EO "", eo); + return 0; + } + + return eo_get_name(eo_elem, name, maxlen); +} + +em_eo_t +em_eo_find(const char *name) +{ + if (name && *name) { + for (int i = 0; i < EM_MAX_EOS; i++) { + const eo_elem_t *eo_elem = &em_shm->eo_tbl.eo_elem[i]; + + if (eo_elem->state != EM_EO_STATE_UNDEF && + 
!strncmp(name, eo_elem->name, EM_EO_NAME_LEN - 1)) + return eo_elem->eo; + } + } + return EM_EO_UNDEF; +} + +/** + * @brief Helper for em_eo_add_queue/_sync() + */ +static em_status_t +eo_add_queue_escope(em_eo_t eo, em_queue_t queue, + int num_notif, const em_notif_t notif_tbl[], + em_escope_t escope) +{ eo_elem_t *const eo_elem = eo_elem_get(eo); + queue_elem_t *const q_elem = queue_elem_get(queue); + em_queue_type_t q_type; + em_status_t err; + int valid; + + RETURN_ERROR_IF(eo_elem == NULL || q_elem == NULL, + EM_ERR_BAD_ARG, escope, + "Invalid args: EO:%" PRI_EO " Q:%" PRI_QUEUE "", + eo, queue); + RETURN_ERROR_IF(!eo_allocated(eo_elem) || !queue_allocated(q_elem), + EM_ERR_NOT_CREATED, escope, + "Not created: EO:%" PRI_EO " Q:%" PRI_QUEUE "", + eo, queue); + + q_type = em_queue_get_type(queue); + valid = q_type == EM_QUEUE_TYPE_ATOMIC || + q_type == EM_QUEUE_TYPE_PARALLEL || + q_type == EM_QUEUE_TYPE_PARALLEL_ORDERED || + q_type == EM_QUEUE_TYPE_LOCAL; + RETURN_ERROR_IF(!valid, EM_ERR_BAD_TYPE, escope, + "Invalid queue type: %" PRI_QTYPE "", q_type); + + if (num_notif > 0) { + err = check_notif_tbl(num_notif, notif_tbl); + RETURN_ERROR_IF(err != EM_OK, err, escope, + "Invalid notif cfg given!"); + } + + err = eo_add_queue(eo_elem, q_elem); + RETURN_ERROR_IF(err != EM_OK, err, escope, + "eo_add_queue(Q:%" PRI_QUEUE ") fails", queue); + + if (eo_elem->state == EM_EO_STATE_RUNNING) { + err = queue_enable(q_elem); /* otherwise enabled in eo-start */ + RETURN_ERROR_IF(err != EM_OK, err, escope, + "queue_enable(Q:%" PRI_QUEUE ") fails", queue); + } + + if (num_notif > 0) { + /* Send notifications if requested */ + err = send_notifs(num_notif, notif_tbl); + RETURN_ERROR_IF(err != EM_OK, err, escope, + "EO:%" PRI_EO " send notif fails", eo); + } + + return EM_OK; +} + +em_status_t +em_eo_add_queue(em_eo_t eo, em_queue_t queue, + int num_notif, const em_notif_t notif_tbl[]) +{ + return eo_add_queue_escope(eo, queue, num_notif, notif_tbl, + EM_ESCOPE_EO_ADD_QUEUE); +} + +em_status_t +em_eo_add_queue_sync(em_eo_t eo, em_queue_t queue) +{ + /* No sync blocking needed when adding a queue to an EO */ + return eo_add_queue_escope(eo, queue, 0, NULL, + EM_ESCOPE_EO_ADD_QUEUE_SYNC); +} + +em_status_t +em_eo_remove_queue(em_eo_t eo, em_queue_t queue, + int num_notif, const em_notif_t notif_tbl[]) +{ + eo_elem_t *const eo_elem = eo_elem_get(eo); + queue_elem_t *const q_elem = queue_elem_get(queue); + em_queue_type_t q_type; + em_status_t ret; + int valid; + + RETURN_ERROR_IF(eo_elem == NULL || q_elem == NULL, + EM_ERR_BAD_ARG, EM_ESCOPE_EO_REMOVE_QUEUE, + "Invalid args: EO:%" PRI_EO " Q:%" PRI_QUEUE "", + eo, queue); + RETURN_ERROR_IF(!eo_allocated(eo_elem) || !queue_allocated(q_elem), + EM_ERR_NOT_CREATED, EM_ESCOPE_EO_REMOVE_QUEUE, + "Not created: EO:%" PRI_EO " Q:%" PRI_QUEUE "", + eo, queue); + + q_type = em_queue_get_type(queue); + valid = q_type == EM_QUEUE_TYPE_ATOMIC || + q_type == EM_QUEUE_TYPE_PARALLEL || + q_type == EM_QUEUE_TYPE_PARALLEL_ORDERED || + q_type == EM_QUEUE_TYPE_LOCAL; + RETURN_ERROR_IF(!valid, EM_ERR_BAD_TYPE, EM_ESCOPE_EO_REMOVE_QUEUE, + "Invalid queue type: %" PRI_QTYPE "", q_type); + + ret = check_notif_tbl(num_notif, notif_tbl); + RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_REMOVE_QUEUE, + "Invalid notif cfg given!"); + RETURN_ERROR_IF(eo_elem != q_elem->eo_elem, + EM_ERR_BAD_POINTER, EM_ESCOPE_EO_REMOVE_QUEUE, + "Can't remove Q:%" PRI_QUEUE ", not added to this EO", + queue); + + /* + * Disable the queue if not already done, dispatcher will drop any + * further events. 
Need to handle events from the queue being processed + * in an EO receive function properly still. + */ + if (q_elem->state == EM_QUEUE_STATE_READY) { + ret = queue_disable(q_elem); + + RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_REMOVE_QUEUE, + "queue_disable(Q:%" PRI_QUEUE ") fails", + queue); + } + + /* + * Request each core to run locally the eo_remove_queue_local() function + * and when all are done call eo_remove_queue_done_callback(). + * The callback will finally remove the queue from the EO when it's + * known that no core is anymore processing events from that EO/queue. + */ + return eo_remove_queue_local_req(eo_elem, q_elem, num_notif, notif_tbl); +} + +em_status_t +em_eo_remove_queue_sync(em_eo_t eo, em_queue_t queue) +{ + em_locm_t *const locm = &em_locm; + eo_elem_t *const eo_elem = eo_elem_get(eo); + queue_elem_t *const q_elem = queue_elem_get(queue); + em_queue_type_t q_type; + em_status_t ret; + int valid; + + RETURN_ERROR_IF(eo_elem == NULL || q_elem == NULL, + EM_ERR_BAD_ARG, EM_ESCOPE_EO_REMOVE_QUEUE_SYNC, + "Invalid args: EO:%" PRI_EO " Q:%" PRI_QUEUE "", + eo, queue); + RETURN_ERROR_IF(!eo_allocated(eo_elem) || !queue_allocated(q_elem), + EM_ERR_NOT_CREATED, EM_ESCOPE_EO_REMOVE_QUEUE_SYNC, + "Not created: EO:%" PRI_EO " Q:%" PRI_QUEUE "", + eo, queue); + + q_type = em_queue_get_type(queue); + valid = q_type == EM_QUEUE_TYPE_ATOMIC || + q_type == EM_QUEUE_TYPE_PARALLEL || + q_type == EM_QUEUE_TYPE_PARALLEL_ORDERED || + q_type == EM_QUEUE_TYPE_LOCAL; + RETURN_ERROR_IF(!valid, EM_ERR_BAD_TYPE, + EM_ESCOPE_EO_REMOVE_QUEUE_SYNC, + "Invalid queue type: %" PRI_QTYPE "", q_type); + + RETURN_ERROR_IF(eo_elem != q_elem->eo_elem, + EM_ERR_BAD_POINTER, EM_ESCOPE_EO_REMOVE_QUEUE_SYNC, + "Can't remove Q:%" PRI_QUEUE ", not added to this EO", + queue); + + /* Mark that a sync-API call is in progress */ + locm->sync_api.in_progress = true; + + /* + * Disable the queue if not already done, dispatcher will drop any + * further events. Need to handle events from the queue being processed + * in an EO receive function properly still. + */ + if (q_elem->state == EM_QUEUE_STATE_READY) { + ret = queue_disable(q_elem); + + if (unlikely(ret != EM_OK)) + goto eo_remove_queue_sync_error; + } + + /* + * Request each core to run locally the eo_remove_queue_sync_local() function + * and when all are done call eo_remove_queue_sync_done_callback. + * The callback will finally remove the queue from the EO when it's + * known that no core is anymore processing events from that EO/queue. + */ + ret = eo_remove_queue_sync_local_req(eo_elem, q_elem); + if (unlikely(ret != EM_OK)) + goto eo_remove_queue_sync_error; + + /* + * Poll the core-local unscheduled control-queue for events. + * These events request the core to do a core-local operation (or nop). + * Poll and handle events until 'locm->sync_api.in_progress == false' + * indicating that this sync-API is 'done' on all concerned cores. 
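+	 *
+	 * Illustrative caller-side sketch ('eo' and 'queue' assumed created
+	 * and the queue added to this EO earlier; error handling omitted):
+	 *
+	 *   em_status_t stat = em_eo_remove_queue_sync(eo, queue);
+	 *   if (stat == EM_OK)
+	 *           (void)em_queue_delete(queue); // no core serves it anymore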
+ */ + while (locm->sync_api.in_progress) + poll_unsched_ctrl_queue(); + + return EM_OK; + +eo_remove_queue_sync_error: + locm->sync_api.in_progress = false; + + return INTERNAL_ERROR(ret, EM_ESCOPE_EO_REMOVE_QUEUE_SYNC, + "Failure: EO:%" PRI_EO " Q:%" PRI_QUEUE "", + eo, queue); +} + +em_status_t +em_eo_remove_queue_all(em_eo_t eo, int delete_queues, + int num_notif, const em_notif_t notif_tbl[]) +{ + eo_elem_t *const eo_elem = eo_elem_get(eo); + em_status_t ret; + + RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ARG, + EM_ESCOPE_EO_REMOVE_QUEUE_ALL, + "Invalid EO:%" PRI_EO "", eo); + RETURN_ERROR_IF(!eo_allocated(eo_elem), EM_ERR_NOT_CREATED, + EM_ESCOPE_EO_REMOVE_QUEUE_ALL, + "EO:%" PRI_EO " not created", eo); + ret = check_notif_tbl(num_notif, notif_tbl); + RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_REMOVE_QUEUE_ALL, + "Invalid notif cfg given!"); + + ret = queue_disable_all(eo_elem); + RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_REMOVE_QUEUE_ALL, + "queue_disable_all() failed!"); + + /* + * Request each core to run locally the eo_remove_queue_all_local() function + * and when all are done call eo_remove_queue_all_done_callback(). + * The callback will finally remove the queue from the EO when it's + * known that no core is anymore processing events from that EO/queue. + */ + return eo_remove_queue_all_local_req(eo_elem, delete_queues, + num_notif, notif_tbl); +} + +em_status_t +em_eo_remove_queue_all_sync(em_eo_t eo, int delete_queues) +{ + em_locm_t *const locm = &em_locm; + eo_elem_t *const eo_elem = eo_elem_get(eo); + em_status_t ret; + + RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ARG, + EM_ESCOPE_EO_REMOVE_QUEUE_ALL_SYNC, + "Invalid EO:%" PRI_EO "", eo); + RETURN_ERROR_IF(!eo_allocated(eo_elem), EM_ERR_NOT_CREATED, + EM_ESCOPE_EO_REMOVE_QUEUE_ALL_SYNC, + "EO:%" PRI_EO " not created", eo); + + /* Mark that a sync-API call is in progress */ + locm->sync_api.in_progress = true; + + ret = queue_disable_all(eo_elem); + if (unlikely(ret != EM_OK)) + goto eo_remove_queue_all_sync_error; + + /* + * Request each core to run locally the eo_remove_queue_all_sync_local() function + * and when all are done call eo_remove_queue_all_sync_done_callback(). + * The callback will finally remove the queue from the EO when it's + * known that no core is anymore processing events from that EO/queue. + */ + ret = eo_remove_queue_all_sync_local_req(eo_elem, delete_queues); + if (unlikely(ret != EM_OK)) + goto eo_remove_queue_all_sync_error; + + /* + * Poll the core-local unscheduled control-queue for events. + * These events request the core to do a core-local operation (or nop). + * Poll and handle events until 'locm->sync_api.in_progress == false' + * indicating that this sync-API is 'done' on all concerned cores. 
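+	 *
+	 * Illustrative teardown sketch ('eo' assumed stopped already, e.g.
+	 * via em_eo_stop_sync(); error handling omitted):
+	 *
+	 *   em_status_t stat = em_eo_remove_queue_all_sync(eo, EM_TRUE);
+	 *   if (stat == EM_OK)
+	 *           stat = em_eo_delete(eo);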
+ */ + while (locm->sync_api.in_progress) + poll_unsched_ctrl_queue(); + + return EM_OK; + +eo_remove_queue_all_sync_error: + locm->sync_api.in_progress = false; + + return INTERNAL_ERROR(ret, EM_ESCOPE_EO_REMOVE_QUEUE_SYNC, + "Failure: EO:%" PRI_EO "", eo); +} + +em_status_t +em_eo_register_error_handler(em_eo_t eo, em_error_handler_t handler) +{ + eo_elem_t *const eo_elem = eo_elem_get(eo); + + RETURN_ERROR_IF(eo_elem == NULL || handler == NULL, + EM_ERR_BAD_ARG, EM_ESCOPE_EO_REGISTER_ERROR_HANDLER, + "Invalid args: EO:%" PRI_EO " handler:%p", eo, handler); + RETURN_ERROR_IF(!eo_allocated(eo_elem), + EM_ERR_NOT_CREATED, EM_ESCOPE_EO_REGISTER_ERROR_HANDLER, + "EO:%" PRI_EO " not created", eo); + + env_spinlock_lock(&eo_elem->lock); + eo_elem->error_handler_func = handler; + env_spinlock_unlock(&eo_elem->lock); + + return EM_OK; +} + +em_status_t +em_eo_unregister_error_handler(em_eo_t eo) +{ + eo_elem_t *const eo_elem = eo_elem_get(eo); + + RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ARG, + EM_ESCOPE_EO_UNREGISTER_ERROR_HANDLER, + "Invalid EO id %" PRI_EO "", eo); + RETURN_ERROR_IF(!eo_allocated(eo_elem), EM_ERR_NOT_CREATED, + EM_ESCOPE_EO_UNREGISTER_ERROR_HANDLER, + "EO not created:%" PRI_EO "", eo); + + env_spinlock_lock(&eo_elem->lock); + eo_elem->error_handler_func = NULL; + env_spinlock_unlock(&eo_elem->lock); + + return EM_OK; +} + +em_status_t +em_eo_start(em_eo_t eo, em_status_t *result, const em_eo_conf_t *conf, + int num_notif, const em_notif_t notif_tbl[]) +{ + em_locm_t *const locm = &em_locm; + eo_elem_t *const eo_elem = eo_elem_get(eo); + queue_elem_t *const save_q_elem = locm->current.q_elem; + queue_elem_t tmp_q_elem; + em_status_t ret; + + RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ARG, EM_ESCOPE_EO_START, + "Invalid EO id %" PRI_EO "", eo); + RETURN_ERROR_IF(!eo_allocated(eo_elem), + EM_ERR_NOT_CREATED, EM_ESCOPE_EO_START, + "EO not created:%" PRI_EO "", eo); + RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_CREATED, + EM_ERR_BAD_STATE, EM_ESCOPE_EO_START, + "EO invalid state, cannot start:%d", eo_elem->state); + ret = check_notif_tbl(num_notif, notif_tbl); + RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_START, + "Invalid notif cfg given!"); + + eo_elem->state = EM_EO_STATE_STARTING; + + /* Create a stash to buffer events sent during EO-start */ + eo_elem->stash = eo_start_stash_create(); + if (unlikely(eo_elem->stash == ODP_STASH_INVALID)) { + ret = INTERNAL_ERROR(EM_ERR, EM_ESCOPE_EO_START, + "EO:%" PRI_EO " start stash creation fails", eo); + goto eo_start_error; + } + /* This core is in the EO start function: buffer all sent events */ + locm->start_eo_elem = eo_elem; + /* + * Use a tmp q_elem as the 'current q_elem' to enable calling + * em_eo_current() from the EO start functions. + * Before returning, restore the original 'current q_elem' from + * 'save_q_elem'. 
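+	 *
+	 * I.e. a user-provided global start function may legally do the
+	 * following (illustrative sketch, 'my_start' is a hypothetical name):
+	 *
+	 *   em_status_t my_start(void *eo_ctx, em_eo_t eo, const em_eo_conf_t *conf)
+	 *   {
+	 *           return em_eo_current() == eo ? EM_OK : EM_ERR;
+	 *   }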
+ */ + memset(&tmp_q_elem, 0, sizeof(tmp_q_elem)); + tmp_q_elem.eo = (uint16_t)(uintptr_t)eo; + + locm->current.q_elem = &tmp_q_elem; + /* Call the global EO start function */ + ret = eo_elem->start_func(eo_elem->eo_ctx, eo, conf); + /* Restore the original 'current q_elem' */ + locm->current.q_elem = save_q_elem; + locm->start_eo_elem = NULL; + + /* Store the return value of the actual EO global start function */ + if (result != NULL) + *result = ret; + + if (unlikely(ret != EM_OK)) { + ret = INTERNAL_ERROR(EM_ERR, EM_ESCOPE_EO_START, + "EO:%" PRI_EO " start func fails:0x%08x", + eo, ret); + /* user error handler might change error from own eo-start */ + if (ret != EM_OK) + goto eo_start_error; + } + + if (eo_elem->start_local_func != NULL) { + /* + * Notifications sent when the local start functions + * have completed. + */ + ret = eo_start_local_req(eo_elem, num_notif, notif_tbl); + + if (unlikely(ret != EM_OK)) { + INTERNAL_ERROR(ret, EM_ESCOPE_EO_START, + "EO:%" PRI_EO " local start func fails", + eo); + /* Can't allow user err handler to change error here */ + goto eo_start_error; + } + /* + * Note: Return here, queues will be enabled after the local + * start funcs complete. + * EO state changed to 'EM_EO_STATE_RUNNING' after successful + * completion of EO local starts on all cores. + */ + return EM_OK; + } + + /* + * Enable all the EO's queues. + * Note: if local start functions are given then enable can be done only + * after they have been run on each core. + */ + ret = queue_enable_all(eo_elem); + if (unlikely(ret != EM_OK)) + goto eo_start_error; + + eo_elem->state = EM_EO_STATE_RUNNING; + + /* Send events buffered during the EO-start/local-start functions */ + eo_start_send_buffered_events(eo_elem); + + if (num_notif > 0) { + /* Send notifications if requested */ + ret = send_notifs(num_notif, notif_tbl); + + if (unlikely(ret != EM_OK)) { + ret = INTERNAL_ERROR(ret, EM_ESCOPE_EO_START, + "EO:%" PRI_EO " send notif fails", + eo); + /* user error handler might change error */ + if (ret != EM_OK) + goto eo_start_error; + } + } + + return EM_OK; + +eo_start_error: + /* roll back state to allow EO delete */ + eo_elem->state = EM_EO_STATE_ERROR; + return ret; +} + +em_status_t +em_eo_start_sync(em_eo_t eo, em_status_t *result, const em_eo_conf_t *conf) +{ + em_locm_t *const locm = &em_locm; + eo_elem_t *const eo_elem = eo_elem_get(eo); + queue_elem_t *const save_q_elem = locm->current.q_elem; + queue_elem_t tmp_q_elem; + em_status_t ret; + + RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ARG, EM_ESCOPE_EO_START_SYNC, + "Invalid EO id %" PRI_EO "", eo); + RETURN_ERROR_IF(!eo_allocated(eo_elem), + EM_ERR_NOT_CREATED, EM_ESCOPE_EO_START_SYNC, + "EO not created:%" PRI_EO "", eo); + RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_CREATED, + EM_ERR_BAD_STATE, EM_ESCOPE_EO_START_SYNC, + "EO invalid state, cannot start:%d", eo_elem->state); + + eo_elem->state = EM_EO_STATE_STARTING; + + /* Create a stash to buffer events sent during EO-start */ + eo_elem->stash = eo_start_stash_create(); + if (unlikely(eo_elem->stash == ODP_STASH_INVALID)) { + ret = INTERNAL_ERROR(EM_ERR, EM_ESCOPE_EO_START, + "EO:%" PRI_EO " start stash creation fails", eo); + /* roll back state to allow EO delete */ + eo_elem->state = EM_EO_STATE_ERROR; + return ret; + } + /* This core is in the EO start function: buffer all sent events */ + locm->start_eo_elem = eo_elem; + /* + * Use a tmp q_elem as the 'current q_elem' to enable calling + * em_eo_current() from the EO start functions. 
+ * Before returning, restore the original 'current q_elem' from + * 'save_q_elem'. + */ + memset(&tmp_q_elem, 0, sizeof(tmp_q_elem)); + tmp_q_elem.eo = (uint16_t)(uintptr_t)eo; + locm->current.q_elem = &tmp_q_elem; + /* Call the global EO start function */ + ret = eo_elem->start_func(eo_elem->eo_ctx, eo, conf); + /* Restore the original 'current q_elem' */ + locm->current.q_elem = save_q_elem; + locm->start_eo_elem = NULL; + + /* Store the return value of the actual EO global start function */ + if (result != NULL) + *result = ret; + + if (unlikely(ret != EM_OK)) { + ret = INTERNAL_ERROR(EM_ERR, EM_ESCOPE_EO_START_SYNC, + "EO:%" PRI_EO " start func fails:0x%08x", + eo, ret); + /* user error handler might change error from own eo-start */ + if (ret != EM_OK) { + /* roll back state to allow EO delete */ + eo_elem->state = EM_EO_STATE_ERROR; + return ret; + } + } + + if (eo_elem->start_local_func != NULL) { + /* Mark that a sync-API call is in progress */ + locm->sync_api.in_progress = true; + + locm->start_eo_elem = eo_elem; + locm->current.q_elem = &tmp_q_elem; + /* Call the local start on this core */ + ret = eo_elem->start_local_func(eo_elem->eo_ctx, eo); + /* Restore the original 'current q_elem' */ + locm->current.q_elem = save_q_elem; + locm->start_eo_elem = NULL; + + if (unlikely(ret != EM_OK)) { + INTERNAL_ERROR(ret, EM_ESCOPE_EO_START_SYNC, + "EO:%" PRI_EO " local start func fails", eo); + /* Can't allow user err handler to change error here */ + goto eo_start_sync_error; + } + + ret = eo_start_sync_local_req(eo_elem); + if (unlikely(ret != EM_OK)) { + INTERNAL_ERROR(ret, EM_ESCOPE_EO_START_SYNC, + "EO:%" PRI_EO " eo_start_sync_local_req", eo); + /* Can't allow user err handler to change error here */ + goto eo_start_sync_error; + } + + /* + * Poll the core-local unscheduled control-queue for events. + * These events request the core to do a core-local operation (or nop). + * Poll and handle events until 'locm->sync_api.in_progress == false' + * indicating that this sync-API is 'done' on all concerned cores. + */ + while (locm->sync_api.in_progress) + poll_unsched_ctrl_queue(); + + /* Send events buffered during the EO-start/local-start funcs */ + eo_start_send_buffered_events(eo_elem); + /* + * EO state changed to 'EO_STATE_RUNNING' after successful + * completion of EO local starts on all cores. + */ + return EM_OK; + } + + /* + * Enable all the EO's queues. + * Note: if local start functions are given then enable can be done only + * after they have been run on each core. 
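+	 *
+	 * Caller-side view of this sync variant (illustrative sketch; 'eo'
+	 * assumed created, error handling omitted). On successful return the
+	 * EO's queues are enabled and the EO is running:
+	 *
+	 *   em_status_t start_fn_result; // result of the user start function
+	 *   em_status_t stat = em_eo_start_sync(eo, &start_fn_result, NULL);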
+ */ + ret = queue_enable_all(eo_elem); + if (unlikely(ret != EM_OK)) + goto eo_start_sync_error; + + eo_elem->state = EM_EO_STATE_RUNNING; + + /* Send events buffered during the EO-start/local-start functions */ + eo_start_send_buffered_events(eo_elem); + return EM_OK; + +eo_start_sync_error: + locm->sync_api.in_progress = false; + /* roll back state to allow EO delete */ + eo_elem->state = EM_EO_STATE_ERROR; + return ret; +} + +em_status_t +em_eo_stop(em_eo_t eo, int num_notif, const em_notif_t notif_tbl[]) +{ + eo_elem_t *const eo_elem = eo_elem_get(eo); + em_status_t ret; + + RETURN_ERROR_IF(eo_elem == NULL || !eo_allocated(eo_elem), + EM_ERR_BAD_ARG, EM_ESCOPE_EO_STOP, + "Invalid EO:%" PRI_EO "", eo); + RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_RUNNING, + EM_ERR_BAD_STATE, EM_ESCOPE_EO_STOP, + "EO invalid state, cannot stop:%d", eo_elem->state); + ret = check_notif_tbl(num_notif, notif_tbl); + RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_STOP, + "Invalid notif cfg given!"); + + eo_elem->state = EM_EO_STATE_STOPPING; + + /* + * Disable all queues. + * It doesn't matter if some of the queues are already disabled. + */ + queue_disable_all(eo_elem); + + /* + * Notifications sent when the local stop functions + * have completed. EO global stop called when all local stops have + * been completed. EO state changed to 'stopped' only after completing + * the EO global stop function. + */ + ret = eo_stop_local_req(eo_elem, num_notif, notif_tbl); + + if (unlikely(ret != EM_OK)) { + eo_elem->state = EM_EO_STATE_ERROR; + INTERNAL_ERROR(ret, EM_ESCOPE_EO_STOP, + "EO:%" PRI_EO " local stop func fails", eo); + /* Can't allow user err handler to change error here */ + return ret; + } + + return EM_OK; +} + +em_status_t +em_eo_stop_sync(em_eo_t eo) +{ + em_locm_t *const locm = &em_locm; + eo_elem_t *const eo_elem = eo_elem_get(eo); + queue_elem_t *const save_q_elem = locm->current.q_elem; + queue_elem_t tmp_q_elem; + em_status_t ret; + + RETURN_ERROR_IF(eo_elem == NULL || !eo_allocated(eo_elem), + EM_ERR_BAD_ARG, EM_ESCOPE_EO_STOP_SYNC, + "Invalid EO:%" PRI_EO "", eo); + RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_RUNNING, + EM_ERR_BAD_STATE, EM_ESCOPE_EO_STOP_SYNC, + "EO invalid state, cannot stop:%d", eo_elem->state); + + /* Mark that a sync-API call is in progress */ + locm->sync_api.in_progress = true; + + eo_elem->state = EM_EO_STATE_STOPPING; + + /* + * Disable all queues. + * It doesn't matter if some of the queues are already disabled. + */ + ret = queue_disable_all(eo_elem); + if (unlikely(ret != EM_OK)) + goto eo_stop_sync_error; + + /* + * Use a tmp q_elem as the 'current q_elem' to enable calling + * em_eo_current() from the EO stop functions. + * Before returning, restore the original 'current q_elem' from + * 'save_q_elem'. + */ + memset(&tmp_q_elem, 0, sizeof(tmp_q_elem)); + tmp_q_elem.eo = (uint16_t)(uintptr_t)eo; + + if (eo_elem->stop_local_func != NULL) { + locm->current.q_elem = &tmp_q_elem; + /* Call the local stop on this core */ + ret = eo_elem->stop_local_func(eo_elem->eo_ctx, eo_elem->eo); + /* Restore the original 'current q_elem' */ + locm->current.q_elem = save_q_elem; + if (unlikely(ret != EM_OK)) + goto eo_stop_sync_error; + } + + /* + * Notifications sent when the local stop functions have completed. + * EO global stop called when all local stops have been completed. + * EO state changed to 'stopped' only after completing the EO global + * stop function. 
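+	 *
+	 * (In this sync variant completion is awaited by polling below
+	 * rather than signalled via notifications.)
+	 *
+	 * Illustrative teardown sketch ('eo' assumed running; error
+	 * handling omitted):
+	 *
+	 *   em_status_t stat = em_eo_stop_sync(eo);
+	 *   if (stat == EM_OK)
+	 *           stat = em_eo_delete(eo); // EO 'stopped' again, deletable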
+ */ + ret = eo_stop_sync_local_req(eo_elem); + + if (unlikely(ret != EM_OK)) { + eo_elem->state = EM_EO_STATE_ERROR; + INTERNAL_ERROR(ret, EM_ESCOPE_EO_STOP_SYNC, + "EO:%" PRI_EO " local stop func fails", eo); + /* Can't allow user err handler to change error here */ + goto eo_stop_sync_error; + } + + /* + * Poll the core-local unscheduled control-queue for events. + * These events request the core to do a core-local operation (or nop). + * Poll and handle events until 'locm->sync_api.in_progress == false' + * indicating that this sync-API is 'done' on all concerned cores. + */ + while (locm->sync_api.in_progress) + poll_unsched_ctrl_queue(); + + /* Change state here to allow em_eo_delete() from EO global stop */ + eo_elem->state = EM_EO_STATE_CREATED; /* == stopped */ + + locm->current.q_elem = &tmp_q_elem; + /* + * Call the Global EO stop function now that all + * EO local stop functions are done. + */ + ret = eo_elem->stop_func(eo_elem->eo_ctx, eo); + /* Restore the original 'current q_elem' */ + locm->current.q_elem = save_q_elem; + + RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_STOP_SYNC, + "EO:%" PRI_EO " stop-func failed", eo); + /* + * Note: the EO might not be available after this if the EO global stop + * called em_eo_delete()! + */ + return EM_OK; + +eo_stop_sync_error: + locm->sync_api.in_progress = false; + return INTERNAL_ERROR(ret, EM_ESCOPE_EO_STOP_SYNC, + "Failure: EO:%" PRI_EO "", eo); +} + +em_eo_t +em_eo_current(void) +{ + return eo_current(); +} + +void * +em_eo_get_context(em_eo_t eo) +{ + const eo_elem_t *eo_elem = eo_elem_get(eo); + em_eo_state_t eo_state; + + if (unlikely(EM_CHECK_LEVEL > 0 && eo_elem == NULL)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EO_GET_CONTEXT, + "Invalid EO:%" PRI_EO "", eo); + return NULL; + } + + if (unlikely(EM_CHECK_LEVEL >= 2 && !eo_allocated(eo_elem))) { + INTERNAL_ERROR(EM_ERR_NOT_CREATED, EM_ESCOPE_EO_GET_CONTEXT, + "EO:%" PRI_EO " not created!", eo); + return NULL; + } + + eo_state = eo_elem->state; + if (unlikely(EM_CHECK_LEVEL > 0 && eo_state < EM_EO_STATE_CREATED)) { + INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_EO_GET_CONTEXT, + "Invalid EO state: EO:%" PRI_EO " state:%d", + eo, eo_state); + return NULL; + } + + return eo_elem->eo_ctx; +} + +em_eo_state_t +em_eo_get_state(em_eo_t eo) +{ + const eo_elem_t *eo_elem = eo_elem_get(eo); + + if (unlikely(EM_CHECK_LEVEL > 0 && eo_elem == NULL)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EO_GET_STATE, + "Invalid EO:%" PRI_EO "", eo); + return EM_EO_STATE_UNDEF; + } + + if (unlikely(EM_CHECK_LEVEL >= 2 && !eo_allocated(eo_elem))) { + INTERNAL_ERROR(EM_ERR_NOT_CREATED, EM_ESCOPE_EO_GET_STATE, + "EO:%" PRI_EO " not created", eo); + return EM_EO_STATE_UNDEF; + } + + return eo_elem->state; +} + +em_eo_t +em_eo_get_first(unsigned int *num) +{ + _eo_tbl_iter_idx = 0; /* reset iteration */ + const unsigned int eo_cnt = eo_count(); + + if (num) + *num = eo_cnt; + + if (eo_cnt == 0) { + _eo_tbl_iter_idx = EM_MAX_EOS; /* UNDEF = _get_next() */ + return EM_EO_UNDEF; + } + + /* find first */ + while (!eo_allocated(&em_shm->eo_tbl.eo_elem[_eo_tbl_iter_idx])) { + _eo_tbl_iter_idx++; + if (_eo_tbl_iter_idx >= EM_MAX_EOS) + return EM_EO_UNDEF; + } + + return eo_idx2hdl(_eo_tbl_iter_idx); +} + +em_eo_t +em_eo_get_next(void) +{ + if (_eo_tbl_iter_idx >= EM_MAX_EOS - 1) + return EM_EO_UNDEF; + + _eo_tbl_iter_idx++; + + /* find next */ + while (!eo_allocated(&em_shm->eo_tbl.eo_elem[_eo_tbl_iter_idx])) { + _eo_tbl_iter_idx++; + if (_eo_tbl_iter_idx >= EM_MAX_EOS) + return EM_EO_UNDEF; + } + + return 
eo_idx2hdl(_eo_tbl_iter_idx);
+}
+
+em_queue_t
+em_eo_queue_get_first(unsigned int *num, em_eo_t eo)
+{
+	const eo_elem_t *eo_elem = eo_elem_get(eo);
+	const unsigned int max_queues = em_shm->opt.queue.max_num;
+
+	if (unlikely(eo_elem == NULL || !eo_allocated(eo_elem))) {
+		INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EO_QUEUE_GET_FIRST,
+			       "Invalid EO:%" PRI_EO "", eo);
+		if (num)
+			*num = 0;
+		return EM_QUEUE_UNDEF;
+	}
+
+	const unsigned int num_queues = env_atomic32_get(&eo_elem->num_queues);
+
+	if (num)
+		*num = num_queues;
+
+	if (num_queues == 0) {
+		_eo_q_iter_idx = max_queues; /* UNDEF = _get_next() */
+		return EM_QUEUE_UNDEF;
+	}
+
+	/*
+	 * An 'eo_elem' contains a linked list with all its queues. That list
+	 * might be modified while processing this iteration, so instead we just
+	 * go through the whole queue table.
+	 * This is potentially a slow implementation and perhaps worth
+	 * re-thinking?
+	 */
+	const queue_tbl_t *const queue_tbl = &em_shm->queue_tbl;
+
+	_eo_q_iter_idx = 0; /* reset list */
+	_eo_q_iter_eo = eo;
+
+	/* find first */
+	while (!queue_allocated(&queue_tbl->queue_elem[_eo_q_iter_idx]) ||
+	       queue_tbl->queue_elem[_eo_q_iter_idx].eo != (uint16_t)(uintptr_t)_eo_q_iter_eo) {
+		_eo_q_iter_idx++;
+		if (_eo_q_iter_idx >= max_queues)
+			return EM_QUEUE_UNDEF;
+	}
+
+	return queue_idx2hdl(_eo_q_iter_idx);
+}
+
+em_queue_t
+em_eo_queue_get_next(void)
+{
+	const unsigned int max_queues = em_shm->opt.queue.max_num;
+
+	if (_eo_q_iter_idx >= max_queues - 1)
+		return EM_QUEUE_UNDEF;
+
+	_eo_q_iter_idx++;
+
+	const queue_tbl_t *const queue_tbl = &em_shm->queue_tbl;
+
+	/* find next */
+	while (!queue_allocated(&queue_tbl->queue_elem[_eo_q_iter_idx]) ||
+	       queue_tbl->queue_elem[_eo_q_iter_idx].eo != (uint16_t)(uintptr_t)_eo_q_iter_eo) {
+		_eo_q_iter_idx++;
+		if (_eo_q_iter_idx >= max_queues)
+			return EM_QUEUE_UNDEF;
+	}
+
+	return queue_idx2hdl(_eo_q_iter_idx);
+}
+
+uint64_t em_eo_to_u64(em_eo_t eo)
+{
+	return (uint64_t)eo;
+}
diff --git a/src/event_machine_event.c b/src/event_machine_event.c
index efebe240..aa4b1b75 100644
--- a/src/event_machine_event.c
+++ b/src/event_machine_event.c
@@ -1,1899 +1,1901 @@
-/*
- * Copyright (c) 2015-2023, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "em_include.h" - -em_event_t em_alloc(uint32_t size, em_event_type_t type, em_pool_t pool) -{ - const mpool_elem_t *const pool_elem = pool_elem_get(pool); - em_event_type_t major_type = em_event_type_major(type); - - if (EM_CHECK_LEVEL > 0 && - unlikely(size == 0 || !pool_elem || major_type == EM_EVENT_TYPE_TIMER_IND)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_ALLOC, - "Invalid args: size:%u type:%u pool:%" PRI_POOL "", - size, type, pool); - return EM_EVENT_UNDEF; - } - if (EM_CHECK_LEVEL >= 2 && unlikely(!pool_allocated(pool_elem))) - INTERNAL_ERROR(EM_ERR_NOT_CREATED, EM_ESCOPE_ALLOC, - "Invalid pool:%" PRI_POOL ", pool not created", pool); - - /* - * EM event pools created with type=SW can not support pkt events. - */ - if (EM_CHECK_LEVEL >= 1 && - unlikely(pool_elem->event_type == EM_EVENT_TYPE_SW && - major_type == EM_EVENT_TYPE_PACKET)) { - INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_ALLOC, - "EM-pool:%s(%" PRI_POOL "):\n" - "Invalid event type:0x%x for buf", - pool_elem->name, pool_elem->em_pool, type); - return EM_EVENT_UNDEF; - } - if (EM_CHECK_LEVEL >= 1 && - unlikely(pool_elem->event_type == EM_EVENT_TYPE_VECTOR && - major_type != EM_EVENT_TYPE_VECTOR)) { - INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_ALLOC, - "EM-pool:%s(%" PRI_POOL "):\n" - "Invalid event type:0x%x for vector", - pool_elem->name, pool_elem->em_pool, type); - return EM_EVENT_UNDEF; - } - - const em_event_t event = event_alloc(pool_elem, size, type, EVSTATE__ALLOC); - - if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { - em_status_t err = - INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_ALLOC, - "EM-pool:'%s': sz:%u type:0x%x pool:%" PRI_POOL "", - pool_elem->name, size, type, pool); - if (EM_DEBUG_PRINT && err != EM_OK && - (pool_elem->stats_opt.bit.available || - pool_elem->stats_opt.bit.cache_available)) { - em_pool_info_print(pool); - } - return EM_EVENT_UNDEF; - } - - if (EM_API_HOOKS_ENABLE && event != EM_EVENT_UNDEF) - call_api_hooks_alloc(&event, 1, 1, size, type, pool); - - return event; -} - -int em_alloc_multi(em_event_t events[/*out*/], int num, - uint32_t size, em_event_type_t type, em_pool_t pool) -{ - if (unlikely(num == 0)) - return 0; - - const mpool_elem_t *const pool_elem = pool_elem_get(pool); - int ret = 0; - - if (EM_CHECK_LEVEL > 0 && - unlikely(!events || num < 0 || size == 0 || !pool_elem || - em_event_type_major(type) == EM_EVENT_TYPE_TIMER_IND)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_ALLOC_MULTI, - "Invalid args: events:%p num:%d size:%u type:%u pool:%" PRI_POOL "", - events, num, size, type, pool); - return 0; - } - if (EM_CHECK_LEVEL >= 2 && unlikely(!pool_allocated(pool_elem))) - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_ALLOC_MULTI, - "Invalid pool:%" PRI_POOL ", pool not created", pool); - - if (pool_elem->event_type == EM_EVENT_TYPE_PACKET) { - /* - * EM event pools created with type=PKT can support SW events - * as well as pkt events. 
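-		 *
-		 * I.e. both allocations below can succeed from the same pool
-		 * (illustrative sketch; 'pkt_pool' assumed created with
-		 * event type EM_EVENT_TYPE_PACKET):
-		 *
-		 *   em_event_t ev[8];
-		 *   int n = em_alloc_multi(ev, 8, 256, EM_EVENT_TYPE_PACKET, pkt_pool);
-		 *   int m = em_alloc_multi(ev, 8, 256, EM_EVENT_TYPE_SW, pkt_pool);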
- */ - ret = event_alloc_pkt_multi(events, num, pool_elem, size, type); - } else if (pool_elem->event_type == EM_EVENT_TYPE_SW) { - /* - * EM event pools created with type=SW can not support - * pkt events. - */ - if (EM_CHECK_LEVEL >= 1 && - unlikely(em_event_type_major(type) == EM_EVENT_TYPE_PACKET)) { - INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_ALLOC_MULTI, - "EM-pool:%s(%" PRI_POOL "): Invalid event type:0x%x for buf", - pool_elem->name, pool, type); - return 0; - } - ret = event_alloc_buf_multi(events, num, pool_elem, size, type); - } else if (pool_elem->event_type == EM_EVENT_TYPE_VECTOR) { - if (EM_CHECK_LEVEL >= 1 && - unlikely(em_event_type_major(type) != EM_EVENT_TYPE_VECTOR)) { - INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_ALLOC, - "EM-pool:%s(%" PRI_POOL "): Inv. event type:0x%x for vector", - pool_elem->name, pool, type); - return 0; - } - ret = event_alloc_vector_multi(events, num, pool_elem, size, type); - } - - if (unlikely(EM_CHECK_LEVEL > 0 && ret != num)) { - em_status_t err = - INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_ALLOC_MULTI, - "Requested num:%d events, allocated:%d\n" - "EM-pool:'%s': sz:%u type:0x%x pool:%" PRI_POOL "", - num, ret, - pool_elem->name, size, type, pool); - if (EM_DEBUG_PRINT && err != EM_OK && - (pool_elem->stats_opt.bit.available || - pool_elem->stats_opt.bit.cache_available)) { - em_pool_info_print(pool); - } - } - - if (EM_API_HOOKS_ENABLE && ret > 0) - call_api_hooks_alloc(events, ret, num, size, type, pool); - - return ret; -} - -/** - * @brief Helper to check if the event is a vector - * - * @param vector_event Event handle - * @return true the event is a vector - * @return false the event is NOT a vector - */ -static inline bool is_vector_type(em_event_t vector_event) -{ - odp_event_t odp_event = event_em2odp(vector_event); - odp_event_type_t odp_etype = odp_event_type(odp_event); - - if (odp_etype == ODP_EVENT_PACKET_VECTOR) - return true; - - return false; -} - -/** - * @brief Helper to check if the event is a vector, if not report an error - * - * @param vector_event Event handle - * @param escope Error scope to use if reporting an error - * @return true the event is a vector - * @return false the event is NOT a vector, reports an error - */ -static inline bool is_vector_type_or_error(em_event_t vector_event, - em_escope_t escope) -{ - bool is_vec = is_vector_type(vector_event); - - if (likely(is_vec)) - return true; - - INTERNAL_ERROR(EM_ERR_BAD_TYPE, escope, "Event not a vector"); - return false; -} - -/** - * @brief Handle ESV state for 'em_free' for the event-table of a vector event - * - * @param event Vector event handle - */ -static void event_vector_prepare_free_full(em_event_t event, const uint16_t api_op) -{ - /* em_free() frees the vector as well as all the events it contains */ - em_event_t *ev_tbl; - uint32_t sz = event_vector_tbl(event, &ev_tbl); - - if (sz) { - event_hdr_t *ev_hdrs[sz]; - - /* same as event_to_hdr_multi(), removes gcc-12 LTO error in haswell */ - for (uint32_t i = 0; i < sz; i++) - ev_hdrs[i] = event_to_hdr(ev_tbl[i]); - - evstate_free_multi(ev_tbl, ev_hdrs, sz, api_op); - - /* drop ESV generation from event handles */ - (void)events_em2pkt_inplace(ev_tbl, sz); - } -} - -/** - * @brief Handle ESV state for 'em_event_unmark_free/_multi' for the event-table - * of a vector event. 
- * - * @param event Vector event handle - */ -static void event_vector_prepare_free_full__revert(em_event_t event, const uint16_t api_op) -{ - /* em_free() frees the vector as well as all the events it contains */ - em_event_t *ev_tbl; - uint32_t sz = event_vector_tbl(event, &ev_tbl); - - if (sz) { - event_hdr_t *ev_hdrs[sz]; - - event_to_hdr_multi(ev_tbl, ev_hdrs, sz); - evstate_unmark_free_multi(ev_tbl, ev_hdrs, sz, api_op); - - /* restore dropped ESV generation to event handles, unmodified in header */ - for (unsigned int i = 0; i < sz; i++) - ev_tbl[i] = ev_hdrs[i]->event; - } -} - -/** - * Helper to em_free() and em_free_multi() to determine whether timeout events - * from periodic timer rings can exist and if free() needs to check for them. - * Active tmo events from ring timers must never be freed (user error), only - * inactive (last tmo event after cancel) can be freed. - */ -static inline bool timer_rings_used(void) -{ - return em_shm->timers.num_ring_create_calls > 0 ? true : false; -} - -void em_free(em_event_t event) -{ - if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_FREE, - "event undefined!"); - return; - } - - odp_event_t odp_event = event_em2odp(event); - const bool esv_ena = esv_enabled(); - /* Is a check for an active periodic tmo event from a timer ring needed? */ - const bool check_tmos = EM_CHECK_LEVEL >= 2 && timer_rings_used(); - - if (unlikely(check_tmos && odp_event_type(odp_event) == ODP_EVENT_TIMEOUT)) { - event_hdr_t *ev_hdr = event_to_hdr(event); - - if (unlikely(ev_hdr->flags.tmo_type != EM_TMO_TYPE_NONE)) { - INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_FREE, - "Can't free active TIMER event"); - return; - } - if (esv_ena) - evstate_free(event, ev_hdr, EVSTATE__FREE); - } else if (esv_ena) { - event_hdr_t *ev_hdr = event_to_hdr(event); - - evstate_free(event, ev_hdr, EVSTATE__FREE); - if (is_vector_type(event)) - event_vector_prepare_free_full(event, EVSTATE__FREE); - } - - if (EM_API_HOOKS_ENABLE) - call_api_hooks_free(&event, 1); - - odp_event_free(odp_event); -} - -/** - * Helper to em_free_multi() to remove active periodic tmo events - * (from ring timers) from the free list. - * - * Active tmo events from ring timers must never be freed (user error), only - * inactive (last tmo event after cancel) can be freed. Thus remove the active - * ones if the user incorrectly tries to free them. - */ -static inline int -rem_active_ring_timer_tmos(const int num, odp_event_t odp_evtbl[/*in/out*/], - event_hdr_t *ev_hdr_tbl[/*in/out*/], - em_event_t ev_tbl[/*in/out*/]) -{ - int first_tmo_idx = -1; - - /* find first active tmo-event */ - for (int i = 0; i < num; i++) { - if (unlikely(odp_event_type(odp_evtbl[i]) == ODP_EVENT_TIMEOUT && - ev_hdr_tbl[i]->flags.tmo_type != EM_TMO_TYPE_NONE)) { - first_tmo_idx = i; - break; - } - } - - /* - * No active tmo events found - all OK, return. - * This is the normal, no-error, scenario. - */ - if (likely(first_tmo_idx == -1)) - return num; - - /* - * Error: Active tmo events found - remove them from the arrays - */ - - /* last event is tmo, no need to move/copy anything, just drop last */ - if (first_tmo_idx == num - 1) - return num - 1; - - /* - * Store indexes of "normal events" (i.e. events other than active - * tmo events) to copy from 'first_tmo_idx + 1' onwards. 
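-	 *
-	 * Worked example (illustration only): input [E0, T1, E2, T3, E4]
-	 * where T = active ring-timer tmo. Then first_tmo_idx = 1,
-	 * cpy_idx collects {2, 4}, the copy loop below compacts the
-	 * arrays to [E0, E2, E4] and the function returns 3.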
- */ - int num_cpy = 0; - int cpy_idx[num - first_tmo_idx - 1]; - - for (int i = first_tmo_idx + 1; i < num; i++) { - if (likely(!(odp_event_type(odp_evtbl[i]) == ODP_EVENT_TIMEOUT && - ev_hdr_tbl[i]->flags.tmo_type != EM_TMO_TYPE_NONE))) - cpy_idx[num_cpy++] = i; - } - - /* all further events were active tmo events, drop them */ - if (num_cpy == 0) - return first_tmo_idx; - - /* - * Remove all active tmo events from the arrays by copying the "normal" - * events into the slots occupied by the active tmo events. - */ - for (int i = 0; i < num_cpy; i++) { - int src_idx = cpy_idx[i]; - int dst_idx = first_tmo_idx + i; - - odp_evtbl[dst_idx] = odp_evtbl[src_idx]; - ev_hdr_tbl[dst_idx] = ev_hdr_tbl[src_idx]; - ev_tbl[dst_idx] = ev_tbl[src_idx]; - } - - return first_tmo_idx + num_cpy; -} - -void em_free_multi(em_event_t events[], int num) -{ - if (EM_CHECK_LEVEL > 0 && unlikely(!events || num < 0)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_FREE_MULTI, - "Inv.args: events[]:%p num:%d", events, num); - return; - } - if (unlikely(num == 0)) - return; - - if (EM_CHECK_LEVEL >= 3) { - int i; - - for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) - ; - if (unlikely(i != num)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_FREE_MULTI, - "events[%d] undefined!", i); - return; - } - } - - int num_free = num; - const bool esv_ena = esv_enabled(); - odp_event_t odp_events[num]; - - events_em2odp(events, odp_events/*out*/, num); - - /* Is a check for active periodic tmo events from timer rings needed? */ - const bool check_tmos = EM_CHECK_LEVEL >= 2 && timer_rings_used(); - - if (check_tmos || esv_ena) { - event_hdr_t *ev_hdrs[num]; - - event_to_hdr_multi(events, ev_hdrs, num); - - if (check_tmos) { - num_free = rem_active_ring_timer_tmos(num, odp_events, ev_hdrs, events); - if (unlikely(num_free != num)) - INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_FREE_MULTI, - "Can't free active TIMER events: %d of %d ignored", - num_free, num); - } - - if (esv_ena) { - evstate_free_multi(events, ev_hdrs, num_free, EVSTATE__FREE_MULTI); - - for (int i = 0; i < num_free; i++) { - if (is_vector_type(events[i])) - event_vector_prepare_free_full(events[i], - EVSTATE__FREE_MULTI); - } - } - } - - if (EM_API_HOOKS_ENABLE) - call_api_hooks_free(events, num_free); - - odp_event_free_multi(odp_events, num_free); -} - -/** - * Helper to em_send(). - * Send out of EM via event-chaining and a user-provided function - * 'event_send_device()' to another device - */ -static inline em_status_t -send_external(em_event_t event, em_queue_t queue) -{ - if (EM_API_HOOKS_ENABLE) - call_api_hooks_send(&event, 1, queue, EM_EVENT_GROUP_UNDEF); - - em_status_t stat = send_chaining(event, queue); - - if (EM_CHECK_LEVEL == 0) - return stat; - - RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_SEND, - "send out-of-EM via event-chaining failed: Q:%" PRI_QUEUE "", queue); - return EM_OK; -} - -/** - * Helper to em_send_multi(). - * Send out of EM via event-chaining and a user-provided function - * 'event_send_device()' to another device - */ -static inline int -send_external_multi(const em_event_t events[], int num, em_queue_t queue) -{ - if (EM_API_HOOKS_ENABLE) - call_api_hooks_send(events, num, queue, EM_EVENT_GROUP_UNDEF); - - int num_sent = send_chaining_multi(events, num, queue); - - if (EM_CHECK_LEVEL > 0 && unlikely(num_sent != num)) { - INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, EM_ESCOPE_SEND_MULTI, - "send_chaining_multi: req:%d, sent:%d", - num, num_sent); - } - - return num_sent; -} - -/** - * Helper to em_send(). 
- * Send to an EM internal queue. - */ -static inline em_status_t -send_internal(em_event_t event, event_hdr_t *ev_hdr, em_queue_t queue) -{ - queue_elem_t *q_elem = queue_elem_get(queue); - em_status_t stat; - - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !q_elem, - EM_ERR_BAD_ARG, EM_ESCOPE_SEND, - "Invalid queue:%" PRI_QUEUE "", queue); - RETURN_ERROR_IF(EM_CHECK_LEVEL >= 2 && !queue_allocated(q_elem), - EM_ERR_BAD_STATE, EM_ESCOPE_SEND, - "Invalid queue:%" PRI_QUEUE "", queue); - - /* Buffer events sent from EO-start to scheduled queues */ - if (unlikely(em_locm.start_eo_elem && q_elem->flags.scheduled)) { - /* - * em_send() called from within an EO-start function: - * all events sent to scheduled queues will be buffered - * and sent when the EO-start operation completes. - */ - if (esv_enabled()) - evstate_usr2em(event, ev_hdr, EVSTATE__SEND); - - int num_sent = eo_start_buffer_events(&event, 1, queue); - - if (unlikely(num_sent != 1)) { - stat = EM_ERR_OPERATION_FAILED; - goto error_return; - } - - return EM_OK; /* Success */ - } - - if (EM_API_HOOKS_ENABLE) - call_api_hooks_send(&event, 1, queue, EM_EVENT_GROUP_UNDEF); - - if (q_elem->type == EM_QUEUE_TYPE_OUTPUT) { - /* - * Send out of EM via an EM output-queue and a user provided - * function of type em_output_func_t - */ - stat = send_output(event, q_elem); - - if (unlikely(stat != EM_OK)) - goto error_return_noesv; - - return EM_OK; /* Success */ - } - - /* - * Normal send to a queue on this device - */ - if (esv_enabled()) - evstate_usr2em(event, ev_hdr, EVSTATE__SEND); - - switch (q_elem->type) { - case EM_QUEUE_TYPE_ATOMIC: - case EM_QUEUE_TYPE_PARALLEL: - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - stat = send_event(event, q_elem); - break; - case EM_QUEUE_TYPE_UNSCHEDULED: - stat = queue_unsched_enqueue(event, q_elem); - break; - case EM_QUEUE_TYPE_LOCAL: - stat = send_local(event, q_elem); - break; - default: - stat = EM_ERR_NOT_FOUND; - break; - } - - if (likely(stat == EM_OK)) - return EM_OK; /* Success */ - -error_return: - if (esv_enabled()) - evstate_usr2em_revert(event, ev_hdr, EVSTATE__SEND__FAIL); -error_return_noesv: - if (EM_CHECK_LEVEL == 0) - return stat; - stat = INTERNAL_ERROR(stat, EM_ESCOPE_SEND, - "send failed: Q:%" PRI_QUEUE " type:%" PRI_QTYPE "", - queue, q_elem->type); - return stat; -} - -/** - * Helper to em_send_multi(). - * Send to an EM internal queue. - */ -static inline int -send_internal_multi(const em_event_t events[], event_hdr_t *ev_hdrs[], - int num, em_queue_t queue) -{ - queue_elem_t *q_elem = queue_elem_get(queue); - int num_sent; - - if (EM_CHECK_LEVEL > 0 && unlikely(!q_elem)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_SEND_MULTI, - "Invalid queue:%" PRI_QUEUE "", queue); - return 0; - } - if (EM_CHECK_LEVEL >= 2 && unlikely(!queue_allocated(q_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_SEND_MULTI, - "Invalid queue:%" PRI_QUEUE "", queue); - return 0; - } - - /* Buffer events sent from EO-start to scheduled queues */ - if (unlikely(em_locm.start_eo_elem && q_elem->flags.scheduled)) { - /* - * em_send_multi() called from within an EO-start function: - * all events sent to scheduled queues will be buffered - * and sent when the EO-start operation completes. 
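-		 *
-		 * E.g. a user EO-start function may already send bursts to the
-		 * EO's own scheduled queues (illustrative sketch; 'evtbl',
-		 * 'num' and 'my_sched_queue' are hypothetical); the events are
-		 * delivered only once the EO-start operation has completed:
-		 *
-		 *   int sent = em_send_multi(evtbl, num, my_sched_queue);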
- */ - if (esv_enabled()) - evstate_usr2em_multi(events, ev_hdrs, num, - EVSTATE__SEND_MULTI); - num_sent = eo_start_buffer_events(events, num, queue); - - if (unlikely(num_sent != num)) - goto error_return; - - return num_sent; /* Success */ - } - - if (EM_API_HOOKS_ENABLE) - call_api_hooks_send(events, num, queue, EM_EVENT_GROUP_UNDEF); - - if (q_elem->type == EM_QUEUE_TYPE_OUTPUT) { - /* - * Send out of EM via an EM output-queue and a user provided - * function of type em_output_func_t - */ - num_sent = send_output_multi(events, num, q_elem); - - if (unlikely(num_sent != num)) - goto error_return_noesv; - - return num_sent; /* Success */ - } - - /* - * Normal send to a queue on this device - */ - if (esv_enabled()) - evstate_usr2em_multi(events, ev_hdrs, num, EVSTATE__SEND_MULTI); - - switch (q_elem->type) { - case EM_QUEUE_TYPE_ATOMIC: - case EM_QUEUE_TYPE_PARALLEL: - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - num_sent = send_event_multi(events, num, q_elem); - break; - case EM_QUEUE_TYPE_UNSCHEDULED: - num_sent = queue_unsched_enqueue_multi(events, num, q_elem); - break; - case EM_QUEUE_TYPE_LOCAL: - num_sent = send_local_multi(events, num, q_elem); - break; - default: - num_sent = 0; - break; - } - - if (likely(num_sent == num)) - return num_sent; /* Success */ - -error_return: - if (esv_enabled()) - evstate_usr2em_revert_multi(&events[num_sent], - &ev_hdrs[num_sent], - num - num_sent, - EVSTATE__SEND_MULTI__FAIL); -error_return_noesv: - if (EM_CHECK_LEVEL > 0) - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_SEND_MULTI, - "send-multi failed: req:%d, sent:%d", - num, num_sent); - return num_sent; -} - -em_status_t em_send(em_event_t event, em_queue_t queue) -{ - const bool is_external = queue_external(queue); - - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && event == EM_EVENT_UNDEF, - EM_ERR_BAD_ARG, EM_ESCOPE_SEND, "Invalid event"); - - event_hdr_t *ev_hdr = event_to_hdr(event); - - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && ev_hdr->event_type == EM_EVENT_TYPE_TIMER_IND, - EM_ERR_BAD_ARG, EM_ESCOPE_SEND, "Timer-ring event can't be sent"); - - /* avoid unnecessary writing 'undef' in case event is a ref */ - if (ev_hdr->egrp != EM_EVENT_GROUP_UNDEF) - ev_hdr->egrp = EM_EVENT_GROUP_UNDEF; - - /* - * External queue belongs to another EM instance, send out via EMC/BIP - */ - if (is_external) - return send_external(event, queue); - - /* - * Queue belongs to this EM instance - */ - return send_internal(event, ev_hdr, queue); -} - -/* - * em_send_group_multi() helper: check events - */ -static inline em_status_t -send_multi_check_events(const em_event_t events[], int num) -{ - if (EM_CHECK_LEVEL > 0 && unlikely(!events || num <= 0)) - return EM_ERR_BAD_ARG; - - if (EM_CHECK_LEVEL >= 3) { - int i; - - for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) - ; - if (unlikely(i != num)) - return EM_ERR_BAD_POINTER; - } - - return EM_OK; -} - -int em_send_multi(const em_event_t events[], int num, em_queue_t queue) -{ - const bool is_external = queue_external(queue); - event_hdr_t *ev_hdrs[num]; - - /* Check events */ - em_status_t err = send_multi_check_events(events, num); - - if (unlikely(err != EM_OK)) { - INTERNAL_ERROR(err, EM_ESCOPE_SEND_MULTI, - "Invalid events:%p num:%d", events, num); - return 0; - } - - event_to_hdr_multi(events, ev_hdrs, num); - - for (int i = 0; i < num; i++) { - if (EM_CHECK_LEVEL > 0 && - unlikely(ev_hdrs[i]->event_type == EM_EVENT_TYPE_TIMER_IND)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_SEND_MULTI, - "Timer-ring event[%d] can't be sent", i); - return 0; - } - /* avoid 
unnecessary writing 'undef' in case event is a ref */ - if (ev_hdrs[i]->egrp != EM_EVENT_GROUP_UNDEF) - ev_hdrs[i]->egrp = EM_EVENT_GROUP_UNDEF; - } - - /* - * External queue belongs to another EM instance, send out via EMC/BIP - */ - if (is_external) - return send_external_multi(events, num, queue); - - /* - * Queue belongs to this EM instance - */ - return send_internal_multi(events, ev_hdrs, num, queue); -} - -void *em_event_pointer(em_event_t event) -{ - if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_POINTER, - "event undefined!"); - return NULL; - } - - void *ev_ptr = event_pointer(event); - - if (EM_CHECK_LEVEL > 0 && unlikely(!ev_ptr)) - INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_EVENT_POINTER, - "Event pointer NULL (unsupported event type)"); - - return ev_ptr; -} - -void *em_event_pointer_and_size(em_event_t event, uint32_t *size /*out*/) -{ - if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_POINTER_AND_SIZE, - "event undefined!"); - return NULL; - } - - if (!size) { - /* User not interested in 'size', - * fall back to em_event_pointer() functionality - */ - void *ev_ptr = event_pointer(event); - - if (EM_CHECK_LEVEL > 0 && unlikely(!ev_ptr)) - INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_EVENT_POINTER_AND_SIZE, - "Event pointer NULL (unsupported event type)"); - return ev_ptr; - } - - const odp_event_t odp_event = event_em2odp(event); - const odp_event_type_t odp_etype = odp_event_type(odp_event); - uint32_t event_size = 0; - void *ev_ptr = NULL; /* return value */ - - if (odp_etype == ODP_EVENT_PACKET) { - const odp_packet_t odp_pkt = odp_packet_from_event(odp_event); - - ev_ptr = odp_packet_data_seg_len(odp_pkt, &event_size); - } else if (odp_etype == ODP_EVENT_BUFFER) { - const odp_buffer_t odp_buf = odp_buffer_from_event(odp_event); - const event_hdr_t *ev_hdr = odp_buffer_user_area(odp_buf); - const uint32_t align_offset = ev_hdr->align_offset; - - ev_ptr = odp_buffer_addr(odp_buf); - if (align_offset) - ev_ptr = (void *)((uintptr_t)ev_ptr + 32 - align_offset); - event_size = ev_hdr->event_size; - } - - if (EM_CHECK_LEVEL > 0 && unlikely(!ev_ptr)) { - INTERNAL_ERROR(EM_ERR_BAD_TYPE, EM_ESCOPE_EVENT_POINTER_AND_SIZE, - "Event pointer NULL (odp event type:%u)", odp_etype); - /* NULL for unrecognized odp_etype, also for vectors and timer ring tmos */ - return NULL; - } - - *size = event_size; - return ev_ptr; -} - -uint32_t em_event_get_size(em_event_t event) -{ - if (unlikely(EM_CHECK_LEVEL > 0 && event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_GET_SIZE, - "event undefined!"); - return 0; - } - - const odp_event_t odp_event = event_em2odp(event); - const odp_event_type_t odp_etype = odp_event_type(odp_event); - - if (odp_etype == ODP_EVENT_PACKET) { - odp_packet_t odp_pkt = odp_packet_from_event(odp_event); - - return odp_packet_seg_len(odp_pkt); - } else if (odp_etype == ODP_EVENT_BUFFER) { - odp_buffer_t odp_buf = odp_buffer_from_event(odp_event); - const event_hdr_t *ev_hdr = odp_buffer_user_area(odp_buf); - - return ev_hdr->event_size; - } else if (odp_etype == ODP_EVENT_TIMEOUT) { - return 0; - } - - if (EM_CHECK_LEVEL > 0) - INTERNAL_ERROR(EM_ERR_NOT_FOUND, EM_ESCOPE_EVENT_GET_SIZE, - "Unexpected odp event type:%u", odp_etype); - return 0; -} - -static inline odp_pool_t event_get_odp_pool(em_event_t event) -{ - odp_event_t odp_event = event_em2odp(event); - -#if ODP_VERSION_API_NUM(1, 43, 0) <= ODP_VERSION_API - 
return odp_event_pool(odp_event); -#else - odp_event_type_t type = odp_event_type(odp_event); - odp_pool_t odp_pool = ODP_POOL_INVALID; - - if (type == ODP_EVENT_PACKET) { - odp_packet_t pkt = odp_packet_from_event(odp_event); - - odp_pool = odp_packet_pool(pkt); - } else if (type == ODP_EVENT_BUFFER) { - odp_buffer_t buf = odp_buffer_from_event(odp_event); - - odp_pool = odp_buffer_pool(buf); - } else if (type == ODP_EVENT_PACKET_VECTOR) { - odp_packet_vector_t pktvec = odp_packet_vector_from_event(odp_event); - - odp_pool = odp_packet_vector_pool(pktvec); - } - - return odp_pool; -#endif -} - -em_pool_t em_event_get_pool(em_event_t event) -{ - if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_GET_POOL, - "event undefined!"); - return EM_POOL_UNDEF; - } - - odp_pool_t odp_pool = event_get_odp_pool(event); - - if (unlikely(odp_pool == ODP_POOL_INVALID)) - return EM_POOL_UNDEF; - - em_pool_t pool = pool_odp2em(odp_pool); - - /* - * Don't report an error if 'pool == EM_POOL_UNDEF' since that might - * happen if the event is e.g. input from pktio that is using external - * (to EM) odp pools. - */ - return pool; -} - -em_pool_t em_event_get_pool_subpool(em_event_t event, int *subpool /*out*/) -{ - if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_GET_POOL_SUBPOOL, - "event undefined!"); - return EM_POOL_UNDEF; - } - - odp_pool_t odp_pool = event_get_odp_pool(event); - - if (unlikely(odp_pool == ODP_POOL_INVALID)) - return EM_POOL_UNDEF; - - pool_subpool_t pool_subpool = pool_subpool_odp2em(odp_pool); - - if (unlikely(pool_subpool.pool == (uint32_t)(uintptr_t)EM_POOL_UNDEF)) - return EM_POOL_UNDEF; - - if (subpool) - *subpool = pool_subpool.subpool; - - return (em_pool_t)(uintptr_t)pool_subpool.pool; -} - -em_status_t em_event_set_type(em_event_t event, em_event_type_t newtype) -{ - if (EM_CHECK_LEVEL > 0) - RETURN_ERROR_IF(event == EM_EVENT_UNDEF, EM_ERR_BAD_ARG, - EM_ESCOPE_EVENT_SET_TYPE, "event undefined!"); - - /* similar to 'ev_hdr = event_to_hdr(event)', slightly extended: */ - odp_event_t odp_event = event_em2odp(event); - odp_event_type_t evtype = odp_event_type(odp_event); - event_hdr_t *ev_hdr; - - switch (evtype) { - case ODP_EVENT_PACKET: { - odp_packet_t odp_pkt = odp_packet_from_event(odp_event); - - ev_hdr = odp_packet_user_area(odp_pkt); - break; - } - case ODP_EVENT_BUFFER: { - odp_buffer_t odp_buf = odp_buffer_from_event(odp_event); - - ev_hdr = odp_buffer_user_area(odp_buf); - break; - } - case ODP_EVENT_PACKET_VECTOR: { - odp_packet_vector_t odp_pktvec = odp_packet_vector_from_event(odp_event); - em_event_type_t new_major = em_event_type_major(newtype); - - if (EM_CHECK_LEVEL >= 1) - RETURN_ERROR_IF(new_major != EM_EVENT_TYPE_VECTOR, - EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_SET_TYPE, - "Event type:0x%x not suitable for a vector", newtype); - ev_hdr = odp_packet_vector_user_area(odp_pktvec); - break; - } - default: - return INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_EVENT_SET_TYPE, - "Unsupported odp event type:%u", evtype); - } - - ev_hdr->event_type = newtype; - - return EM_OK; -} - -em_event_type_t em_event_get_type(em_event_t event) -{ - const event_hdr_t *ev_hdr; - - if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_GET_TYPE, - "event undefined!"); - return EM_EVENT_TYPE_UNDEF; - } - - ev_hdr = event_to_hdr(event); - - if (EM_CHECK_LEVEL >= 3 && unlikely(ev_hdr == NULL)) { - 
INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_EVENT_GET_TYPE, - "ev_hdr == NULL"); - return EM_EVENT_TYPE_UNDEF; - } - - return ev_hdr->event_type; -} - -int em_event_get_type_multi(const em_event_t events[], int num, - em_event_type_t types[/*out:num*/]) -{ - int i; - - /* Check all args */ - if (EM_CHECK_LEVEL > 0) { - if (unlikely(!events || num < 0 || !types)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, - EM_ESCOPE_EVENT_GET_TYPE_MULTI, - "Inv.args: events:%p num:%d types:%p", - events, num, types); - return 0; - } - if (unlikely(!num)) - return 0; - } - - if (EM_CHECK_LEVEL >= 3) { - for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) - ; - if (unlikely(i != num)) { - INTERNAL_ERROR(EM_ERR_BAD_POINTER, - EM_ESCOPE_EVENT_GET_TYPE_MULTI, - "events[%d] undefined!", i); - return 0; - } - } - - event_hdr_t *ev_hdrs[num]; - - event_to_hdr_multi(events, ev_hdrs, num); - - for (i = 0; i < num; i++) - types[i] = ev_hdrs[i]->event_type; - - return num; -} - -int em_event_same_type_multi(const em_event_t events[], int num, - em_event_type_t *same_type /*out*/) -{ - /* Check all args */ - if (EM_CHECK_LEVEL > 0) { - if (unlikely(!events || num < 0 || !same_type)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, - EM_ESCOPE_EVENT_SAME_TYPE_MULTI, - "Inv.args: events:%p num:%d same_type:%p", - events, num, same_type); - return 0; - } - if (unlikely(!num)) - return 0; - } - - if (EM_CHECK_LEVEL >= 3) { - int i; - - for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) - ; - if (unlikely(i != num)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, - EM_ESCOPE_EVENT_SAME_TYPE_MULTI, - "events[%d] undefined!", i); - return 0; - } - } - - const em_event_type_t type = event_to_hdr(events[0])->event_type; - int same = 1; - - for (; same < num && type == event_to_hdr(events[same])->event_type; - same++) - ; - - *same_type = type; - return same; -} - -em_status_t em_event_mark_send(em_event_t event, em_queue_t queue) -{ - if (!esv_enabled()) - return EM_OK; - - /* Check all args */ - if (EM_CHECK_LEVEL > 0) - RETURN_ERROR_IF(event == EM_EVENT_UNDEF, - EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_SEND, - "Inv.args: event:%" PRI_EVENT "", event); - if (EM_CHECK_LEVEL >= 3) { - const queue_elem_t *const q_elem = queue_elem_get(queue); - - RETURN_ERROR_IF(!q_elem, EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_SEND, - "Inv.args: Q:%" PRI_QUEUE "", queue); - RETURN_ERROR_IF(!queue_allocated(q_elem) || !q_elem->flags.scheduled, - EM_ERR_BAD_STATE, EM_ESCOPE_EVENT_MARK_SEND, - "Inv.queue:%" PRI_QUEUE " type:%" PRI_QTYPE "", - queue, q_elem->type); - } - - event_hdr_t *ev_hdr = event_to_hdr(event); - - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && ev_hdr->event_type == EM_EVENT_TYPE_TIMER_IND, - EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_SEND, "Timer-ring event not allowed"); - - /* avoid unnecessary writing 'undef' in case event is a ref */ - if (ev_hdr->egrp != EM_EVENT_GROUP_UNDEF) - ev_hdr->egrp = EM_EVENT_GROUP_UNDEF; - - evstate_usr2em(event, ev_hdr, EVSTATE__MARK_SEND); - - /* - * Data memory barrier, we are bypassing em_send(), odp_queue_enq() - * and need to guarantee memory sync before the event ends up into an - * EM queue again. 
- */ - odp_mb_full(); - - return EM_OK; -} - -em_status_t em_event_unmark_send(em_event_t event) -{ - if (!esv_enabled()) - return EM_OK; - - /* Check all args */ - if (EM_CHECK_LEVEL > 0) - RETURN_ERROR_IF(event == EM_EVENT_UNDEF, - EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UNMARK_SEND, - "Inv.args: event:%" PRI_EVENT "", event); - - event_hdr_t *ev_hdr = event_to_hdr(event); - - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && ev_hdr->event_type == EM_EVENT_TYPE_TIMER_IND, - EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UNMARK_SEND, - "Timer-ring event not allowed"); - - evstate_unmark_send(event, ev_hdr); - - return EM_OK; -} - -void em_event_mark_free(em_event_t event) -{ - if (!esv_enabled()) - return; - - if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_FREE, - "Event undefined!"); - return; - } - - event_hdr_t *const ev_hdr = event_to_hdr(event); - - if (EM_CHECK_LEVEL > 0 && unlikely(ev_hdr->event_type == EM_EVENT_TYPE_TIMER_IND)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_FREE, - "Timer-ring event not allowed"); - return; - } - - evstate_free(event, ev_hdr, EVSTATE__MARK_FREE); - - if (is_vector_type(event)) - event_vector_prepare_free_full(event, EVSTATE__MARK_FREE); -} - -void em_event_unmark_free(em_event_t event) -{ - if (!esv_enabled()) - return; - - if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UNMARK_FREE, - "Event undefined!"); - return; - } - - event_hdr_t *const ev_hdr = event_to_hdr(event); - - if (EM_CHECK_LEVEL > 0 && unlikely(ev_hdr->event_type == EM_EVENT_TYPE_TIMER_IND)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UNMARK_FREE, - "Timer-ring event not allowed"); - return; - } - - evstate_unmark_free(event, ev_hdr, EVSTATE__UNMARK_FREE); - if (is_vector_type(event)) - event_vector_prepare_free_full__revert(event, EVSTATE__UNMARK_FREE); -} - -void em_event_mark_free_multi(const em_event_t events[], int num) -{ - if (!esv_enabled()) - return; - - if (EM_CHECK_LEVEL > 0 && unlikely(!events || num < 0)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_FREE_MULTI, - "Inv.args: events[]:%p num:%d", events, num); - return; - } - if (unlikely(num == 0)) - return; - - if (EM_CHECK_LEVEL >= 3) { - int i; - - for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) - ; - if (unlikely(i != num)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, - EM_ESCOPE_EVENT_MARK_FREE_MULTI, - "events[%d] undefined!", i); - return; - } - } - - event_hdr_t *ev_hdrs[num]; - - event_to_hdr_multi(events, ev_hdrs, num); - - for (int i = 0; i < num; i++) { - if (EM_CHECK_LEVEL > 0 && - unlikely(ev_hdrs[i]->event_type == EM_EVENT_TYPE_TIMER_IND)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_FREE_MULTI, - "Timer-ring event[%d] not allowed", i); - continue; - } - - evstate_free(events[i], ev_hdrs[i], EVSTATE__MARK_FREE_MULTI); - if (is_vector_type(events[i])) - event_vector_prepare_free_full(events[i], EVSTATE__MARK_FREE_MULTI); - } -} - -void em_event_unmark_free_multi(const em_event_t events[], int num) -{ - if (!esv_enabled()) - return; - - if (unlikely(!events || num < 0)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UNMARK_FREE_MULTI, - "Inv.args: events[]:%p num:%d", events, num); - return; - } - if (unlikely(num == 0)) - return; - - if (EM_CHECK_LEVEL >= 3) { - int i; - - for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) - ; - if (unlikely(i != num)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, - EM_ESCOPE_EVENT_UNMARK_FREE_MULTI, - "events[%d] undefined!", i); - return; 
- } - } - - event_hdr_t *ev_hdrs[num]; - - event_to_hdr_multi(events, ev_hdrs, num); - - for (int i = 0; i < num; i++) { - if (EM_CHECK_LEVEL > 0 && - unlikely(ev_hdrs[i]->event_type == EM_EVENT_TYPE_TIMER_IND)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UNMARK_FREE_MULTI, - "Timer-ring event[%d] not allowed", i); - continue; - } - - evstate_unmark_free(events[i], ev_hdrs[i], EVSTATE__UNMARK_FREE_MULTI); - if (is_vector_type(events[i])) - event_vector_prepare_free_full__revert(events[i], - EVSTATE__UNMARK_FREE_MULTI); - } -} - -static em_event_t event_clone_part(em_event_t event, em_pool_t pool/*or EM_POOL_UNDEF*/, - uint32_t offset, uint32_t len, bool clone_uarea, - em_escope_t escope) -{ - const mpool_elem_t *pool_elem = pool_elem_get(pool); - /* use escope to distinguish between em_event_clone() and em_event_clone_part() */ - const bool is_clone_part = escope == EM_ESCOPE_EVENT_CLONE_PART ? true : false; - - /* Check all args */ - if (EM_CHECK_LEVEL > 0 && - unlikely(event == EM_EVENT_UNDEF || - (pool != EM_POOL_UNDEF && !pool_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, escope, - "Inv.args: event:%" PRI_EVENT " pool:%" PRI_POOL "", - event, pool); - return EM_EVENT_UNDEF; - } - - if (EM_CHECK_LEVEL >= 2 && - unlikely(pool_elem && !pool_allocated(pool_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_STATE, escope, - "Inv.args: pool:%" PRI_POOL " not created", pool); - return EM_EVENT_UNDEF; - } - - odp_event_t odp_event = event_em2odp(event); - odp_event_type_t odp_evtype = odp_event_type(odp_event); - odp_pool_t odp_pool = ODP_POOL_INVALID; - odp_packet_t pkt = ODP_PACKET_INVALID; - odp_buffer_t buf = ODP_BUFFER_INVALID; - - if (unlikely(odp_evtype != ODP_EVENT_PACKET && - odp_evtype != ODP_EVENT_BUFFER)) { - INTERNAL_ERROR(EM_ERR_BAD_ID, escope, - "Inv. odp-event-type:%d", odp_evtype); - return EM_EVENT_UNDEF; - } - - /* Obtain the event-hdr, event-size and the pool to use */ - const event_hdr_t *ev_hdr; - uint32_t size; - em_event_type_t type; - em_pool_t em_pool = pool; - event_hdr_t *clone_hdr; - em_event_t clone_event; /* return value */ - - if (odp_evtype == ODP_EVENT_PACKET) { - pkt = odp_packet_from_event(odp_event); - ev_hdr = odp_packet_user_area(pkt); - size = odp_packet_seg_len(pkt); - if (pool == EM_POOL_UNDEF) { - odp_pool = odp_packet_pool(pkt); - em_pool = pool_odp2em(odp_pool); - } - } else /* ODP_EVENT_BUFFER */ { - buf = odp_buffer_from_event(odp_event); - ev_hdr = odp_buffer_user_area(buf); - size = ev_hdr->event_size; - if (pool == EM_POOL_UNDEF) { - odp_pool = odp_buffer_pool(buf); - em_pool = pool_odp2em(odp_pool); - } - } - - if (is_clone_part) { - if (EM_CHECK_LEVEL >= 1) { - uint64_t offset64 = offset; - uint64_t len64 = len; - uint64_t size64 = size; - - if (unlikely(len == 0 || offset64 + len64 > size64)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, escope, - "Inv.args: offset=%u len=%u (0 < offset+len <= %u)", - offset, len, size); - return EM_EVENT_UNDEF; - } - } - if (len < size) - size = len; - } - - /* No EM-pool found */ - if (em_pool == EM_POOL_UNDEF) { - if (unlikely(odp_evtype == ODP_EVENT_BUFFER)) { - INTERNAL_ERROR(EM_ERR_NOT_FOUND, escope, - "No suitable event-pool found"); - return EM_EVENT_UNDEF; - } - /* odp_evtype == ODP_EVENT_PACKET: - * Not an EM-pool, e.g. event from external pktio odp-pool. - * Allocate and clone pkt via ODP directly. 
- */ - clone_event = pkt_clone_odp(pkt, odp_pool, offset, size, is_clone_part); - if (unlikely(clone_event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, escope, - "Cloning from ext odp-pool:%" PRIu64 " failed", - odp_pool_to_u64(odp_pool)); - } - return clone_event; - } - - /* - * Clone the event from an EM-pool: - */ - if (em_pool != pool) - pool_elem = pool_elem_get(em_pool); - type = ev_hdr->event_type; - - /* EM event pools created with type=SW can not support pkt events */ - if (unlikely(EM_CHECK_LEVEL > 0 && - pool_elem->event_type == EM_EVENT_TYPE_SW && - em_event_type_major(type) == EM_EVENT_TYPE_PACKET)) { - INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, escope, - "EM-pool:%s(%" PRI_POOL "):\n" - "Invalid event type:0x%x for buf", - pool_elem->name, em_pool, type); - return EM_EVENT_UNDEF; - } - - if (EM_CHECK_LEVEL > 0 && - unlikely(clone_uarea && ev_hdr->user_area.isinit && - pool_elem->user_area.size < ev_hdr->user_area.size)) { - INTERNAL_ERROR(EM_ERR_TOO_SMALL, escope, - "EM-pool:%s(%" PRI_POOL "):\n" - "Available user-area too small, clone uarea %u < needed uarea %u", - pool_elem->name, em_pool, pool_elem->user_area.size, - ev_hdr->user_area.size); - return EM_EVENT_UNDEF; - } - - if (pool_elem->event_type == EM_EVENT_TYPE_PACKET) - clone_hdr = event_alloc_pkt(pool_elem, size); - else /* EM_EVENT_TYPE_SW */ - clone_hdr = event_alloc_buf(pool_elem, size); - - if (unlikely(!clone_hdr)) { - em_status_t err = INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, escope, - "EM-pool:'%s': sz:%u type:0x%x pool:%" PRI_POOL "", - pool_elem->name, size, type, em_pool); - if (EM_DEBUG_PRINT && err != EM_OK && - (pool_elem->stats_opt.bit.available || - pool_elem->stats_opt.bit.cache_available)) - em_pool_info_print(em_pool); - return EM_EVENT_UNDEF; - } - - /* Update event ESV state for alloc/clone */ - if (esv_enabled()) - (void)evstate_alloc(clone_hdr->event, clone_hdr, EVSTATE__EVENT_CLONE); - - clone_hdr->flags.all = 0; /* clear only after evstate_alloc() */ - clone_hdr->event_type = type; /* store the event type */ - clone_hdr->event_size = size; /* store requested size */ - clone_hdr->egrp = EM_EVENT_GROUP_UNDEF; - clone_hdr->user_area.all = ev_hdr->user_area.all; - clone_hdr->user_area.size = pool_elem->user_area.size; /* uarea size comes from pool */ - clone_hdr->user_area.isinit = 1; - - /* Copy the event uarea content if used */ - if (clone_uarea && - ev_hdr->user_area.isinit && ev_hdr->user_area.size > 0) { - const void *uarea_ptr = (void *)((uintptr_t)ev_hdr + sizeof(event_hdr_t)); - void *clone_uarea_ptr = (void *)((uintptr_t)clone_hdr + sizeof(event_hdr_t)); - size_t sz = MIN(pool_elem->user_area.size, ev_hdr->user_area.size); - - memcpy(clone_uarea_ptr, uarea_ptr, sz); - } - - clone_event = clone_hdr->event; - - /* Copy event payload from the parent event into the clone event */ - uintptr_t src_addr = (uintptr_t)event_pointer(event) + offset; - const void *src = (void *)src_addr; - void *dst = event_pointer(clone_event); - - memcpy(dst, src, size); - - /* Call the 'alloc' API hook function also for event-clone */ - if (EM_API_HOOKS_ENABLE && clone_event != EM_EVENT_UNDEF) - call_api_hooks_alloc(&clone_event, 1, 1, size, type, pool); - - return clone_event; -} - -em_event_t em_event_clone(em_event_t event, em_pool_t pool/*or EM_POOL_UNDEF*/) -{ - return event_clone_part(event, pool, 0, 0, true, EM_ESCOPE_EVENT_CLONE); -} - -em_event_t em_event_clone_part(em_event_t event, em_pool_t pool/*or EM_POOL_UNDEF*/, - uint32_t offset, uint32_t len, bool clone_uarea) -{ - return 
event_clone_part(event, pool, offset, len, clone_uarea, - EM_ESCOPE_EVENT_CLONE_PART); -} - -static inline int -event_uarea_init(em_event_t event, event_hdr_t **ev_hdr/*out*/) -{ - const odp_event_t odp_event = event_em2odp(event); - const odp_event_type_t odp_evtype = odp_event_type(odp_event); - odp_pool_t odp_pool = ODP_POOL_INVALID; - odp_packet_t odp_pkt; - odp_buffer_t odp_buf; - odp_packet_vector_t odp_pktvec; - event_hdr_t *hdr; - bool is_init; - - switch (odp_evtype) { - case ODP_EVENT_PACKET: - odp_pkt = odp_packet_from_event(odp_event); - hdr = odp_packet_user_area(odp_pkt); - is_init = hdr->user_area.isinit; - if (!is_init) - odp_pool = odp_packet_pool(odp_pkt); - break; - case ODP_EVENT_BUFFER: - odp_buf = odp_buffer_from_event(odp_event); - hdr = odp_buffer_user_area(odp_buf); - is_init = hdr->user_area.isinit; - if (!is_init) - odp_pool = odp_buffer_pool(odp_buf); - break; - case ODP_EVENT_PACKET_VECTOR: - odp_pktvec = odp_packet_vector_from_event(odp_event); - hdr = odp_packet_vector_user_area(odp_pktvec); - is_init = hdr->user_area.isinit; - if (!is_init) - odp_pool = odp_packet_vector_pool(odp_pktvec); - break; - default: - return -1; - } - - *ev_hdr = hdr; - - if (!is_init) { - /* - * Event user area metadata is not initialized in - * the event header - initialize it: - */ - hdr->user_area.all = 0; /* user_area.{} = all zero (.sizes=0) */ - hdr->user_area.isinit = 1; - - em_pool_t pool = pool_odp2em(odp_pool); - - if (pool == EM_POOL_UNDEF) - return 0; /* ext ODP pool: OK, no user area, sz=0 */ - - /* Event from an EM event pool, can init event user area */ - const mpool_elem_t *pool_elem = pool_elem_get(pool); - - if (unlikely(!pool_elem)) - return -2; /* invalid pool_elem */ - - hdr->user_area.size = pool_elem->user_area.size; - } - - return 0; -} - -void *em_event_uarea_get(em_event_t event, size_t *size /*out, if given*/) -{ - /* Check args */ - if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UAREA_GET, - "Inv.arg: event undef"); - goto no_uarea; - } - - event_hdr_t *ev_hdr = NULL; - int err = event_uarea_init(event, &ev_hdr/*out*/); - - if (EM_CHECK_LEVEL > 0 && unlikely(err)) { - INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, EM_ESCOPE_EVENT_UAREA_GET, - "Cannot init event user area: %d", err); - goto no_uarea; - } - - if (ev_hdr->user_area.size == 0) - goto no_uarea; - - /* - * Event has user area configured, return pointer and size - */ - void *uarea_ptr = (void *)((uintptr_t)ev_hdr + sizeof(event_hdr_t)); - - if (size) - *size = ev_hdr->user_area.size; - - return uarea_ptr; - -no_uarea: - if (size) - *size = 0; - return NULL; -} - -em_status_t em_event_uarea_id_set(em_event_t event, uint16_t id) -{ - /* Check args */ - if (EM_CHECK_LEVEL > 0) - RETURN_ERROR_IF(event == EM_EVENT_UNDEF, - EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UAREA_ID_SET, - "Inv.arg: event undef"); - - event_hdr_t *ev_hdr = NULL; - int err = event_uarea_init(event, &ev_hdr/*out*/); - - if (EM_CHECK_LEVEL > 0) - RETURN_ERROR_IF(err, EM_ERR_OPERATION_FAILED, - EM_ESCOPE_EVENT_UAREA_ID_SET, - "Cannot init event user area: %d", err); - - ev_hdr->user_area.id = id; - ev_hdr->user_area.isset_id = 1; - - return EM_OK; -} - -em_status_t em_event_uarea_id_get(em_event_t event, bool *isset /*out*/, - uint16_t *id /*out*/) -{ - bool id_set = false; - em_status_t status = EM_OK; - - /* Check args, either 'isset' or 'id' ptrs must be provided (or both) */ - if (EM_CHECK_LEVEL > 0 && - (event == EM_EVENT_UNDEF || !(id || isset))) { - status = 
INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UAREA_ID_GET, - "Inv.args: event:%" PRI_EVENT " isset:%p id:%p", - event, isset, id); - goto id_isset; - } - - event_hdr_t *ev_hdr = NULL; - int err = event_uarea_init(event, &ev_hdr/*out*/); - - if (EM_CHECK_LEVEL > 0 && unlikely(err)) { - status = INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, - EM_ESCOPE_EVENT_UAREA_ID_GET, - "Cannot init event user area: %d", err); - goto id_isset; - } - - if (ev_hdr->user_area.isset_id) { - /* user-area-id has been set */ - id_set = true; - if (id) - *id = ev_hdr->user_area.id; /*out*/ - } - -id_isset: - if (isset) - *isset = id_set; /*out*/ - return status; -} - -em_status_t em_event_uarea_info(em_event_t event, - em_event_uarea_info_t *uarea_info /*out*/) -{ - em_status_t status = EM_ERROR; - - /* Check args */ - if (EM_CHECK_LEVEL > 0 && - unlikely(event == EM_EVENT_UNDEF || !uarea_info)) { - status = INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UAREA_INFO, - "Inv.args: event:%" PRI_EVENT " uarea_info:%p", - event, uarea_info); - goto err_uarea; - } - - event_hdr_t *ev_hdr = NULL; - int err = event_uarea_init(event, &ev_hdr/*out*/); - - if (EM_CHECK_LEVEL > 0 && unlikely(err)) { - status = INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, - EM_ESCOPE_EVENT_UAREA_INFO, - "Cannot init event user area: %d", err); - goto err_uarea; - } - - if (ev_hdr->user_area.size == 0) { - uarea_info->uarea = NULL; - uarea_info->size = 0; - } else { - uarea_info->uarea = (void *)((uintptr_t)ev_hdr + - sizeof(event_hdr_t)); - uarea_info->size = ev_hdr->user_area.size; - } - - if (ev_hdr->user_area.isset_id) { - uarea_info->id.isset = true; - uarea_info->id.value = ev_hdr->user_area.id; - } else { - uarea_info->id.isset = false; - uarea_info->id.value = 0; - } - - return EM_OK; - -err_uarea: - if (uarea_info) { - uarea_info->uarea = NULL; - uarea_info->size = 0; - uarea_info->id.isset = false; - uarea_info->id.value = 0; - } - return status; -} - -em_event_t em_event_ref(em_event_t event) -{ - /* Check args */ - if (unlikely(EM_CHECK_LEVEL > 0 && event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_REF, - "Invalid arg: event:%" PRI_EVENT "", event); - return EM_EVENT_UNDEF; - } - - odp_event_t odp_event = event_em2odp(event); - odp_event_type_t odp_etype = odp_event_type(odp_event); - - if (EM_CHECK_LEVEL > 0 && unlikely(odp_etype != ODP_EVENT_PACKET)) { - INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_EVENT_REF, - "Event not a packet! Refs not supported for odp-events of type:%d", - odp_etype); - return EM_EVENT_UNDEF; - } - - odp_packet_t odp_pkt = odp_packet_from_event(odp_event); - odp_packet_t pkt_ref = odp_packet_ref_static(odp_pkt); - event_hdr_t *ev_hdr = odp_packet_user_area(odp_pkt); - - if (EM_CHECK_LEVEL > 0 && unlikely(pkt_ref == ODP_PACKET_INVALID)) { - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_EVENT_REF, - "ODP failure in odp_packet_ref_static()"); - return EM_EVENT_UNDEF; - } - - if (unlikely(EM_CHECK_LEVEL >= 2 && odp_pkt != pkt_ref)) { - INTERNAL_ERROR(EM_FATAL(EM_ERR_NOT_IMPLEMENTED), EM_ESCOPE_EVENT_REF, - "EM assumes all refs use the same handle"); - odp_packet_free(odp_pkt); - return EM_EVENT_UNDEF; - } - - /* - * Indicate that this event has references and some of the ESV checks - * must be omitted (evgen) - 'refs_used' will be set for the whole - * lifetime of this event, i.e. until the event is freed back into the - * pool. Important only for the first call of em_event_ref(), subsequent - * calls write same value. 
- */ - ev_hdr->flags.refs_used = 1; - - em_event_t ref = event; - - if (esv_enabled()) - ref = evstate_ref(event, ev_hdr); - - return ref; -} - -bool em_event_has_ref(em_event_t event) -{ - /* Check args */ - if (unlikely(EM_CHECK_LEVEL > 0 && event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_HAS_REF, - "Invalid arg: event:%" PRI_EVENT "", event); - return false; - } - - return event_has_ref(event); -} - -void em_event_vector_free(em_event_t vector_event) -{ - if (EM_CHECK_LEVEL > 0 && - unlikely(vector_event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_VECTOR_FREE, - "Invalid args: vector_event:%" PRI_EVENT "", - vector_event); - return; - } - - if (EM_CHECK_LEVEL > 2 && - unlikely(!is_vector_type_or_error(vector_event, EM_ESCOPE_EVENT_VECTOR_FREE))) { - return; - } - - if (EM_API_HOOKS_ENABLE) - call_api_hooks_free(&vector_event, 1); - - if (esv_enabled()) { - event_hdr_t *const ev_hdr = eventvec_to_hdr(vector_event); - - evstate_free(vector_event, ev_hdr, EVSTATE__EVENT_VECTOR_FREE); - } - - odp_event_t odp_event = event_em2odp(vector_event); - odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event); - - odp_packet_vector_free(pkt_vec); -} - -uint32_t em_event_vector_tbl(em_event_t vector_event, - em_event_t **event_tbl/*out*/) -{ - if (EM_CHECK_LEVEL > 0 && - unlikely(vector_event == EM_EVENT_UNDEF || !event_tbl)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_VECTOR_TBL, - "Invalid args: vector_event:%" PRI_EVENT " event_tbl:%p", - vector_event, event_tbl); - return 0; - } - - if (EM_CHECK_LEVEL > 2 && - unlikely(!is_vector_type_or_error(vector_event, EM_ESCOPE_EVENT_VECTOR_TBL))) { - *event_tbl = NULL; - return 0; - } - - return event_vector_tbl(vector_event, event_tbl /*out*/); -} - -uint32_t em_event_vector_size(em_event_t vector_event) -{ - if (EM_CHECK_LEVEL > 0 && - unlikely(vector_event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_VECTOR_SIZE, - "Invalid arg, vector_event undefined!", vector_event); - return 0; - } - - if (EM_CHECK_LEVEL > 2 && - unlikely(!is_vector_type_or_error(vector_event, EM_ESCOPE_EVENT_VECTOR_SIZE))) - return 0; - - odp_event_t odp_event = event_em2odp(vector_event); - odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event); - - return odp_packet_vector_size(pkt_vec); -} - -void em_event_vector_size_set(em_event_t vector_event, uint32_t size) -{ - if (EM_CHECK_LEVEL > 0 && - unlikely(vector_event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_VECTOR_SIZE_SET, - "Invalid arg, vector_event undefined!", vector_event); - return; - } - - if (EM_CHECK_LEVEL > 2 && - unlikely(!is_vector_type_or_error(vector_event, EM_ESCOPE_EVENT_VECTOR_SIZE_SET))) - return; - - odp_event_t odp_event = event_em2odp(vector_event); - odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event); - - odp_packet_vector_size_set(pkt_vec, size); -} - -uint32_t em_event_vector_max_size(em_event_t vector_event) -{ - if (EM_CHECK_LEVEL > 0 && - unlikely(vector_event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_VECTOR_MAX_SIZE, - "Invalid arg, vector_event undefined!", vector_event); - return 0; - } - - if (EM_CHECK_LEVEL > 2 && - unlikely(!is_vector_type_or_error(vector_event, EM_ESCOPE_EVENT_VECTOR_MAX_SIZE))) - return 0; - - uint32_t max_size = 0; - em_status_t err = event_vector_max_size(vector_event, &max_size, - EM_ESCOPE_EVENT_VECTOR_MAX_SIZE); - if (unlikely(err != EM_OK)) - return 0; - - return max_size; -} - 
-em_status_t em_event_vector_info(em_event_t vector_event, - em_event_vector_info_t *vector_info /*out*/) -{ - em_status_t status = EM_ERROR; - - /* Check args */ - if (EM_CHECK_LEVEL > 0 && - unlikely(vector_event == EM_EVENT_UNDEF || !vector_info)) { - status = INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_VECTOR_INFO, - "Invalid args: vector_event:%" PRI_EVENT " vector_info:%p", - vector_event, vector_info); - goto err_vecinfo; - } - - if (EM_CHECK_LEVEL > 2 && - unlikely(!is_vector_type_or_error(vector_event, EM_ESCOPE_EVENT_VECTOR_INFO))) { - status = EM_ERR_BAD_TYPE; - goto err_vecinfo; - } - - /* Get the max size */ - status = event_vector_max_size(vector_event, &vector_info->max_size, - EM_ESCOPE_EVENT_VECTOR_INFO); - if (unlikely(status != EM_OK)) - goto err_vecinfo; - - /* Get vector size and the event-table */ - vector_info->size = event_vector_tbl(vector_event, &vector_info->event_tbl/*out*/); - - return EM_OK; - -err_vecinfo: - if (vector_info) { - vector_info->event_tbl = NULL; - vector_info->size = 0; - vector_info->max_size = 0; - } - return status; -} - -uint64_t em_event_to_u64(em_event_t event) -{ - return (uint64_t)event; -} +/* + * Copyright (c) 2015-2023, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "em_include.h" + +em_event_t em_alloc(uint32_t size, em_event_type_t type, em_pool_t pool) +{ + const mpool_elem_t *const pool_elem = pool_elem_get(pool); + em_event_type_t major_type = em_event_type_major(type); + + if (EM_CHECK_LEVEL > 0 && + unlikely(size == 0 || !pool_elem || major_type == EM_EVENT_TYPE_TIMER_IND)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_ALLOC, + "Invalid args: size:%u type:%u pool:%" PRI_POOL "", + size, type, pool); + return EM_EVENT_UNDEF; + } + if (EM_CHECK_LEVEL >= 2 && unlikely(!pool_allocated(pool_elem))) + INTERNAL_ERROR(EM_ERR_NOT_CREATED, EM_ESCOPE_ALLOC, + "Invalid pool:%" PRI_POOL ", pool not created", pool); + + /* + * EM event pools created with type=SW can not support pkt events. 
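+	 * Illustrative usage (hypothetical 'my_msg_t' and 'sw_pool', not
+	 * part of this patch):
+	 *   em_event_t ev = em_alloc(sizeof(my_msg_t), EM_EVENT_TYPE_SW, sw_pool);
+	 * succeeds, whereas passing EM_EVENT_TYPE_PACKET with the same
+	 * 'sw_pool' is rejected below (when EM_CHECK_LEVEL >= 1) with
+	 * EM_ERR_NOT_IMPLEMENTED.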
+ */ + if (EM_CHECK_LEVEL >= 1 && + unlikely(pool_elem->event_type == EM_EVENT_TYPE_SW && + major_type == EM_EVENT_TYPE_PACKET)) { + INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_ALLOC, + "EM-pool:%s(%" PRI_POOL "):\n" + "Invalid event type:0x%x for buf", + pool_elem->name, pool_elem->em_pool, type); + return EM_EVENT_UNDEF; + } + if (EM_CHECK_LEVEL >= 1 && + unlikely(pool_elem->event_type == EM_EVENT_TYPE_VECTOR && + major_type != EM_EVENT_TYPE_VECTOR)) { + INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_ALLOC, + "EM-pool:%s(%" PRI_POOL "):\n" + "Invalid event type:0x%x for vector", + pool_elem->name, pool_elem->em_pool, type); + return EM_EVENT_UNDEF; + } + + const em_event_t event = event_alloc(pool_elem, size, type, EVSTATE__ALLOC); + + if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { + em_status_t err = + INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_ALLOC, + "EM-pool:'%s': sz:%u type:0x%x pool:%" PRI_POOL "", + pool_elem->name, size, type, pool); + if (EM_DEBUG_PRINT && err != EM_OK && + (pool_elem->stats_opt.bit.available || + pool_elem->stats_opt.bit.cache_available)) { + em_pool_info_print(pool); + } + return EM_EVENT_UNDEF; + } + + if (EM_API_HOOKS_ENABLE && event != EM_EVENT_UNDEF) + call_api_hooks_alloc(&event, 1, 1, size, type, pool); + + return event; +} + +int em_alloc_multi(em_event_t events[/*out*/], int num, + uint32_t size, em_event_type_t type, em_pool_t pool) +{ + if (unlikely(num == 0)) + return 0; + + const mpool_elem_t *const pool_elem = pool_elem_get(pool); + int ret = 0; + + if (EM_CHECK_LEVEL > 0 && + unlikely(!events || num < 0 || size == 0 || !pool_elem || + em_event_type_major(type) == EM_EVENT_TYPE_TIMER_IND)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_ALLOC_MULTI, + "Invalid args: events:%p num:%d size:%u type:%u pool:%" PRI_POOL "", + events, num, size, type, pool); + return 0; + } + if (EM_CHECK_LEVEL >= 2 && unlikely(!pool_allocated(pool_elem))) + INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_ALLOC_MULTI, + "Invalid pool:%" PRI_POOL ", pool not created", pool); + + if (pool_elem->event_type == EM_EVENT_TYPE_PACKET) { + /* + * EM event pools created with type=PKT can support SW events + * as well as pkt events. + */ + ret = event_alloc_pkt_multi(events, num, pool_elem, size, type); + } else if (pool_elem->event_type == EM_EVENT_TYPE_SW) { + /* + * EM event pools created with type=SW can not support + * pkt events. + */ + if (EM_CHECK_LEVEL >= 1 && + unlikely(em_event_type_major(type) == EM_EVENT_TYPE_PACKET)) { + INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_ALLOC_MULTI, + "EM-pool:%s(%" PRI_POOL "): Invalid event type:0x%x for buf", + pool_elem->name, pool, type); + return 0; + } + ret = event_alloc_buf_multi(events, num, pool_elem, size, type); + } else if (pool_elem->event_type == EM_EVENT_TYPE_VECTOR) { + if (EM_CHECK_LEVEL >= 1 && + unlikely(em_event_type_major(type) != EM_EVENT_TYPE_VECTOR)) { + INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_ALLOC, + "EM-pool:%s(%" PRI_POOL "): Inv. 
event type:0x%x for vector", + pool_elem->name, pool, type); + return 0; + } + ret = event_alloc_vector_multi(events, num, pool_elem, size, type); + } + + if (unlikely(EM_CHECK_LEVEL > 0 && ret != num)) { + em_status_t err = + INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_ALLOC_MULTI, + "Requested num:%d events, allocated:%d\n" + "EM-pool:'%s': sz:%u type:0x%x pool:%" PRI_POOL "", + num, ret, + pool_elem->name, size, type, pool); + if (EM_DEBUG_PRINT && err != EM_OK && + (pool_elem->stats_opt.bit.available || + pool_elem->stats_opt.bit.cache_available)) { + em_pool_info_print(pool); + } + } + + if (EM_API_HOOKS_ENABLE && ret > 0) + call_api_hooks_alloc(events, ret, num, size, type, pool); + + return ret; +} + +/** + * @brief Helper to check if the event is a vector + * + * @param vector_event Event handle + * @return true the event is a vector + * @return false the event is NOT a vector + */ +static inline bool is_vector_type(em_event_t vector_event) +{ + odp_event_t odp_event = event_em2odp(vector_event); + odp_event_type_t odp_etype = odp_event_type(odp_event); + + if (odp_etype == ODP_EVENT_PACKET_VECTOR) + return true; + + return false; +} + +/** + * @brief Helper to check if the event is a vector, if not report an error + * + * @param vector_event Event handle + * @param escope Error scope to use if reporting an error + * @return true the event is a vector + * @return false the event is NOT a vector, reports an error + */ +static inline bool is_vector_type_or_error(em_event_t vector_event, + em_escope_t escope) +{ + bool is_vec = is_vector_type(vector_event); + + if (likely(is_vec)) + return true; + + INTERNAL_ERROR(EM_ERR_BAD_TYPE, escope, "Event not a vector"); + return false; +} + +/** + * @brief Handle ESV state for 'em_free' for the event-table of a vector event + * + * @param event Vector event handle + */ +static void event_vector_prepare_free_full(em_event_t event, const uint16_t api_op) +{ + /* em_free() frees the vector as well as all the events it contains */ + em_event_t *ev_tbl; + uint32_t sz = event_vector_tbl(event, &ev_tbl); + + if (sz) { + event_hdr_t *ev_hdrs[sz]; + + /* same as event_to_hdr_multi(), removes gcc-12 LTO error in haswell */ + for (uint32_t i = 0; i < sz; i++) + ev_hdrs[i] = event_to_hdr(ev_tbl[i]); + + evstate_free_multi(ev_tbl, ev_hdrs, sz, api_op); + + /* drop ESV generation from event handles */ + (void)events_em2pkt_inplace(ev_tbl, sz); + } +} + +/** + * @brief Handle ESV state for 'em_event_unmark_free/_multi' for the event-table + * of a vector event. + * + * @param event Vector event handle + */ +static void event_vector_prepare_free_full__revert(em_event_t event, const uint16_t api_op) +{ + /* em_free() frees the vector as well as all the events it contains */ + em_event_t *ev_tbl; + uint32_t sz = event_vector_tbl(event, &ev_tbl); + + if (sz) { + event_hdr_t *ev_hdrs[sz]; + + event_to_hdr_multi(ev_tbl, ev_hdrs, sz); + evstate_unmark_free_multi(ev_tbl, ev_hdrs, sz, api_op); + + /* restore dropped ESV generation to event handles, unmodified in header */ + for (unsigned int i = 0; i < sz; i++) + ev_tbl[i] = ev_hdrs[i]->event; + } +} + +/** + * Helper to em_free() and em_free_multi() to determine whether timeout events + * from periodic timer rings can exist and if free() needs to check for them. + * Active tmo events from ring timers must never be freed (user error), only + * inactive (last tmo event after cancel) can be freed. + */ +static inline bool timer_rings_used(void) +{ + return em_shm->timers.num_ring_create_calls > 0 ? 
true : false; +} + +void em_free(em_event_t event) +{ + if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_FREE, + "event undefined!"); + return; + } + + odp_event_t odp_event = event_em2odp(event); + const bool esv_ena = esv_enabled(); + /* Is a check for an active periodic tmo event from a timer ring needed? */ + const bool check_tmos = EM_CHECK_LEVEL >= 2 && timer_rings_used(); + + if (unlikely(check_tmos && odp_event_type(odp_event) == ODP_EVENT_TIMEOUT)) { + event_hdr_t *ev_hdr = event_to_hdr(event); + + if (unlikely(ev_hdr->flags.tmo_type != EM_TMO_TYPE_NONE)) { + INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_FREE, + "Can't free active TIMER event"); + return; + } + if (esv_ena) + evstate_free(event, ev_hdr, EVSTATE__FREE); + } else if (esv_ena) { + event_hdr_t *ev_hdr = event_to_hdr(event); + + evstate_free(event, ev_hdr, EVSTATE__FREE); + if (is_vector_type(event)) + event_vector_prepare_free_full(event, EVSTATE__FREE); + } + + if (EM_API_HOOKS_ENABLE) + call_api_hooks_free(&event, 1); + + odp_event_free(odp_event); +} + +/** + * Helper to em_free_multi() to remove active periodic tmo events + * (from ring timers) from the free list. + * + * Active tmo events from ring timers must never be freed (user error), only + * inactive (last tmo event after cancel) can be freed. Thus remove the active + * ones if the user incorrectly tries to free them. + */ +static inline int +rem_active_ring_timer_tmos(const int num, odp_event_t odp_evtbl[/*in/out*/], + event_hdr_t *ev_hdr_tbl[/*in/out*/], + em_event_t ev_tbl[/*in/out*/]) +{ + int first_tmo_idx = -1; + + /* find first active tmo-event */ + for (int i = 0; i < num; i++) { + if (unlikely(odp_event_type(odp_evtbl[i]) == ODP_EVENT_TIMEOUT && + ev_hdr_tbl[i]->flags.tmo_type != EM_TMO_TYPE_NONE)) { + first_tmo_idx = i; + break; + } + } + + /* + * No active tmo events found - all OK, return. + * This is the normal, no-error, scenario. + */ + if (likely(first_tmo_idx == -1)) + return num; + + /* + * Error: Active tmo events found - remove them from the arrays + */ + + /* last event is tmo, no need to move/copy anything, just drop last */ + if (first_tmo_idx == num - 1) + return num - 1; + + /* + * Store indexes of "normal events" (i.e. events other than active + * tmo events) to copy from 'first_tmo_idx + 1' onwards. + */ + int num_cpy = 0; + int cpy_idx[num - first_tmo_idx - 1]; + + for (int i = first_tmo_idx + 1; i < num; i++) { + if (likely(!(odp_event_type(odp_evtbl[i]) == ODP_EVENT_TIMEOUT && + ev_hdr_tbl[i]->flags.tmo_type != EM_TMO_TYPE_NONE))) + cpy_idx[num_cpy++] = i; + } + + /* all further events were active tmo events, drop them */ + if (num_cpy == 0) + return first_tmo_idx; + + /* + * Remove all active tmo events from the arrays by copying the "normal" + * events into the slots occupied by the active tmo events. 
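+	 * Worked example (hypothetical input): num=5 with active tmos at
+	 * idx 1 and 3 gives first_tmo_idx=1 and cpy_idx[]={2,4}; events 2
+	 * and 4 are copied into slots 1 and 2 and the function returns 3
+	 * freeable events.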
+ */ + for (int i = 0; i < num_cpy; i++) { + int src_idx = cpy_idx[i]; + int dst_idx = first_tmo_idx + i; + + odp_evtbl[dst_idx] = odp_evtbl[src_idx]; + ev_hdr_tbl[dst_idx] = ev_hdr_tbl[src_idx]; + ev_tbl[dst_idx] = ev_tbl[src_idx]; + } + + return first_tmo_idx + num_cpy; +} + +void em_free_multi(em_event_t events[], int num) +{ + if (EM_CHECK_LEVEL > 0 && unlikely(!events || num < 0)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_FREE_MULTI, + "Inv.args: events[]:%p num:%d", events, num); + return; + } + if (unlikely(num == 0)) + return; + + if (EM_CHECK_LEVEL >= 3) { + int i; + + for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) + ; + if (unlikely(i != num)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_FREE_MULTI, + "events[%d] undefined!", i); + return; + } + } + + int num_free = num; + const bool esv_ena = esv_enabled(); + odp_event_t odp_events[num]; + + events_em2odp(events, odp_events/*out*/, num); + + /* Is a check for active periodic tmo events from timer rings needed? */ + const bool check_tmos = EM_CHECK_LEVEL >= 2 && timer_rings_used(); + + if (check_tmos || esv_ena) { + event_hdr_t *ev_hdrs[num]; + + event_to_hdr_multi(events, ev_hdrs, num); + + if (check_tmos) { + num_free = rem_active_ring_timer_tmos(num, odp_events, ev_hdrs, events); + if (unlikely(num_free != num)) + INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_FREE_MULTI, + "Can't free active TIMER events: %d of %d ignored", + num_free, num); + } + + if (esv_ena) { + evstate_free_multi(events, ev_hdrs, num_free, EVSTATE__FREE_MULTI); + + for (int i = 0; i < num_free; i++) { + if (is_vector_type(events[i])) + event_vector_prepare_free_full(events[i], + EVSTATE__FREE_MULTI); + } + } + } + + if (EM_API_HOOKS_ENABLE) + call_api_hooks_free(events, num_free); + + odp_event_free_multi(odp_events, num_free); +} + +/** + * Helper to em_send(). + * Send out of EM via event-chaining and a user-provided function + * 'event_send_device()' to another device + */ +static inline em_status_t +send_external(em_event_t event, em_queue_t queue) +{ + if (EM_API_HOOKS_ENABLE) + call_api_hooks_send(&event, 1, queue, EM_EVENT_GROUP_UNDEF); + + em_status_t stat = send_chaining(event, queue); + + if (EM_CHECK_LEVEL == 0) + return stat; + + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_SEND, + "send out-of-EM via event-chaining failed: Q:%" PRI_QUEUE "", queue); + return EM_OK; +} + +/** + * Helper to em_send_multi(). + * Send out of EM via event-chaining and a user-provided function + * 'event_send_device()' to another device + */ +static inline int +send_external_multi(const em_event_t events[], int num, em_queue_t queue) +{ + if (EM_API_HOOKS_ENABLE) + call_api_hooks_send(events, num, queue, EM_EVENT_GROUP_UNDEF); + + int num_sent = send_chaining_multi(events, num, queue); + + if (EM_CHECK_LEVEL > 0 && unlikely(num_sent != num)) { + INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, EM_ESCOPE_SEND_MULTI, + "send_chaining_multi: req:%d, sent:%d", + num, num_sent); + } + + return num_sent; +} + +/** + * Helper to em_send(). + * Send to an EM internal queue. 
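+ * Routes by queue type: scheduled queues go through send_event(),
+ * unscheduled queues are enqueued via queue_unsched_enqueue() and
+ * local queues use send_local(), see the switch-statement below.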
+ */ +static inline em_status_t +send_internal(em_event_t event, event_hdr_t *ev_hdr, em_queue_t queue) +{ + queue_elem_t *q_elem = queue_elem_get(queue); + em_status_t stat; + + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !q_elem, + EM_ERR_BAD_ARG, EM_ESCOPE_SEND, + "Invalid queue:%" PRI_QUEUE "", queue); + RETURN_ERROR_IF(EM_CHECK_LEVEL >= 2 && !queue_allocated(q_elem), + EM_ERR_BAD_STATE, EM_ESCOPE_SEND, + "Invalid queue:%" PRI_QUEUE "", queue); + + /* Buffer events sent from EO-start to scheduled queues */ + if (unlikely(em_locm.start_eo_elem && q_elem->flags.scheduled)) { + /* + * em_send() called from within an EO-start function: + * all events sent to scheduled queues will be buffered + * and sent when the EO-start operation completes. + */ + if (esv_enabled()) + evstate_usr2em(event, ev_hdr, EVSTATE__SEND); + + int num_sent = eo_start_buffer_events(&event, 1, queue); + + if (unlikely(num_sent != 1)) { + stat = EM_ERR_OPERATION_FAILED; + goto error_return; + } + + return EM_OK; /* Success */ + } + + if (EM_API_HOOKS_ENABLE) + call_api_hooks_send(&event, 1, queue, EM_EVENT_GROUP_UNDEF); + + if (q_elem->type == EM_QUEUE_TYPE_OUTPUT) { + /* + * Send out of EM via an EM output-queue and a user provided + * function of type em_output_func_t + */ + stat = send_output(event, q_elem); + + if (unlikely(stat != EM_OK)) + goto error_return_noesv; + + return EM_OK; /* Success */ + } + + /* + * Normal send to a queue on this device + */ + if (esv_enabled()) + evstate_usr2em(event, ev_hdr, EVSTATE__SEND); + + switch (q_elem->type) { + case EM_QUEUE_TYPE_ATOMIC: + case EM_QUEUE_TYPE_PARALLEL: + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + stat = send_event(event, q_elem); + break; + case EM_QUEUE_TYPE_UNSCHEDULED: + stat = queue_unsched_enqueue(event, q_elem); + break; + case EM_QUEUE_TYPE_LOCAL: + stat = send_local(event, q_elem); + break; + default: + stat = EM_ERR_NOT_FOUND; + break; + } + + if (likely(stat == EM_OK)) + return EM_OK; /* Success */ + +error_return: + if (esv_enabled()) + evstate_usr2em_revert(event, ev_hdr, EVSTATE__SEND__FAIL); +error_return_noesv: + if (EM_CHECK_LEVEL == 0) + return stat; + stat = INTERNAL_ERROR(stat, EM_ESCOPE_SEND, + "send failed: Q:%" PRI_QUEUE " type:%" PRI_QTYPE "", + queue, q_elem->type); + return stat; +} + +/** + * Helper to em_send_multi(). + * Send to an EM internal queue. + */ +static inline int +send_internal_multi(const em_event_t events[], event_hdr_t *ev_hdrs[], + int num, em_queue_t queue) +{ + queue_elem_t *q_elem = queue_elem_get(queue); + int num_sent; + + if (EM_CHECK_LEVEL > 0 && unlikely(!q_elem)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_SEND_MULTI, + "Invalid queue:%" PRI_QUEUE "", queue); + return 0; + } + if (EM_CHECK_LEVEL >= 2 && unlikely(!queue_allocated(q_elem))) { + INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_SEND_MULTI, + "Invalid queue:%" PRI_QUEUE "", queue); + return 0; + } + + /* Buffer events sent from EO-start to scheduled queues */ + if (unlikely(em_locm.start_eo_elem && q_elem->flags.scheduled)) { + /* + * em_send_multi() called from within an EO-start function: + * all events sent to scheduled queues will be buffered + * and sent when the EO-start operation completes. 
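+		 * This mirrors the single-event EO-start buffering in
+		 * send_internal() above, using the multi-event ESV
+		 * transition EVSTATE__SEND_MULTI instead.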
+ */ + if (esv_enabled()) + evstate_usr2em_multi(events, ev_hdrs, num, + EVSTATE__SEND_MULTI); + num_sent = eo_start_buffer_events(events, num, queue); + + if (unlikely(num_sent != num)) + goto error_return; + + return num_sent; /* Success */ + } + + if (EM_API_HOOKS_ENABLE) + call_api_hooks_send(events, num, queue, EM_EVENT_GROUP_UNDEF); + + if (q_elem->type == EM_QUEUE_TYPE_OUTPUT) { + /* + * Send out of EM via an EM output-queue and a user provided + * function of type em_output_func_t + */ + num_sent = send_output_multi(events, num, q_elem); + + if (unlikely(num_sent != num)) + goto error_return_noesv; + + return num_sent; /* Success */ + } + + /* + * Normal send to a queue on this device + */ + if (esv_enabled()) + evstate_usr2em_multi(events, ev_hdrs, num, EVSTATE__SEND_MULTI); + + switch (q_elem->type) { + case EM_QUEUE_TYPE_ATOMIC: + case EM_QUEUE_TYPE_PARALLEL: + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + num_sent = send_event_multi(events, num, q_elem); + break; + case EM_QUEUE_TYPE_UNSCHEDULED: + num_sent = queue_unsched_enqueue_multi(events, num, q_elem); + break; + case EM_QUEUE_TYPE_LOCAL: + num_sent = send_local_multi(events, num, q_elem); + break; + default: + num_sent = 0; + break; + } + + if (likely(num_sent == num)) + return num_sent; /* Success */ + +error_return: + if (esv_enabled()) + evstate_usr2em_revert_multi(&events[num_sent], + &ev_hdrs[num_sent], + num - num_sent, + EVSTATE__SEND_MULTI__FAIL); +error_return_noesv: + if (EM_CHECK_LEVEL > 0) + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_SEND_MULTI, + "send-multi failed: req:%d, sent:%d", + num, num_sent); + return num_sent; +} + +em_status_t em_send(em_event_t event, em_queue_t queue) +{ + const bool is_external = queue_external(queue); + + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && event == EM_EVENT_UNDEF, + EM_ERR_BAD_ARG, EM_ESCOPE_SEND, "Invalid event"); + + event_hdr_t *ev_hdr = event_to_hdr(event); + + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && ev_hdr->event_type == EM_EVENT_TYPE_TIMER_IND, + EM_ERR_BAD_ARG, EM_ESCOPE_SEND, "Timer-ring event can't be sent"); + + /* avoid unnecessary writing 'undef' in case event is a ref */ + if (ev_hdr->egrp != EM_EVENT_GROUP_UNDEF) + ev_hdr->egrp = EM_EVENT_GROUP_UNDEF; + + /* + * External queue belongs to another EM instance, send out via EMC/BIP + */ + if (is_external) + return send_external(event, queue); + + /* + * Queue belongs to this EM instance + */ + return send_internal(event, ev_hdr, queue); +} + +/* + * em_send_group_multi() helper: check events + */ +static inline em_status_t +send_multi_check_events(const em_event_t events[], int num) +{ + if (EM_CHECK_LEVEL > 0 && unlikely(!events || num <= 0)) + return EM_ERR_BAD_ARG; + + if (EM_CHECK_LEVEL >= 3) { + int i; + + for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) + ; + if (unlikely(i != num)) + return EM_ERR_BAD_POINTER; + } + + return EM_OK; +} + +int em_send_multi(const em_event_t events[], int num, em_queue_t queue) +{ + const bool is_external = queue_external(queue); + event_hdr_t *ev_hdrs[num]; + + /* Check events */ + em_status_t err = send_multi_check_events(events, num); + + if (unlikely(err != EM_OK)) { + INTERNAL_ERROR(err, EM_ESCOPE_SEND_MULTI, + "Invalid events:%p num:%d", events, num); + return 0; + } + + event_to_hdr_multi(events, ev_hdrs, num); + + for (int i = 0; i < num; i++) { + if (EM_CHECK_LEVEL > 0 && + unlikely(ev_hdrs[i]->event_type == EM_EVENT_TYPE_TIMER_IND)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_SEND_MULTI, + "Timer-ring event[%d] can't be sent", i); + return 0; + } + /* avoid 
unnecessary writing 'undef' in case event is a ref */ + if (ev_hdrs[i]->egrp != EM_EVENT_GROUP_UNDEF) + ev_hdrs[i]->egrp = EM_EVENT_GROUP_UNDEF; + } + + /* + * External queue belongs to another EM instance, send out via EMC/BIP + */ + if (is_external) + return send_external_multi(events, num, queue); + + /* + * Queue belongs to this EM instance + */ + return send_internal_multi(events, ev_hdrs, num, queue); +} + +void *em_event_pointer(em_event_t event) +{ + if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_POINTER, + "event undefined!"); + return NULL; + } + + void *ev_ptr = event_pointer(event); + + if (EM_CHECK_LEVEL > 0 && unlikely(!ev_ptr)) + INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_EVENT_POINTER, + "Event pointer NULL (unsupported event type)"); + + return ev_ptr; +} + +void *em_event_pointer_and_size(em_event_t event, uint32_t *size /*out*/) +{ + if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_POINTER_AND_SIZE, + "event undefined!"); + return NULL; + } + + if (!size) { + /* User not interested in 'size', + * fall back to em_event_pointer() functionality + */ + void *ev_ptr = event_pointer(event); + + if (EM_CHECK_LEVEL > 0 && unlikely(!ev_ptr)) + INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_EVENT_POINTER_AND_SIZE, + "Event pointer NULL (unsupported event type)"); + return ev_ptr; + } + + const odp_event_t odp_event = event_em2odp(event); + const odp_event_type_t odp_etype = odp_event_type(odp_event); + uint32_t event_size = 0; + void *ev_ptr = NULL; /* return value */ + + if (odp_etype == ODP_EVENT_PACKET) { + const odp_packet_t odp_pkt = odp_packet_from_event(odp_event); + + ev_ptr = odp_packet_data_seg_len(odp_pkt, &event_size); + } else if (odp_etype == ODP_EVENT_BUFFER) { + const odp_buffer_t odp_buf = odp_buffer_from_event(odp_event); + const event_hdr_t *ev_hdr = odp_buffer_user_area(odp_buf); + const uint32_t align_offset = ev_hdr->align_offset; + + ev_ptr = odp_buffer_addr(odp_buf); + if (align_offset) + ev_ptr = (void *)((uintptr_t)ev_ptr + 32 - align_offset); + event_size = ev_hdr->event_size; + } + + if (EM_CHECK_LEVEL > 0 && unlikely(!ev_ptr)) { + INTERNAL_ERROR(EM_ERR_BAD_TYPE, EM_ESCOPE_EVENT_POINTER_AND_SIZE, + "Event pointer NULL (odp event type:%u)", odp_etype); + /* NULL for unrecognized odp_etype, also for vectors and timer ring tmos */ + return NULL; + } + + *size = event_size; + return ev_ptr; +} + +uint32_t em_event_get_size(em_event_t event) +{ + if (unlikely(EM_CHECK_LEVEL > 0 && event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_GET_SIZE, + "event undefined!"); + return 0; + } + + const odp_event_t odp_event = event_em2odp(event); + const odp_event_type_t odp_etype = odp_event_type(odp_event); + + if (odp_etype == ODP_EVENT_PACKET) { + odp_packet_t odp_pkt = odp_packet_from_event(odp_event); + + return odp_packet_seg_len(odp_pkt); + } else if (odp_etype == ODP_EVENT_BUFFER) { + odp_buffer_t odp_buf = odp_buffer_from_event(odp_event); + const event_hdr_t *ev_hdr = odp_buffer_user_area(odp_buf); + + return ev_hdr->event_size; + } else if (odp_etype == ODP_EVENT_TIMEOUT) { + return 0; + } + + if (EM_CHECK_LEVEL > 0) + INTERNAL_ERROR(EM_ERR_NOT_FOUND, EM_ESCOPE_EVENT_GET_SIZE, + "Unexpected odp event type:%u", odp_etype); + return 0; +} + +static inline odp_pool_t event_get_odp_pool(em_event_t event) +{ + odp_event_t odp_event = event_em2odp(event); + +#if ODP_VERSION_API_NUM(1, 43, 0) <= ODP_VERSION_API + 
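+	/* ODP API >= 1.43.0: odp_event_pool() resolves the pool directly */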
return odp_event_pool(odp_event); +#else + odp_event_type_t type = odp_event_type(odp_event); + odp_pool_t odp_pool = ODP_POOL_INVALID; + + if (type == ODP_EVENT_PACKET) { + odp_packet_t pkt = odp_packet_from_event(odp_event); + + odp_pool = odp_packet_pool(pkt); + } else if (type == ODP_EVENT_BUFFER) { + odp_buffer_t buf = odp_buffer_from_event(odp_event); + + odp_pool = odp_buffer_pool(buf); + } else if (type == ODP_EVENT_PACKET_VECTOR) { + odp_packet_vector_t pktvec = odp_packet_vector_from_event(odp_event); + + odp_pool = odp_packet_vector_pool(pktvec); + } + + return odp_pool; +#endif +} + +em_pool_t em_event_get_pool(em_event_t event) +{ + if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_GET_POOL, + "event undefined!"); + return EM_POOL_UNDEF; + } + + odp_pool_t odp_pool = event_get_odp_pool(event); + + if (unlikely(odp_pool == ODP_POOL_INVALID)) + return EM_POOL_UNDEF; + + em_pool_t pool = pool_odp2em(odp_pool); + + /* + * Don't report an error if 'pool == EM_POOL_UNDEF' since that might + * happen if the event is e.g. input from pktio that is using external + * (to EM) odp pools. + */ + return pool; +} + +em_pool_t em_event_get_pool_subpool(em_event_t event, int *subpool /*out*/) +{ + if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_GET_POOL_SUBPOOL, + "event undefined!"); + return EM_POOL_UNDEF; + } + + odp_pool_t odp_pool = event_get_odp_pool(event); + + if (unlikely(odp_pool == ODP_POOL_INVALID)) + return EM_POOL_UNDEF; + + pool_subpool_t pool_subpool = pool_subpool_odp2em(odp_pool); + + if (unlikely(pool_subpool.pool == (uint32_t)(uintptr_t)EM_POOL_UNDEF)) + return EM_POOL_UNDEF; + + if (subpool) + *subpool = pool_subpool.subpool; + + return (em_pool_t)(uintptr_t)pool_subpool.pool; +} + +em_status_t em_event_set_type(em_event_t event, em_event_type_t newtype) +{ + if (EM_CHECK_LEVEL > 0) + RETURN_ERROR_IF(event == EM_EVENT_UNDEF, EM_ERR_BAD_ARG, + EM_ESCOPE_EVENT_SET_TYPE, "event undefined!"); + + /* similar to 'ev_hdr = event_to_hdr(event)', slightly extended: */ + odp_event_t odp_event = event_em2odp(event); + odp_event_type_t evtype = odp_event_type(odp_event); + event_hdr_t *ev_hdr; + + switch (evtype) { + case ODP_EVENT_PACKET: { + odp_packet_t odp_pkt = odp_packet_from_event(odp_event); + + ev_hdr = odp_packet_user_area(odp_pkt); + break; + } + case ODP_EVENT_BUFFER: { + odp_buffer_t odp_buf = odp_buffer_from_event(odp_event); + + ev_hdr = odp_buffer_user_area(odp_buf); + break; + } + case ODP_EVENT_PACKET_VECTOR: { + odp_packet_vector_t odp_pktvec = odp_packet_vector_from_event(odp_event); + + if (EM_CHECK_LEVEL >= 1) { + em_event_type_t new_major = em_event_type_major(newtype); + + RETURN_ERROR_IF(new_major != EM_EVENT_TYPE_VECTOR, + EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_SET_TYPE, + "Event type:0x%x not suitable for a vector", newtype); + } + ev_hdr = odp_packet_vector_user_area(odp_pktvec); + break; + } + default: + return INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_EVENT_SET_TYPE, + "Unsupported odp event type:%u", evtype); + } + + ev_hdr->event_type = newtype; + + return EM_OK; +} + +em_event_type_t em_event_get_type(em_event_t event) +{ + const event_hdr_t *ev_hdr; + + if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_GET_TYPE, + "event undefined!"); + return EM_EVENT_TYPE_UNDEF; + } + + ev_hdr = event_to_hdr(event); + + if (EM_CHECK_LEVEL >= 3 && unlikely(ev_hdr == NULL)) { + 
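+		/* defensive: event_to_hdr() gave no header, report and return 'undef' */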
INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_EVENT_GET_TYPE, + "ev_hdr == NULL"); + return EM_EVENT_TYPE_UNDEF; + } + + return ev_hdr->event_type; +} + +int em_event_get_type_multi(const em_event_t events[], int num, + em_event_type_t types[/*out:num*/]) +{ + int i; + + /* Check all args */ + if (EM_CHECK_LEVEL > 0) { + if (unlikely(!events || num < 0 || !types)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, + EM_ESCOPE_EVENT_GET_TYPE_MULTI, + "Inv.args: events:%p num:%d types:%p", + events, num, types); + return 0; + } + if (unlikely(!num)) + return 0; + } + + if (EM_CHECK_LEVEL >= 3) { + for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) + ; + if (unlikely(i != num)) { + INTERNAL_ERROR(EM_ERR_BAD_POINTER, + EM_ESCOPE_EVENT_GET_TYPE_MULTI, + "events[%d] undefined!", i); + return 0; + } + } + + event_hdr_t *ev_hdrs[num]; + + event_to_hdr_multi(events, ev_hdrs, num); + + for (i = 0; i < num; i++) + types[i] = ev_hdrs[i]->event_type; + + return num; +} + +int em_event_same_type_multi(const em_event_t events[], int num, + em_event_type_t *same_type /*out*/) +{ + /* Check all args */ + if (EM_CHECK_LEVEL > 0) { + if (unlikely(!events || num < 0 || !same_type)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, + EM_ESCOPE_EVENT_SAME_TYPE_MULTI, + "Inv.args: events:%p num:%d same_type:%p", + events, num, same_type); + return 0; + } + if (unlikely(!num)) + return 0; + } + + if (EM_CHECK_LEVEL >= 3) { + int i; + + for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) + ; + if (unlikely(i != num)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, + EM_ESCOPE_EVENT_SAME_TYPE_MULTI, + "events[%d] undefined!", i); + return 0; + } + } + + const em_event_type_t type = event_to_hdr(events[0])->event_type; + int same = 1; + + for (; same < num && type == event_to_hdr(events[same])->event_type; + same++) + ; + + *same_type = type; + return same; +} + +em_status_t em_event_mark_send(em_event_t event, em_queue_t queue) +{ + if (!esv_enabled()) + return EM_OK; + + /* Check all args */ + if (EM_CHECK_LEVEL > 0) + RETURN_ERROR_IF(event == EM_EVENT_UNDEF, + EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_SEND, + "Inv.args: event:%" PRI_EVENT "", event); + if (EM_CHECK_LEVEL >= 3) { + const queue_elem_t *const q_elem = queue_elem_get(queue); + + RETURN_ERROR_IF(!q_elem, EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_SEND, + "Inv.args: Q:%" PRI_QUEUE "", queue); + RETURN_ERROR_IF(!queue_allocated(q_elem) || !q_elem->flags.scheduled, + EM_ERR_BAD_STATE, EM_ESCOPE_EVENT_MARK_SEND, + "Inv.queue:%" PRI_QUEUE " type:%" PRI_QTYPE "", + queue, q_elem->type); + } + + event_hdr_t *ev_hdr = event_to_hdr(event); + + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && ev_hdr->event_type == EM_EVENT_TYPE_TIMER_IND, + EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_SEND, "Timer-ring event not allowed"); + + /* avoid unnecessary writing 'undef' in case event is a ref */ + if (ev_hdr->egrp != EM_EVENT_GROUP_UNDEF) + ev_hdr->egrp = EM_EVENT_GROUP_UNDEF; + + evstate_usr2em(event, ev_hdr, EVSTATE__MARK_SEND); + + /* + * Data memory barrier, we are bypassing em_send(), odp_queue_enq() + * and need to guarantee memory sync before the event ends up into an + * EM queue again. 
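+	 * Marking as 'sent' is needed when the application transfers the
+	 * event out of EM by some other means than em_send(), so that the
+	 * ESV state still stays in sync.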
+ */ + odp_mb_full(); + + return EM_OK; +} + +em_status_t em_event_unmark_send(em_event_t event) +{ + if (!esv_enabled()) + return EM_OK; + + /* Check all args */ + if (EM_CHECK_LEVEL > 0) + RETURN_ERROR_IF(event == EM_EVENT_UNDEF, + EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UNMARK_SEND, + "Inv.args: event:%" PRI_EVENT "", event); + + event_hdr_t *ev_hdr = event_to_hdr(event); + + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && ev_hdr->event_type == EM_EVENT_TYPE_TIMER_IND, + EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UNMARK_SEND, + "Timer-ring event not allowed"); + + evstate_unmark_send(event, ev_hdr); + + return EM_OK; +} + +void em_event_mark_free(em_event_t event) +{ + if (!esv_enabled()) + return; + + if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_FREE, + "Event undefined!"); + return; + } + + event_hdr_t *const ev_hdr = event_to_hdr(event); + + if (EM_CHECK_LEVEL > 0 && unlikely(ev_hdr->event_type == EM_EVENT_TYPE_TIMER_IND)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_FREE, + "Timer-ring event not allowed"); + return; + } + + evstate_free(event, ev_hdr, EVSTATE__MARK_FREE); + + if (is_vector_type(event)) + event_vector_prepare_free_full(event, EVSTATE__MARK_FREE); +} + +void em_event_unmark_free(em_event_t event) +{ + if (!esv_enabled()) + return; + + if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UNMARK_FREE, + "Event undefined!"); + return; + } + + event_hdr_t *const ev_hdr = event_to_hdr(event); + + if (EM_CHECK_LEVEL > 0 && unlikely(ev_hdr->event_type == EM_EVENT_TYPE_TIMER_IND)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UNMARK_FREE, + "Timer-ring event not allowed"); + return; + } + + evstate_unmark_free(event, ev_hdr, EVSTATE__UNMARK_FREE); + if (is_vector_type(event)) + event_vector_prepare_free_full__revert(event, EVSTATE__UNMARK_FREE); +} + +void em_event_mark_free_multi(const em_event_t events[], int num) +{ + if (!esv_enabled()) + return; + + if (EM_CHECK_LEVEL > 0 && unlikely(!events || num < 0)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_FREE_MULTI, + "Inv.args: events[]:%p num:%d", events, num); + return; + } + if (unlikely(num == 0)) + return; + + if (EM_CHECK_LEVEL >= 3) { + int i; + + for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) + ; + if (unlikely(i != num)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, + EM_ESCOPE_EVENT_MARK_FREE_MULTI, + "events[%d] undefined!", i); + return; + } + } + + event_hdr_t *ev_hdrs[num]; + + event_to_hdr_multi(events, ev_hdrs, num); + + for (int i = 0; i < num; i++) { + if (EM_CHECK_LEVEL > 0 && + unlikely(ev_hdrs[i]->event_type == EM_EVENT_TYPE_TIMER_IND)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_FREE_MULTI, + "Timer-ring event[%d] not allowed", i); + continue; + } + + evstate_free(events[i], ev_hdrs[i], EVSTATE__MARK_FREE_MULTI); + if (is_vector_type(events[i])) + event_vector_prepare_free_full(events[i], EVSTATE__MARK_FREE_MULTI); + } +} + +void em_event_unmark_free_multi(const em_event_t events[], int num) +{ + if (!esv_enabled()) + return; + + if (unlikely(!events || num < 0)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UNMARK_FREE_MULTI, + "Inv.args: events[]:%p num:%d", events, num); + return; + } + if (unlikely(num == 0)) + return; + + if (EM_CHECK_LEVEL >= 3) { + int i; + + for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) + ; + if (unlikely(i != num)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, + EM_ESCOPE_EVENT_UNMARK_FREE_MULTI, + "events[%d] undefined!", i); + return; 
+ } + } + + event_hdr_t *ev_hdrs[num]; + + event_to_hdr_multi(events, ev_hdrs, num); + + for (int i = 0; i < num; i++) { + if (EM_CHECK_LEVEL > 0 && + unlikely(ev_hdrs[i]->event_type == EM_EVENT_TYPE_TIMER_IND)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UNMARK_FREE_MULTI, + "Timer-ring event[%d] not allowed", i); + continue; + } + + evstate_unmark_free(events[i], ev_hdrs[i], EVSTATE__UNMARK_FREE_MULTI); + if (is_vector_type(events[i])) + event_vector_prepare_free_full__revert(events[i], + EVSTATE__UNMARK_FREE_MULTI); + } +} + +static em_event_t event_clone_part(em_event_t event, em_pool_t pool/*or EM_POOL_UNDEF*/, + uint32_t offset, uint32_t len, bool clone_uarea, + em_escope_t escope) +{ + const mpool_elem_t *pool_elem = pool_elem_get(pool); + /* use escope to distinguish between em_event_clone() and em_event_clone_part() */ + const bool is_clone_part = escope == EM_ESCOPE_EVENT_CLONE_PART ? true : false; + + /* Check all args */ + if (EM_CHECK_LEVEL > 0 && + unlikely(event == EM_EVENT_UNDEF || + (pool != EM_POOL_UNDEF && !pool_elem))) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, escope, + "Inv.args: event:%" PRI_EVENT " pool:%" PRI_POOL "", + event, pool); + return EM_EVENT_UNDEF; + } + + if (EM_CHECK_LEVEL >= 2 && + unlikely(pool_elem && !pool_allocated(pool_elem))) { + INTERNAL_ERROR(EM_ERR_BAD_STATE, escope, + "Inv.args: pool:%" PRI_POOL " not created", pool); + return EM_EVENT_UNDEF; + } + + odp_event_t odp_event = event_em2odp(event); + odp_event_type_t odp_evtype = odp_event_type(odp_event); + odp_pool_t odp_pool = ODP_POOL_INVALID; + odp_packet_t pkt = ODP_PACKET_INVALID; + odp_buffer_t buf = ODP_BUFFER_INVALID; + + if (unlikely(odp_evtype != ODP_EVENT_PACKET && + odp_evtype != ODP_EVENT_BUFFER)) { + INTERNAL_ERROR(EM_ERR_BAD_ID, escope, + "Inv. odp-event-type:%d", odp_evtype); + return EM_EVENT_UNDEF; + } + + /* Obtain the event-hdr, event-size and the pool to use */ + const event_hdr_t *ev_hdr; + uint32_t size; + em_event_type_t type; + em_pool_t em_pool = pool; + event_hdr_t *clone_hdr; + em_event_t clone_event; /* return value */ + + if (odp_evtype == ODP_EVENT_PACKET) { + pkt = odp_packet_from_event(odp_event); + ev_hdr = odp_packet_user_area(pkt); + size = odp_packet_seg_len(pkt); + if (pool == EM_POOL_UNDEF) { + odp_pool = odp_packet_pool(pkt); + em_pool = pool_odp2em(odp_pool); + } + } else /* ODP_EVENT_BUFFER */ { + buf = odp_buffer_from_event(odp_event); + ev_hdr = odp_buffer_user_area(buf); + size = ev_hdr->event_size; + if (pool == EM_POOL_UNDEF) { + odp_pool = odp_buffer_pool(buf); + em_pool = pool_odp2em(odp_pool); + } + } + + if (is_clone_part) { + if (EM_CHECK_LEVEL >= 1) { + uint64_t offset64 = offset; + uint64_t len64 = len; + uint64_t size64 = size; + + if (unlikely(len == 0 || offset64 + len64 > size64)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, escope, + "Inv.args: offset=%u len=%u (0 < offset+len <= %u)", + offset, len, size); + return EM_EVENT_UNDEF; + } + } + if (len < size) + size = len; + } + + /* No EM-pool found */ + if (em_pool == EM_POOL_UNDEF) { + if (unlikely(odp_evtype == ODP_EVENT_BUFFER)) { + INTERNAL_ERROR(EM_ERR_NOT_FOUND, escope, + "No suitable event-pool found"); + return EM_EVENT_UNDEF; + } + /* odp_evtype == ODP_EVENT_PACKET: + * Not an EM-pool, e.g. event from external pktio odp-pool. + * Allocate and clone pkt via ODP directly. 
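+		 * The clone must then come from that same external odp pool,
+		 * since EM has no pool-element metadata to allocate from here.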
+ */ + clone_event = pkt_clone_odp(pkt, odp_pool, offset, size, is_clone_part); + if (unlikely(clone_event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, escope, + "Cloning from ext odp-pool:%" PRIu64 " failed", + odp_pool_to_u64(odp_pool)); + } + return clone_event; + } + + /* + * Clone the event from an EM-pool: + */ + if (em_pool != pool) + pool_elem = pool_elem_get(em_pool); + type = ev_hdr->event_type; + + /* EM event pools created with type=SW can not support pkt events */ + if (unlikely(EM_CHECK_LEVEL > 0 && + pool_elem->event_type == EM_EVENT_TYPE_SW && + em_event_type_major(type) == EM_EVENT_TYPE_PACKET)) { + INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, escope, + "EM-pool:%s(%" PRI_POOL "):\n" + "Invalid event type:0x%x for buf", + pool_elem->name, em_pool, type); + return EM_EVENT_UNDEF; + } + + if (EM_CHECK_LEVEL > 0 && + unlikely(clone_uarea && ev_hdr->user_area.isinit && + pool_elem->user_area.size < ev_hdr->user_area.size)) { + INTERNAL_ERROR(EM_ERR_TOO_SMALL, escope, + "EM-pool:%s(%" PRI_POOL "):\n" + "Available user-area too small, clone uarea %u < needed uarea %u", + pool_elem->name, em_pool, pool_elem->user_area.size, + ev_hdr->user_area.size); + return EM_EVENT_UNDEF; + } + + if (pool_elem->event_type == EM_EVENT_TYPE_PACKET) + clone_hdr = event_alloc_pkt(pool_elem, size); + else /* EM_EVENT_TYPE_SW */ + clone_hdr = event_alloc_buf(pool_elem, size); + + if (unlikely(!clone_hdr)) { + em_status_t err = INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, escope, + "EM-pool:'%s': sz:%u type:0x%x pool:%" PRI_POOL "", + pool_elem->name, size, type, em_pool); + if (EM_DEBUG_PRINT && err != EM_OK && + (pool_elem->stats_opt.bit.available || + pool_elem->stats_opt.bit.cache_available)) + em_pool_info_print(em_pool); + return EM_EVENT_UNDEF; + } + + /* Update event ESV state for alloc/clone */ + if (esv_enabled()) + (void)evstate_alloc(clone_hdr->event, clone_hdr, EVSTATE__EVENT_CLONE); + + clone_hdr->flags.all = 0; /* clear only after evstate_alloc() */ + clone_hdr->event_type = type; /* store the event type */ + clone_hdr->event_size = size; /* store requested size */ + clone_hdr->egrp = EM_EVENT_GROUP_UNDEF; + clone_hdr->user_area.all = ev_hdr->user_area.all; + clone_hdr->user_area.size = pool_elem->user_area.size; /* uarea size comes from pool */ + clone_hdr->user_area.isinit = 1; + + /* Copy the event uarea content if used */ + if (clone_uarea && + ev_hdr->user_area.isinit && ev_hdr->user_area.size > 0) { + const void *uarea_ptr = (void *)((uintptr_t)ev_hdr + sizeof(event_hdr_t)); + void *clone_uarea_ptr = (void *)((uintptr_t)clone_hdr + sizeof(event_hdr_t)); + size_t sz = MIN(pool_elem->user_area.size, ev_hdr->user_area.size); + + memcpy(clone_uarea_ptr, uarea_ptr, sz); + } + + clone_event = clone_hdr->event; + + /* Copy event payload from the parent event into the clone event */ + uintptr_t src_addr = (uintptr_t)event_pointer(event) + offset; + const void *src = (void *)src_addr; + void *dst = event_pointer(clone_event); + + memcpy(dst, src, size); + + /* Call the 'alloc' API hook function also for event-clone */ + if (EM_API_HOOKS_ENABLE && clone_event != EM_EVENT_UNDEF) + call_api_hooks_alloc(&clone_event, 1, 1, size, type, pool); + + return clone_event; +} + +em_event_t em_event_clone(em_event_t event, em_pool_t pool/*or EM_POOL_UNDEF*/) +{ + return event_clone_part(event, pool, 0, 0, true, EM_ESCOPE_EVENT_CLONE); +} + +em_event_t em_event_clone_part(em_event_t event, em_pool_t pool/*or EM_POOL_UNDEF*/, + uint32_t offset, uint32_t len, bool clone_uarea) +{ + return 
event_clone_part(event, pool, offset, len, clone_uarea, + EM_ESCOPE_EVENT_CLONE_PART); +} + +static inline int +event_uarea_init(em_event_t event, event_hdr_t **ev_hdr/*out*/) +{ + const odp_event_t odp_event = event_em2odp(event); + const odp_event_type_t odp_evtype = odp_event_type(odp_event); + odp_pool_t odp_pool = ODP_POOL_INVALID; + odp_packet_t odp_pkt; + odp_buffer_t odp_buf; + odp_packet_vector_t odp_pktvec; + event_hdr_t *hdr; + bool is_init; + + switch (odp_evtype) { + case ODP_EVENT_PACKET: + odp_pkt = odp_packet_from_event(odp_event); + hdr = odp_packet_user_area(odp_pkt); + is_init = hdr->user_area.isinit; + if (!is_init) + odp_pool = odp_packet_pool(odp_pkt); + break; + case ODP_EVENT_BUFFER: + odp_buf = odp_buffer_from_event(odp_event); + hdr = odp_buffer_user_area(odp_buf); + is_init = hdr->user_area.isinit; + if (!is_init) + odp_pool = odp_buffer_pool(odp_buf); + break; + case ODP_EVENT_PACKET_VECTOR: + odp_pktvec = odp_packet_vector_from_event(odp_event); + hdr = odp_packet_vector_user_area(odp_pktvec); + is_init = hdr->user_area.isinit; + if (!is_init) + odp_pool = odp_packet_vector_pool(odp_pktvec); + break; + default: + return -1; + } + + *ev_hdr = hdr; + + if (!is_init) { + /* + * Event user area metadata is not initialized in + * the event header - initialize it: + */ + hdr->user_area.all = 0; /* user_area.{} = all zero (.sizes=0) */ + hdr->user_area.isinit = 1; + + em_pool_t pool = pool_odp2em(odp_pool); + + if (pool == EM_POOL_UNDEF) + return 0; /* ext ODP pool: OK, no user area, sz=0 */ + + /* Event from an EM event pool, can init event user area */ + const mpool_elem_t *pool_elem = pool_elem_get(pool); + + if (unlikely(!pool_elem)) + return -2; /* invalid pool_elem */ + + hdr->user_area.size = pool_elem->user_area.size; + } + + return 0; +} + +void *em_event_uarea_get(em_event_t event, size_t *size /*out, if given*/) +{ + /* Check args */ + if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UAREA_GET, + "Inv.arg: event undef"); + goto no_uarea; + } + + event_hdr_t *ev_hdr = NULL; + int err = event_uarea_init(event, &ev_hdr/*out*/); + + if (EM_CHECK_LEVEL > 0 && unlikely(err)) { + INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, EM_ESCOPE_EVENT_UAREA_GET, + "Cannot init event user area: %d", err); + goto no_uarea; + } + + if (ev_hdr->user_area.size == 0) + goto no_uarea; + + /* + * Event has user area configured, return pointer and size + */ + void *uarea_ptr = (void *)((uintptr_t)ev_hdr + sizeof(event_hdr_t)); + + if (size) + *size = ev_hdr->user_area.size; + + return uarea_ptr; + +no_uarea: + if (size) + *size = 0; + return NULL; +} + +em_status_t em_event_uarea_id_set(em_event_t event, uint16_t id) +{ + /* Check args */ + if (EM_CHECK_LEVEL > 0) + RETURN_ERROR_IF(event == EM_EVENT_UNDEF, + EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UAREA_ID_SET, + "Inv.arg: event undef"); + + event_hdr_t *ev_hdr = NULL; + int err = event_uarea_init(event, &ev_hdr/*out*/); + + if (EM_CHECK_LEVEL > 0) + RETURN_ERROR_IF(err, EM_ERR_OPERATION_FAILED, + EM_ESCOPE_EVENT_UAREA_ID_SET, + "Cannot init event user area: %d", err); + + ev_hdr->user_area.id = id; + ev_hdr->user_area.isset_id = 1; + + return EM_OK; +} + +em_status_t em_event_uarea_id_get(em_event_t event, bool *isset /*out*/, + uint16_t *id /*out*/) +{ + bool id_set = false; + em_status_t status = EM_OK; + + /* Check args, either 'isset' or 'id' ptrs must be provided (or both) */ + if (EM_CHECK_LEVEL > 0 && + (event == EM_EVENT_UNDEF || !(id || isset))) { + status = 
INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UAREA_ID_GET, + "Inv.args: event:%" PRI_EVENT " isset:%p id:%p", + event, isset, id); + goto id_isset; + } + + event_hdr_t *ev_hdr = NULL; + int err = event_uarea_init(event, &ev_hdr/*out*/); + + if (EM_CHECK_LEVEL > 0 && unlikely(err)) { + status = INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, + EM_ESCOPE_EVENT_UAREA_ID_GET, + "Cannot init event user area: %d", err); + goto id_isset; + } + + if (ev_hdr->user_area.isset_id) { + /* user-area-id has been set */ + id_set = true; + if (id) + *id = ev_hdr->user_area.id; /*out*/ + } + +id_isset: + if (isset) + *isset = id_set; /*out*/ + return status; +} + +em_status_t em_event_uarea_info(em_event_t event, + em_event_uarea_info_t *uarea_info /*out*/) +{ + em_status_t status = EM_ERROR; + + /* Check args */ + if (EM_CHECK_LEVEL > 0 && + unlikely(event == EM_EVENT_UNDEF || !uarea_info)) { + status = INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UAREA_INFO, + "Inv.args: event:%" PRI_EVENT " uarea_info:%p", + event, uarea_info); + goto err_uarea; + } + + event_hdr_t *ev_hdr = NULL; + int err = event_uarea_init(event, &ev_hdr/*out*/); + + if (EM_CHECK_LEVEL > 0 && unlikely(err)) { + status = INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, + EM_ESCOPE_EVENT_UAREA_INFO, + "Cannot init event user area: %d", err); + goto err_uarea; + } + + if (ev_hdr->user_area.size == 0) { + uarea_info->uarea = NULL; + uarea_info->size = 0; + } else { + uarea_info->uarea = (void *)((uintptr_t)ev_hdr + + sizeof(event_hdr_t)); + uarea_info->size = ev_hdr->user_area.size; + } + + if (ev_hdr->user_area.isset_id) { + uarea_info->id.isset = true; + uarea_info->id.value = ev_hdr->user_area.id; + } else { + uarea_info->id.isset = false; + uarea_info->id.value = 0; + } + + return EM_OK; + +err_uarea: + if (uarea_info) { + uarea_info->uarea = NULL; + uarea_info->size = 0; + uarea_info->id.isset = false; + uarea_info->id.value = 0; + } + return status; +} + +em_event_t em_event_ref(em_event_t event) +{ + /* Check args */ + if (unlikely(EM_CHECK_LEVEL > 0 && event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_REF, + "Invalid arg: event:%" PRI_EVENT "", event); + return EM_EVENT_UNDEF; + } + + odp_event_t odp_event = event_em2odp(event); + odp_event_type_t odp_etype = odp_event_type(odp_event); + + if (EM_CHECK_LEVEL > 0 && unlikely(odp_etype != ODP_EVENT_PACKET)) { + INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_EVENT_REF, + "Event not a packet! Refs not supported for odp-events of type:%d", + odp_etype); + return EM_EVENT_UNDEF; + } + + odp_packet_t odp_pkt = odp_packet_from_event(odp_event); + odp_packet_t pkt_ref = odp_packet_ref_static(odp_pkt); + event_hdr_t *ev_hdr = odp_packet_user_area(odp_pkt); + + if (EM_CHECK_LEVEL > 0 && unlikely(pkt_ref == ODP_PACKET_INVALID)) { + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_EVENT_REF, + "ODP failure in odp_packet_ref_static()"); + return EM_EVENT_UNDEF; + } + + if (unlikely(EM_CHECK_LEVEL >= 2 && odp_pkt != pkt_ref)) { + INTERNAL_ERROR(EM_FATAL(EM_ERR_NOT_IMPLEMENTED), EM_ESCOPE_EVENT_REF, + "EM assumes all refs use the same handle"); + odp_packet_free(odp_pkt); + return EM_EVENT_UNDEF; + } + + /* + * Indicate that this event has references and some of the ESV checks + * must be omitted (evgen) - 'refs_used' will be set for the whole + * lifetime of this event, i.e. until the event is freed back into the + * pool. Important only for the first call of em_event_ref(), subsequent + * calls write same value. 
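+	 * With several references sharing one handle, the per-event
+	 * generation count (evgen) can no longer track a single owner, hence
+	 * those particular ESV checks are skipped for this event.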
+ */
+	ev_hdr->flags.refs_used = 1;
+
+	em_event_t ref = event;
+
+	if (esv_enabled())
+		ref = evstate_ref(event, ev_hdr);
+
+	return ref;
+}
+
+bool em_event_has_ref(em_event_t event)
+{
+	/* Check args */
+	if (unlikely(EM_CHECK_LEVEL > 0 && event == EM_EVENT_UNDEF)) {
+		INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_HAS_REF,
+			       "Invalid arg: event:%" PRI_EVENT "", event);
+		return false;
+	}
+
+	return event_has_ref(event);
+}
+
+void em_event_vector_free(em_event_t vector_event)
+{
+	if (EM_CHECK_LEVEL > 0 &&
+	    unlikely(vector_event == EM_EVENT_UNDEF)) {
+		INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_VECTOR_FREE,
+			       "Invalid args: vector_event:%" PRI_EVENT "",
+			       vector_event);
+		return;
+	}
+
+	if (EM_CHECK_LEVEL > 2 &&
+	    unlikely(!is_vector_type_or_error(vector_event, EM_ESCOPE_EVENT_VECTOR_FREE))) {
+		return;
+	}
+
+	if (EM_API_HOOKS_ENABLE)
+		call_api_hooks_free(&vector_event, 1);
+
+	if (esv_enabled()) {
+		event_hdr_t *const ev_hdr = eventvec_to_hdr(vector_event);
+
+		evstate_free(vector_event, ev_hdr, EVSTATE__EVENT_VECTOR_FREE);
+	}
+
+	odp_event_t odp_event = event_em2odp(vector_event);
+	odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event);
+
+	odp_packet_vector_free(pkt_vec);
+}
+
+uint32_t em_event_vector_tbl(em_event_t vector_event,
+			     em_event_t **event_tbl/*out*/)
+{
+	if (EM_CHECK_LEVEL > 0 &&
+	    unlikely(vector_event == EM_EVENT_UNDEF || !event_tbl)) {
+		INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_VECTOR_TBL,
+			       "Invalid args: vector_event:%" PRI_EVENT " event_tbl:%p",
+			       vector_event, event_tbl);
+		return 0;
+	}
+
+	if (EM_CHECK_LEVEL > 2 &&
+	    unlikely(!is_vector_type_or_error(vector_event, EM_ESCOPE_EVENT_VECTOR_TBL))) {
+		*event_tbl = NULL;
+		return 0;
+	}
+
+	return event_vector_tbl(vector_event, event_tbl /*out*/);
+}
+
+uint32_t em_event_vector_size(em_event_t vector_event)
+{
+	if (EM_CHECK_LEVEL > 0 &&
+	    unlikely(vector_event == EM_EVENT_UNDEF)) {
+		INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_VECTOR_SIZE,
+			       "Invalid arg: vector_event undefined!");
+		return 0;
+	}
+
+	if (EM_CHECK_LEVEL > 2 &&
+	    unlikely(!is_vector_type_or_error(vector_event, EM_ESCOPE_EVENT_VECTOR_SIZE)))
+		return 0;
+
+	odp_event_t odp_event = event_em2odp(vector_event);
+	odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event);
+
+	return odp_packet_vector_size(pkt_vec);
+}
+
+void em_event_vector_size_set(em_event_t vector_event, uint32_t size)
+{
+	if (EM_CHECK_LEVEL > 0 &&
+	    unlikely(vector_event == EM_EVENT_UNDEF)) {
+		INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_VECTOR_SIZE_SET,
+			       "Invalid arg: vector_event undefined!");
+		return;
+	}
+
+	if (EM_CHECK_LEVEL > 2 &&
+	    unlikely(!is_vector_type_or_error(vector_event, EM_ESCOPE_EVENT_VECTOR_SIZE_SET)))
+		return;
+
+	odp_event_t odp_event = event_em2odp(vector_event);
+	odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event);
+
+	odp_packet_vector_size_set(pkt_vec, size);
+}
+
+uint32_t em_event_vector_max_size(em_event_t vector_event)
+{
+	if (EM_CHECK_LEVEL > 0 &&
+	    unlikely(vector_event == EM_EVENT_UNDEF)) {
+		INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_VECTOR_MAX_SIZE,
+			       "Invalid arg: vector_event undefined!");
+		return 0;
+	}
+
+	if (EM_CHECK_LEVEL > 2 &&
+	    unlikely(!is_vector_type_or_error(vector_event, EM_ESCOPE_EVENT_VECTOR_MAX_SIZE)))
+		return 0;
+
+	uint32_t max_size = 0;
+	em_status_t err = event_vector_max_size(vector_event, &max_size,
+						EM_ESCOPE_EVENT_VECTOR_MAX_SIZE);
+	if (unlikely(err != EM_OK))
+		return 0;
+
+	return max_size;
+}
+
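+/*
+ * Usage sketch for the vector APIs above (illustrative only, not part of
+ * this file): an EO receive function could forward the events contained in
+ * a vector and then release just the vector itself. 'dst' is assumed to be
+ * a valid queue; em_send() error handling is omitted for brevity.
+ *
+ *	void forward_vector(em_event_t vec, em_queue_t dst)
+ *	{
+ *		em_event_t *ev_tbl;
+ *		uint32_t num = em_event_vector_tbl(vec, &ev_tbl);
+ *
+ *		for (uint32_t i = 0; i < num; i++)
+ *			(void)em_send(ev_tbl[i], dst);
+ *
+ *		em_event_vector_size_set(vec, 0); // drop the sent events
+ *		em_event_vector_free(vec);        // frees only the vector
+ *	}
+ */
+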
+em_status_t em_event_vector_info(em_event_t vector_event, + em_event_vector_info_t *vector_info /*out*/) +{ + em_status_t status = EM_ERROR; + + /* Check args */ + if (EM_CHECK_LEVEL > 0 && + unlikely(vector_event == EM_EVENT_UNDEF || !vector_info)) { + status = INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_VECTOR_INFO, + "Invalid args: vector_event:%" PRI_EVENT " vector_info:%p", + vector_event, vector_info); + goto err_vecinfo; + } + + if (EM_CHECK_LEVEL > 2 && + unlikely(!is_vector_type_or_error(vector_event, EM_ESCOPE_EVENT_VECTOR_INFO))) { + status = EM_ERR_BAD_TYPE; + goto err_vecinfo; + } + + /* Get the max size */ + status = event_vector_max_size(vector_event, &vector_info->max_size, + EM_ESCOPE_EVENT_VECTOR_INFO); + if (unlikely(status != EM_OK)) + goto err_vecinfo; + + /* Get vector size and the event-table */ + vector_info->size = event_vector_tbl(vector_event, &vector_info->event_tbl/*out*/); + + return EM_OK; + +err_vecinfo: + if (vector_info) { + vector_info->event_tbl = NULL; + vector_info->size = 0; + vector_info->max_size = 0; + } + return status; +} + +uint64_t em_event_to_u64(em_event_t event) +{ + return (uint64_t)event; +} diff --git a/src/event_machine_init.c b/src/event_machine_init.c index 7da717f2..1b560dde 100644 --- a/src/event_machine_init.c +++ b/src/event_machine_init.c @@ -1,471 +1,480 @@ -/* - * Copyright (c) 2018-2023, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine initialization and termination. 
- * - */ - -#include "em_include.h" - -/** EM shared memory */ -em_shm_t *em_shm; - -/** Core local variables */ -ENV_LOCAL em_locm_t em_locm ENV_CACHE_LINE_ALIGNED = { - .current.egrp = EM_EVENT_GROUP_UNDEF, - .current.sched_context_type = EM_SCHED_CONTEXT_TYPE_NONE, - .local_queues.empty = 1, - .do_input_poll = false, - .do_output_drain = false, - .sync_api.in_progress = false - /* other members initialized to 0 or NULL as per C standard */ -}; - -void em_conf_init(em_conf_t *conf) -{ - if (unlikely(!conf)) { - INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER), - EM_ESCOPE_CONF_INIT, "Conf pointer NULL!"); - return; - } - memset(conf, 0, sizeof(em_conf_t)); - em_pool_cfg_init(&conf->default_pool_cfg); -} - -em_status_t em_init(const em_conf_t *conf) -{ - em_status_t stat; - int ret; - - RETURN_ERROR_IF(!conf, EM_FATAL(EM_ERR_BAD_ARG), EM_ESCOPE_INIT, - "Conf pointer NULL!"); - - stat = early_log_init(conf->log.log_fn, conf->log.vlog_fn); - RETURN_ERROR_IF(stat != EM_OK, EM_FATAL(stat), - EM_ESCOPE_INIT, "User provided log funcs invalid!"); - - /* Sanity check: em_shm should not be set yet */ - RETURN_ERROR_IF(em_shm != NULL, - EM_FATAL(EM_ERR_BAD_STATE), EM_ESCOPE_INIT, - "EM shared memory ptr set - already initialized?"); - /* Sanity check: either process- or thread-per-core, but not both */ - RETURN_ERROR_IF(!(conf->process_per_core ^ conf->thread_per_core), - EM_FATAL(EM_ERR_BAD_ARG), EM_ESCOPE_INIT, - "Select EITHER process-per-core OR thread-per-core!"); - - /* - * Reserve the EM shared memory once at start-up. - */ - uint32_t flags = 0; - odp_shm_capability_t shm_capa; - - ret = odp_shm_capability(&shm_capa); - RETURN_ERROR_IF(ret, EM_ERR_OPERATION_FAILED, EM_ESCOPE_INIT, - "shm capability error:%d", ret); - - if (shm_capa.flags & ODP_SHM_SINGLE_VA) - flags |= ODP_SHM_SINGLE_VA; - - odp_shm_t shm = odp_shm_reserve("em_shm", sizeof(em_shm_t), - ODP_CACHE_LINE_SIZE, flags); - - RETURN_ERROR_IF(shm == ODP_SHM_INVALID, EM_ERR_ALLOC_FAILED, - EM_ESCOPE_INIT, "Shared memory reservation failed!"); - - em_shm = odp_shm_addr(shm); - - RETURN_ERROR_IF(em_shm == NULL, EM_ERR_NOT_FOUND, EM_ESCOPE_INIT, - "Shared memory ptr NULL!"); - - memset(em_shm, 0, sizeof(em_shm_t)); - - /* Store shm handle, can be used in em_term() to free the memory */ - em_shm->this_shm = shm; - - /* Store the given EM configuration */ - em_shm->conf = *conf; - - if (!EM_API_HOOKS_ENABLE) { - memset(&em_shm->conf.api_hooks, 0, - sizeof(em_shm->conf.api_hooks)); - } - - /* Initialize the log & error handling */ - log_init(); - error_init(); - - /* Initialize libconfig */ - ret = em_libconfig_init_global(&em_shm->libconfig); - RETURN_ERROR_IF(ret != 0, EM_ERR_OPERATION_FAILED, EM_ESCOPE_INIT, - "libconfig initialization failed:%d", ret); - - /* - * Initialize the physical-core <-> EM-core mapping - * - * EM-core <-> ODP-thread id mappings cannot be set up yet, - * the ODP thread id is assigned only when that thread is initialized. - * Set this mapping in core_map_init_local() - */ - stat = core_map_init(&em_shm->core_map, conf->core_count, - &conf->phys_mask); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "core_map_init() failed:%" PRI_STAT "", stat); - - /* Initialize the EM event dispatcher */ - stat = dispatch_init(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "dispatch_init() failed:%" PRI_STAT "", stat); - - /* - * Check validity of core masks for input_poll_fn and output_drain_fn. - * - * Masks must be a subset of logical EM core mask. 
Zero mask means - * that input_poll_fn and output_drain_fn are run on all EM cores. - */ - stat = input_poll_check(&em_shm->core_map.logic_mask, conf); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "input_poll_init() failed:%" PRI_STAT "", stat); - stat = output_drain_check(&em_shm->core_map.logic_mask, conf); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "output_drain_init() failed:%" PRI_STAT "", stat); - - /* - * Initialize Event State Verification (ESV), if enabled at compile time - */ - if (EM_ESV_ENABLE) { - stat = esv_init(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "esv_init() failed:%" PRI_STAT "", stat); - } else { - esv_disabled_warn_config(); - } - - /* Initialize EM callbacks/hooks */ - stat = hooks_init(&conf->api_hooks, &conf->idle_hooks); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "hooks_init() failed:%" PRI_STAT "", stat); - - /* - * Initialize the EM buffer pools and create the EM_DEFAULT_POOL. - * Create also startup pools if configured in the runtime config - * file through option 'startup_pools'. - */ - stat = pool_init(&em_shm->mpool_tbl, &em_shm->mpool_pool, - &conf->default_pool_cfg); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "pool_init() failed:%" PRI_STAT "", stat); - - stat = event_init(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "event_init() failed:%" PRI_STAT "", stat); - - stat = event_group_init(&em_shm->event_group_tbl, - &em_shm->event_group_pool); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "event_group_init() failed:%" PRI_STAT "", stat); - - stat = queue_init(&em_shm->queue_tbl, &em_shm->queue_pool, - &em_shm->queue_pool_static); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "queue_init() failed:%" PRI_STAT "", stat); - - stat = queue_group_init(&em_shm->queue_group_tbl, - &em_shm->queue_group_pool); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "queue_group_init() failed:%" PRI_STAT "", stat); - - stat = atomic_group_init(&em_shm->atomic_group_tbl, - &em_shm->atomic_group_pool); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "atomic_group_init() failed:%" PRI_STAT "", stat); - - stat = eo_init(&em_shm->eo_tbl, &em_shm->eo_pool); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "eo_init() failed:%" PRI_STAT "", stat); - - stat = create_ctrl_queues(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "create_ctrl_queues() failed:%" PRI_STAT "", stat); - - /* Initialize EM Timer */ - if (conf->event_timer) { - stat = timer_init(&em_shm->timers); - RETURN_ERROR_IF(stat != EM_OK, - EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "timer_init() failed:%" PRI_STAT "", - stat); - } - - /* Initialize basic Event Chaining support */ - stat = chaining_init(&em_shm->event_chaining); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "chaining_init() failed:%" PRI_STAT "", stat); - - /* Initialize em_cli */ - stat = emcli_init(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "emcli_init() failed:%" PRI_STAT "", stat); - - /* - * Print EM and ODP version information - */ - print_version_info(); - - return EM_OK; -} - -em_status_t em_init_core(void) -{ - em_locm_t *const locm = &em_locm; - odp_shm_t shm; - em_shm_t *shm_addr; - em_status_t stat; - - /* Lookup the EM shared memory on each EM-core */ - shm = odp_shm_lookup("em_shm"); - 
RETURN_ERROR_IF(shm == ODP_SHM_INVALID, - EM_ERR_NOT_FOUND, EM_ESCOPE_INIT_CORE, - "Shared memory lookup failed!"); - - shm_addr = odp_shm_addr(shm); - RETURN_ERROR_IF(shm_addr == NULL, EM_ERR_BAD_POINTER, EM_ESCOPE_INIT_CORE, - "Shared memory ptr NULL"); - - if (shm_addr->conf.process_per_core && em_shm == NULL) - em_shm = shm_addr; - - RETURN_ERROR_IF(shm_addr != em_shm, EM_ERR_BAD_POINTER, EM_ESCOPE_INIT_CORE, - "Shared memory init fails: em_shm:%p != shm_addr:%p", - em_shm, shm_addr); - - /* Initialize core mappings not known yet in core_map_init() */ - stat = core_map_init_local(&em_shm->core_map); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, - "core_map_init_local() failed:%" PRI_STAT "", stat); - - stat = queue_group_init_local(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, - "queue_group_init_local() failed:%" PRI_STAT "", stat); - - stat = dispatch_init_local(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, - "dispatch_init_local() failed:%" PRI_STAT "", stat); - - /* Check if input_poll_fn should be executed on this core */ - stat = input_poll_init_local(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, - "input_poll_init_local() failed:%" PRI_STAT "", stat); - - /* Check if output_drain_fn should be executed on this core */ - stat = output_drain_init_local(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, - "output_drain_init_local() failed:%" PRI_STAT "", stat); - - stat = queue_init_local(); - RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_INIT_CORE, - "queue_init_local() failed:%" PRI_STAT "", stat); - - /* - * Initialize EM timer. If global init was not done (config), - * this is just a NOP - */ - stat = timer_init_local(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, - "timer_init_local() failed:%" PRI_STAT "", stat); - - stat = sync_api_init_local(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, - "sync_api_init_local() failed:%" PRI_STAT "", stat); - - /* Init the EM CLI locally on this core (only if enabled) */ - stat = emcli_init_local(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, - "emcli_init_local() failed:%" PRI_STAT "", stat); - - /* This is an EM-core that will participate in EM event dispatching */ - locm->is_external_thr = false; - - /* Initialize debug timestamps to 1 if enabled to differentiate from disabled */ - if (EM_DEBUG_TIMESTAMP_ENABLE) - for (int i = 0; i < EM_DEBUG_TSP_LAST; i++) - locm->debug_ts[i] = 1; - - /* Now OK to call EM APIs */ - - env_sync_mem(); - - return EM_OK; -} - -static void flush_scheduler_events(void) -{ - odp_event_t odp_ev_tbl[EM_SCHED_MULTI_MAX_BURST]; - event_hdr_t *ev_hdr_tbl[EM_SCHED_MULTI_MAX_BURST]; - em_event_t em_ev_tbl[EM_SCHED_MULTI_MAX_BURST]; - int num_events; - - do { - num_events = odp_schedule_multi_no_wait(NULL, odp_ev_tbl, EM_SCHED_MULTI_MAX_BURST); - /* the check 'num_events > EM_SCHED_MULTI_MAX_BURST' avoids a gcc warning */ - if (num_events <= 0 || num_events > EM_SCHED_MULTI_MAX_BURST) - break; - /* - * Events might originate from outside of EM and need init. 
- */ - event_init_odp_multi(odp_ev_tbl, em_ev_tbl/*out*/, ev_hdr_tbl/*out*/, - num_events, true/*is_extev*/); - em_free_multi(em_ev_tbl, num_events); - } while (num_events > 0); -} - -em_status_t em_term(const em_conf_t *conf) -{ - em_locm_t *const locm = &em_locm; - em_status_t stat; - int ret; - - (void)conf; - - /* - * Join all queue groups to be able to flush all events - * from the scheduler from this core. - */ - queue_group_join_all(); - - /* - * Flush all events in the scheduler. - * Run loop twice: first with sched enabled and then paused. - */ - if (locm->is_sched_paused) { - locm->is_sched_paused = false; - odp_schedule_resume(); - } - for (int i = 0; i < 2; i++) { - flush_scheduler_events(); - locm->is_sched_paused = true; - odp_schedule_pause(); - } - - stat = delete_ctrl_queues(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM, - "delete_ctrl_queues() failed:%" PRI_STAT "", stat); - - if (em_shm->conf.event_timer) - timer_term(&em_shm->timers); - - stat = emcli_term(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM, - "emcli_term() failed:%" PRI_STAT "", stat); - - stat = chaining_term(&em_shm->event_chaining); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM, - "chaining_term() failed:%" PRI_STAT "", stat); - - ret = em_libconfig_term_global(&em_shm->libconfig); - RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM, - "EM config term failed:%d"); - - stat = pool_term(&em_shm->mpool_tbl); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM, - "pool_term() failed:%" PRI_STAT "", stat); - - env_shared_free(em_shm->queue_tbl.queue_elem); - - /* - * Free the EM shared memory - */ - ret = odp_shm_free(em_shm->this_shm); - RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM, - "odp_shm_free() failed:%d", ret); - /* Set em_shm = NULL to allow a new call to em_init() */ - em_shm = NULL; - - return EM_OK; -} - -em_status_t em_term_core(void) -{ - em_status_t stat = EM_OK; - em_status_t ret_stat = EM_OK; - em_locm_t *const locm = &em_locm; - - /* - * Poll internal unscheduled ctrl queues to complete ctrl actions - * and flush them. - */ - poll_unsched_ctrl_queue(); - - /* - * Flush the scheduler from locally stashed events. - */ - if (!locm->is_sched_paused) { - locm->is_sched_paused = true; - odp_schedule_pause(); - } - flush_scheduler_events(); - - /* Stop EM Timer. Just a NOP if timer was not enabled (config) */ - stat = timer_term_local(); - if (stat != EM_OK) { - ret_stat = stat; - INTERNAL_ERROR(stat, EM_ESCOPE_TERM_CORE, - "timer_term_local() fails: %" PRI_STAT "", stat); - } - - /* Term the EM CLI locally (if enabled) */ - stat = emcli_term_local(); - if (stat != EM_OK) { - ret_stat = stat; - INTERNAL_ERROR(stat, EM_ESCOPE_TERM_CORE, - "emcli_term_local() fails: %" PRI_STAT "", stat); - } - - /* Delete the local queues */ - stat = queue_term_local(); - if (stat != EM_OK) { - ret_stat = stat; - INTERNAL_ERROR(stat, EM_ESCOPE_TERM_CORE, - "queue_term_local() fails: %" PRI_STAT "", stat); - } - - stat = core_map_term_local(&em_shm->core_map); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM_CORE, - "core_map_term_local() failed:%" PRI_STAT "", stat); - - return ret_stat == EM_OK ? EM_OK : EM_ERR; -} - -uint16_t em_device_id(void) -{ - return em_shm->conf.device_id; -} +/* + * Copyright (c) 2018-2023, Nokia Solutions and Networks + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Event Machine initialization and termination. + * + */ + +#include "em_include.h" + +/** EM shared memory */ +em_shm_t *em_shm; + +/** Core local variables */ +ENV_LOCAL em_locm_t em_locm ENV_CACHE_LINE_ALIGNED = { + .current.egrp = EM_EVENT_GROUP_UNDEF, + .current.sched_context_type = EM_SCHED_CONTEXT_TYPE_NONE, + .local_queues.empty = 1, + .do_input_poll = false, + .do_output_drain = false, + .sync_api.in_progress = false + /* other members initialized to 0 or NULL as per C standard */ +}; + +void em_conf_init(em_conf_t *conf) +{ + if (unlikely(!conf)) { + INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER), + EM_ESCOPE_CONF_INIT, "Conf pointer NULL!"); + return; + } + memset(conf, 0, sizeof(em_conf_t)); + em_pool_cfg_init(&conf->default_pool_cfg); + conf->__internal_check = EM_CHECK_INIT_CALLED; +} + +em_status_t em_init(const em_conf_t *conf) +{ + em_status_t stat; + int ret; + + RETURN_ERROR_IF(!conf, EM_FATAL(EM_ERR_BAD_ARG), EM_ESCOPE_INIT, + "Conf pointer NULL!"); + + RETURN_ERROR_IF(conf->__internal_check != EM_CHECK_INIT_CALLED, + EM_ERR_NOT_INITIALIZED, EM_ESCOPE_INIT, + "Not initialized: em_conf_init(conf) not called"); + + stat = early_log_init(conf->log.log_fn, conf->log.vlog_fn); + RETURN_ERROR_IF(stat != EM_OK, EM_FATAL(stat), + EM_ESCOPE_INIT, "User provided log funcs invalid!"); + + /* Sanity check: em_shm should not be set yet */ + RETURN_ERROR_IF(em_shm != NULL, + EM_FATAL(EM_ERR_BAD_STATE), EM_ESCOPE_INIT, + "EM shared memory ptr set - already initialized?"); + /* Sanity check: either process- or thread-per-core, but not both */ + RETURN_ERROR_IF(!(conf->process_per_core ^ conf->thread_per_core), + EM_FATAL(EM_ERR_BAD_ARG), EM_ESCOPE_INIT, + "Select EITHER process-per-core OR thread-per-core!"); + + /* + * Reserve the EM shared memory once at start-up. 
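+	 * ODP_SHM_SINGLE_VA is requested when available so that the shared
+	 * memory is mapped to the same virtual address on every EM-core,
+	 * also in process-per-core mode.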
+ */ + uint32_t flags = 0; + odp_shm_capability_t shm_capa; + + ret = odp_shm_capability(&shm_capa); + RETURN_ERROR_IF(ret, EM_ERR_OPERATION_FAILED, EM_ESCOPE_INIT, + "shm capability error:%d", ret); + + if (shm_capa.flags & ODP_SHM_SINGLE_VA) + flags |= ODP_SHM_SINGLE_VA; + + odp_shm_t shm = odp_shm_reserve("em_shm", sizeof(em_shm_t), + ODP_CACHE_LINE_SIZE, flags); + + RETURN_ERROR_IF(shm == ODP_SHM_INVALID, EM_ERR_ALLOC_FAILED, + EM_ESCOPE_INIT, "Shared memory reservation failed!"); + + em_shm = odp_shm_addr(shm); + + RETURN_ERROR_IF(em_shm == NULL, EM_ERR_NOT_FOUND, EM_ESCOPE_INIT, + "Shared memory ptr NULL!"); + + memset(em_shm, 0, sizeof(em_shm_t)); + + /* Store shm handle, can be used in em_term() to free the memory */ + em_shm->this_shm = shm; + + /* Store the given EM configuration */ + em_shm->conf = *conf; + + if (!EM_API_HOOKS_ENABLE) { + memset(&em_shm->conf.api_hooks, 0, + sizeof(em_shm->conf.api_hooks)); + } + + /* Initialize the log & error handling */ + log_init(); + error_init(); + + /* Initialize libconfig */ + ret = em_libconfig_init_global(&em_shm->libconfig); + RETURN_ERROR_IF(ret != 0, EM_ERR_OPERATION_FAILED, EM_ESCOPE_INIT, + "libconfig initialization failed:%d", ret); + + /* + * Initialize the physical-core <-> EM-core mapping + * + * EM-core <-> ODP-thread id mappings cannot be set up yet, + * the ODP thread id is assigned only when that thread is initialized. + * Set this mapping in core_map_init_local() + */ + stat = core_map_init(&em_shm->core_map, conf->core_count, + &conf->phys_mask); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "core_map_init() failed:%" PRI_STAT "", stat); + + /* Initialize the EM event dispatcher */ + stat = dispatch_init(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "dispatch_init() failed:%" PRI_STAT "", stat); + + /* + * Check validity of core masks for input_poll_fn and output_drain_fn. + * + * Masks must be a subset of logical EM core mask. Zero mask means + * that input_poll_fn and output_drain_fn are run on all EM cores. + */ + stat = input_poll_check(&em_shm->core_map.logic_mask, conf); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "input_poll_init() failed:%" PRI_STAT "", stat); + stat = output_drain_check(&em_shm->core_map.logic_mask, conf); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "output_drain_init() failed:%" PRI_STAT "", stat); + + /* + * Initialize Event State Verification (ESV), if enabled at compile time + */ + if (EM_ESV_ENABLE) { + stat = esv_init(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "esv_init() failed:%" PRI_STAT "", stat); + } else { + esv_disabled_warn_config(); + } + + /* Initialize EM callbacks/hooks */ + stat = hooks_init(&conf->api_hooks, &conf->idle_hooks); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "hooks_init() failed:%" PRI_STAT "", stat); + + /* + * Initialize the EM buffer pools and create the EM_DEFAULT_POOL. + * Create also startup pools if configured in the runtime config + * file through option 'startup_pools'. 
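+	 * (the 'startup_pools' option is given in the EM runtime
+	 * configuration file, e.g. config/em-odp.conf)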
+ */ + stat = pool_init(&em_shm->mpool_tbl, &em_shm->mpool_pool, + &conf->default_pool_cfg); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "pool_init() failed:%" PRI_STAT "", stat); + + stat = event_init(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "event_init() failed:%" PRI_STAT "", stat); + + stat = event_group_init(&em_shm->event_group_tbl, + &em_shm->event_group_stash); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "event_group_init() failed:%" PRI_STAT "", stat); + + stat = queue_init(&em_shm->queue_tbl, &em_shm->queue_pool, + &em_shm->queue_pool_static); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "queue_init() failed:%" PRI_STAT "", stat); + + stat = queue_group_init(&em_shm->queue_group_tbl, + &em_shm->queue_group_pool); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "queue_group_init() failed:%" PRI_STAT "", stat); + + stat = atomic_group_init(&em_shm->atomic_group_tbl, + &em_shm->atomic_group_pool); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "atomic_group_init() failed:%" PRI_STAT "", stat); + + stat = eo_init(&em_shm->eo_tbl, &em_shm->eo_pool); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "eo_init() failed:%" PRI_STAT "", stat); + + stat = create_ctrl_queues(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "create_ctrl_queues() failed:%" PRI_STAT "", stat); + + /* Initialize EM Timer */ + if (conf->event_timer) { + stat = timer_init(&em_shm->timers); + RETURN_ERROR_IF(stat != EM_OK, + EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "timer_init() failed:%" PRI_STAT "", + stat); + } + + /* Initialize basic Event Chaining support */ + stat = chaining_init(&em_shm->event_chaining); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "chaining_init() failed:%" PRI_STAT "", stat); + + /* Initialize em_cli */ + stat = emcli_init(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "emcli_init() failed:%" PRI_STAT "", stat); + + /* + * Print EM and ODP version information + */ + print_version_info(); + + return EM_OK; +} + +em_status_t em_init_core(void) +{ + em_locm_t *const locm = &em_locm; + odp_shm_t shm; + em_shm_t *shm_addr; + em_status_t stat; + + /* Lookup the EM shared memory on each EM-core */ + shm = odp_shm_lookup("em_shm"); + RETURN_ERROR_IF(shm == ODP_SHM_INVALID, + EM_ERR_NOT_FOUND, EM_ESCOPE_INIT_CORE, + "Shared memory lookup failed!"); + + shm_addr = odp_shm_addr(shm); + RETURN_ERROR_IF(shm_addr == NULL, EM_ERR_BAD_POINTER, EM_ESCOPE_INIT_CORE, + "Shared memory ptr NULL"); + + if (shm_addr->conf.process_per_core && em_shm == NULL) + em_shm = shm_addr; + + RETURN_ERROR_IF(shm_addr != em_shm, EM_ERR_BAD_POINTER, EM_ESCOPE_INIT_CORE, + "Shared memory init fails: em_shm:%p != shm_addr:%p", + em_shm, shm_addr); + + /* Initialize core mappings not known yet in core_map_init() */ + stat = core_map_init_local(&em_shm->core_map); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, + "core_map_init_local() failed:%" PRI_STAT "", stat); + + stat = queue_group_init_local(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, + "queue_group_init_local() failed:%" PRI_STAT "", stat); + + stat = dispatch_init_local(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, + "dispatch_init_local() failed:%" PRI_STAT "", stat); + + /* Check if input_poll_fn should be executed on this core */ + stat = 
input_poll_init_local(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, + "input_poll_init_local() failed:%" PRI_STAT "", stat); + + /* Check if output_drain_fn should be executed on this core */ + stat = output_drain_init_local(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, + "output_drain_init_local() failed:%" PRI_STAT "", stat); + + stat = queue_init_local(); + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_INIT_CORE, + "queue_init_local() failed:%" PRI_STAT "", stat); + + /* + * Initialize EM timer. If global init was not done (config), + * this is just a NOP + */ + stat = timer_init_local(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, + "timer_init_local() failed:%" PRI_STAT "", stat); + + stat = sync_api_init_local(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, + "sync_api_init_local() failed:%" PRI_STAT "", stat); + + /* Init the EM CLI locally on this core (only if enabled) */ + stat = emcli_init_local(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, + "emcli_init_local() failed:%" PRI_STAT "", stat); + + /* This is an EM-core that will participate in EM event dispatching */ + locm->is_external_thr = false; + + /* Initialize debug timestamps to 1 if enabled to differentiate from disabled */ + if (EM_DEBUG_TIMESTAMP_ENABLE) + for (int i = 0; i < EM_DEBUG_TSP_LAST; i++) + locm->debug_ts[i] = 1; + + /* Now OK to call EM APIs */ + + env_sync_mem(); + + return EM_OK; +} + +static void flush_scheduler_events(void) +{ + odp_event_t odp_ev_tbl[EM_SCHED_MULTI_MAX_BURST]; + event_hdr_t *ev_hdr_tbl[EM_SCHED_MULTI_MAX_BURST]; + em_event_t em_ev_tbl[EM_SCHED_MULTI_MAX_BURST]; + int num_events; + + do { + num_events = odp_schedule_multi_no_wait(NULL, odp_ev_tbl, EM_SCHED_MULTI_MAX_BURST); + /* the check 'num_events > EM_SCHED_MULTI_MAX_BURST' avoids a gcc warning */ + if (num_events <= 0 || num_events > EM_SCHED_MULTI_MAX_BURST) + break; + /* + * Events might originate from outside of EM and need init. + */ + event_init_odp_multi(odp_ev_tbl, em_ev_tbl/*out*/, ev_hdr_tbl/*out*/, + num_events, true/*is_extev*/); + em_free_multi(em_ev_tbl, num_events); + } while (num_events > 0); +} + +em_status_t em_term(const em_conf_t *conf) +{ + em_locm_t *const locm = &em_locm; + em_status_t stat; + int ret; + + (void)conf; + + /* + * Join all queue groups to be able to flush all events + * from the scheduler from this core. + */ + queue_group_join_all(); + + /* + * Flush all events in the scheduler. + * Run loop twice: first with sched enabled and then paused. 
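+	 * The second, paused round also drains events that the scheduler
+	 * had already prefetched into core-local stashes.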
+ */
+	if (locm->is_sched_paused) {
+		locm->is_sched_paused = false;
+		odp_schedule_resume();
+	}
+	for (int i = 0; i < 2; i++) {
+		flush_scheduler_events();
+		locm->is_sched_paused = true;
+		odp_schedule_pause();
+	}
+
+	stat = delete_ctrl_queues();
+	RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM,
+			"delete_ctrl_queues() failed:%" PRI_STAT "", stat);
+
+	if (em_shm->conf.event_timer)
+		timer_term(&em_shm->timers);
+
+	stat = emcli_term();
+	RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM,
+			"emcli_term() failed:%" PRI_STAT "", stat);
+
+	stat = chaining_term(&em_shm->event_chaining);
+	RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM,
+			"chaining_term() failed:%" PRI_STAT "", stat);
+
+	ret = em_libconfig_term_global(&em_shm->libconfig);
+	RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM,
+			"EM config term failed:%d", ret);
+
+	stat = pool_term(&em_shm->mpool_tbl);
+	RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM,
+			"pool_term() failed:%" PRI_STAT "", stat);
+
+	env_shared_free(em_shm->queue_tbl.queue_elem);
+
+	stat = event_group_term();
+	RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM,
+			"event_group_term() failed.");
+
+	/*
+	 * Free the EM shared memory
+	 */
+	ret = odp_shm_free(em_shm->this_shm);
+	RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM,
+			"odp_shm_free() failed:%d", ret);
+	/* Set em_shm = NULL to allow a new call to em_init() */
+	em_shm = NULL;
+
+	return EM_OK;
+}
+
+em_status_t em_term_core(void)
+{
+	em_status_t stat = EM_OK;
+	em_status_t ret_stat = EM_OK;
+	em_locm_t *const locm = &em_locm;
+
+	/*
+	 * Poll internal unscheduled ctrl queues to complete ctrl actions
+	 * and flush them.
+	 */
+	poll_unsched_ctrl_queue();
+
+	/*
+	 * Flush the scheduler from locally stashed events.
+	 */
+	if (!locm->is_sched_paused) {
+		locm->is_sched_paused = true;
+		odp_schedule_pause();
+	}
+	flush_scheduler_events();
+
+	/* Stop EM Timer. Just a NOP if timer was not enabled (config) */
+	stat = timer_term_local();
+	if (stat != EM_OK) {
+		ret_stat = stat;
+		INTERNAL_ERROR(stat, EM_ESCOPE_TERM_CORE,
+			       "timer_term_local() fails: %" PRI_STAT "", stat);
+	}
+
+	/* Term the EM CLI locally (if enabled) */
+	stat = emcli_term_local();
+	if (stat != EM_OK) {
+		ret_stat = stat;
+		INTERNAL_ERROR(stat, EM_ESCOPE_TERM_CORE,
+			       "emcli_term_local() fails: %" PRI_STAT "", stat);
+	}
+
+	/* Delete the local queues */
+	stat = queue_term_local();
+	if (stat != EM_OK) {
+		ret_stat = stat;
+		INTERNAL_ERROR(stat, EM_ESCOPE_TERM_CORE,
+			       "queue_term_local() fails: %" PRI_STAT "", stat);
+	}
+
+	stat = core_map_term_local(&em_shm->core_map);
+	RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM_CORE,
+			"core_map_term_local() failed:%" PRI_STAT "", stat);
+
+	return ret_stat == EM_OK ? EM_OK : EM_ERR;
+}
+
+uint16_t em_device_id(void)
+{
+	return em_shm->conf.device_id;
+}
diff --git a/src/event_machine_timer.c b/src/event_machine_timer.c
index 6bda48a7..b8084112 100644
--- a/src/event_machine_timer.c
+++ b/src/event_machine_timer.c
@@ -1,1976 +1,1950 @@
-/*
- * Copyright (c) 2016, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * --------------------------------------------------------------------- - * Some notes about the implementation: - * - * EM Timer API is close to ODP timer, but there are issues - * making this code a bit more complex than it could be: - * - * 1) no periodic timer in ODP - * 2) unless using the pre-defined timeout event there is no way to access - * all necessary information runtime to implement a periodic timer - * - * Point 2 is solved by creating a timeout pool. When user allocates - * EM timeout, a new minimum size buffer is allocated to store all the needed - * information. Timer handle is a pointer to such buffer so all data is - * available via the handle (ack() is the most problematic case). This does - * create performance penalty, but so far it looks like the penalty is not - * too large and does simplify the code otherwise. Also timeouts could be - * pre-allocated as the API separates creation and arming. - * Most of the synchronization is handled by ODP timer, a ticketlock is used - * for high level management API. - * - */ -#include "em_include.h" - -/* timer handle = index + 1 (UNDEF 0) */ -#define TMR_I2H(x) ((em_timer_t)(uintptr_t)((x) + 1)) -#define TMR_H2I(x) ((int)((uintptr_t)(x) - 1)) - -static inline em_status_t timer_rv_odp2em(int odpret) -{ - switch (odpret) { - case ODP_TIMER_SUCCESS: - return EM_OK; - case ODP_TIMER_TOO_NEAR: - return EM_ERR_TOONEAR; - case ODP_TIMER_TOO_FAR: - return EM_ERR_TOOFAR; - default: - break; - } - - return EM_ERR_LIB_FAILED; -} - -static inline int is_queue_valid_type(em_timer_t tmr, const queue_elem_t *q_elem) -{ - unsigned int tmridx = (unsigned int)TMR_H2I(tmr); - - /* implementation specific */ - if (em_shm->timers.timer[tmridx].plain_q_ok && q_elem->type == EM_QUEUE_TYPE_UNSCHEDULED) - return 1; - /* EM assumes scheduled always supported */ - return (q_elem->type == EM_QUEUE_TYPE_ATOMIC || - q_elem->type == EM_QUEUE_TYPE_PARALLEL || - q_elem->type == EM_QUEUE_TYPE_PARALLEL_ORDERED) ? 
1 : 0; - - /* LOCAL or OUTPUT queues not supported */ -} - -static inline bool is_event_type_valid(em_event_t event) -{ - em_event_type_t etype = em_event_type_major(em_event_get_type(event)); - - if (etype == EM_EVENT_TYPE_PACKET || - etype == EM_EVENT_TYPE_SW || - etype == EM_EVENT_TYPE_TIMER) - return true; - - /* limitations mainly set by odp spec, e.g. no vectors */ - return false; -} - -/* Helper for em_tmo_get_type() */ -static inline bool can_have_tmo_type(em_event_t event) -{ - em_event_type_t etype = em_event_type_major(em_event_get_type(event)); - - if (etype == EM_EVENT_TYPE_PACKET || - etype == EM_EVENT_TYPE_SW || - etype == EM_EVENT_TYPE_TIMER || - etype == EM_EVENT_TYPE_TIMER_IND) - return true; - - return false; -} - -static inline int is_timer_valid(em_timer_t tmr) -{ - unsigned int i; - const timer_storage_t *const tmrs = &em_shm->timers; - - if (unlikely(tmr == EM_TIMER_UNDEF)) - return 0; - - i = (unsigned int)TMR_H2I(tmr); - if (unlikely(i >= EM_ODP_MAX_TIMERS)) - return 0; - - if (unlikely(tmrs->timer[i].odp_tmr_pool == ODP_TIMER_POOL_INVALID || - tmrs->timer[i].tmo_pool == ODP_POOL_INVALID)) - return 0; - return 1; -} - -static inline em_status_t ack_ring_timeout_event(em_tmo_t tmo, - em_event_t ev, - em_tmo_state_t tmo_state, - event_hdr_t *ev_hdr, - odp_event_t odp_ev) -{ - (void)ev; - (void)tmo_state; - - if (EM_CHECK_LEVEL > 0 && unlikely(ev_hdr->event_type != EM_EVENT_TYPE_TIMER_IND)) - return INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_ACK, - "Invalid event type:%u, expected timer-ring:%u", - ev_hdr->event_type, EM_EVENT_TYPE_TIMER_IND); - - if (EM_CHECK_LEVEL > 0 && unlikely(tmo != ev_hdr->tmo)) - return INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_ACK, - "Wrong event returned? tmo %p->%p", tmo, ev_hdr->tmo); - - int ret = odp_timer_periodic_ack(tmo->odp_timer, odp_ev); - - if (unlikely(ret < 0)) { /* failure */ - ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE; - return INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_ACK, - "Tmo ACK: ring timer odp ack fail, rv %d", ret); - } - - if (unlikely(ret == 2)) { /* cancelled, no more events coming */ - ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE; /* allows em_free */ - ev_hdr->tmo = EM_TMO_UNDEF; - atomic_thread_fence(memory_order_release); - TMR_DBG_PRINT("last periodic event %p\n", odp_ev); - return EM_ERR_CANCELED; - } - - /* ret = 1 would mean timer is cancelled, but more coming still. - * return ok to make ring and normal periodic behave the same - * e.g. 
CANCELED means tmo can now be deleted - */ - return EM_OK; -} - -static void cleanup_timer_create_fail(event_timer_t *timer) -{ - if (timer->tmo_pool != ODP_POOL_INVALID && - timer->tmo_pool != em_shm->timers.shared_tmo_pool) /* don't kill shared pool */ - odp_pool_destroy(timer->tmo_pool); - if (timer->odp_tmr_pool != ODP_TIMER_POOL_INVALID) - odp_timer_pool_destroy(timer->odp_tmr_pool); - timer->tmo_pool = ODP_POOL_INVALID; - timer->odp_tmr_pool = ODP_TIMER_POOL_INVALID; - TMR_DBG_PRINT("cleaned up failed timer create\n"); -} - -static odp_pool_t create_tmo_handle_pool(uint32_t num_buf, uint32_t cache, const event_timer_t *tmr) -{ - odp_pool_param_t odp_pool_param; - odp_pool_t pool; - char tmo_pool_name[ODP_POOL_NAME_LEN]; - - odp_pool_param_init(&odp_pool_param); - odp_pool_param.type = ODP_POOL_BUFFER; - odp_pool_param.buf.size = sizeof(em_timer_timeout_t); - odp_pool_param.buf.align = ODP_CACHE_LINE_SIZE; - odp_pool_param.buf.cache_size = cache; - odp_pool_param.stats.all = 0; - TMR_DBG_PRINT("tmo handle pool cache %d\n", odp_pool_param.buf.cache_size); - - /* local pool caching may cause out of buffers situation on a core. Adjust */ - uint32_t num = num_buf + ((em_core_count() - 1) * odp_pool_param.buf.cache_size); - - if (num_buf != num) { - TMR_DBG_PRINT("Adjusted pool size %d->%d due to local caching (%d)\n", - num_buf, num, odp_pool_param.buf.cache_size); - } - odp_pool_param.buf.num = num; - snprintf(tmo_pool_name, ODP_POOL_NAME_LEN, "Tmo-pool-%d", tmr->idx); - pool = odp_pool_create(tmo_pool_name, &odp_pool_param); - if (pool != ODP_POOL_INVALID) { - TMR_DBG_PRINT("Created ODP-pool: %s for %d timeouts\n", - tmo_pool_name, odp_pool_param.buf.num); - } - return pool; -} - -static inline odp_event_t alloc_odp_timeout(em_tmo_t tmo) -{ - odp_timeout_t odp_tmo = odp_timeout_alloc(tmo->ring_tmo_pool); - - if (unlikely(odp_tmo == ODP_TIMEOUT_INVALID)) - return ODP_EVENT_INVALID; - - /* init EM event header */ - event_hdr_t *const ev_hdr = odp_timeout_user_area(odp_tmo); - odp_event_t odp_event = odp_timeout_to_event(odp_tmo); - em_event_t event = event_odp2em(odp_event); - - if (unlikely(!ev_hdr)) { - odp_timeout_free(odp_tmo); - return ODP_EVENT_INVALID; - } - - if (esv_enabled()) - event = evstate_alloc_tmo(event, ev_hdr); - ev_hdr->flags.all = 0; - ev_hdr->flags.tmo_type = EM_TMO_TYPE_PERIODIC; - ev_hdr->tmo = tmo; - ev_hdr->event_type = EM_EVENT_TYPE_TIMER_IND; - ev_hdr->event_size = 0; - ev_hdr->egrp = EM_EVENT_GROUP_UNDEF; - ev_hdr->user_area.all = 0; - ev_hdr->user_area.isinit = 1; - - return odp_event; -} - -static inline void free_odp_timeout(odp_event_t odp_event) -{ - if (esv_enabled()) { - em_event_t event = event_odp2em(odp_event); - event_hdr_t *const ev_hdr = event_to_hdr(event); - - event = ev_hdr->event; - evstate_free(event, ev_hdr, EVSTATE__TMO_DELETE); - } - - odp_event_free(odp_event); -} - -static inline em_status_t handle_ack_noskip(em_event_t next_tmo_ev, - event_hdr_t *ev_hdr, - em_queue_t queue) -{ - if (esv_enabled()) - evstate_usr2em_revert(next_tmo_ev, ev_hdr, EVSTATE__TMO_ACK__NOSKIP); - - em_status_t err = em_send(next_tmo_ev, queue); - - if (unlikely(err != EM_OK)) { - err = INTERNAL_ERROR(err, EM_ESCOPE_TMO_ACK, "Tmo ACK: noskip em_send fail"); - ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE; - ev_hdr->tmo = EM_TMO_UNDEF; - } - - return err; /* EM_OK or send-failure */ -} - -static inline void handle_ack_skip(em_tmo_t tmo) -{ - uint64_t odpt = odp_timer_current_tick(tmo->odp_timer_pool); - uint64_t skips; - - if (odpt > tmo->last_tick) /* late, over next 
period */ - skips = ((odpt - tmo->last_tick) / tmo->period) + 1; - else - skips = 1; /* not yet over next period, but late for setting */ - - tmo->last_tick += skips * tmo->period; - TMR_DBG_PRINT("%lu skips * %lu ticks => new tgt %lu\n", - skips, tmo->period, tmo->last_tick); - if (EM_TIMER_TMO_STATS) - tmo->stats.num_period_skips += skips; -} - -static inline bool check_tmo_flags(em_tmo_flag_t flags) -{ - /* Check for valid tmo flags (oneshot OR periodic mainly) */ - if (unlikely(!(flags & (EM_TMO_FLAG_ONESHOT | EM_TMO_FLAG_PERIODIC)))) - return false; - - if (unlikely((flags & EM_TMO_FLAG_ONESHOT) && (flags & EM_TMO_FLAG_PERIODIC))) - return false; - - if (EM_CHECK_LEVEL > 1) { - em_tmo_flag_t inv_flags = ~(EM_TMO_FLAG_ONESHOT | EM_TMO_FLAG_PERIODIC | - EM_TMO_FLAG_NOSKIP); - if (unlikely(flags & inv_flags)) - return false; - } - return true; -} - -static inline bool check_timer_attr(const em_timer_attr_t *tmr_attr) -{ - if (unlikely(tmr_attr == NULL)) { - INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_TIMER_CREATE, - "NULL ptr given"); - return false; - } - if (unlikely(tmr_attr->__internal_check != EM_CHECK_INIT_CALLED)) { - INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_TIMER_CREATE, - "em_timer_attr_t not initialized"); - return false; - } - if (unlikely(tmr_attr->resparam.res_ns && tmr_attr->resparam.res_hz)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_CREATE, - "Only res_ns OR res_hz allowed"); - return false; - } - return true; -} - -static inline bool check_timer_attr_ring(const em_timer_attr_t *ring_attr) -{ - if (unlikely(ring_attr == NULL)) { - INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_TIMER_RING_CREATE, - "NULL attr given"); - return false; - } - if (EM_CHECK_LEVEL > 0 && unlikely(ring_attr->__internal_check != EM_CHECK_INIT_CALLED)) { - INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_TIMER_RING_CREATE, - "em_timer_ring_attr_t not initialized"); - return false; - } - - if (EM_CHECK_LEVEL > 1 && - unlikely(ring_attr->ringparam.base_hz.integer < 1 || - ring_attr->ringparam.max_mul < 1 || - (ring_attr->flags & EM_TIMER_FLAG_RING) == 0)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_RING_CREATE, - "invalid attr values for ring timer"); - return false; - } - - return true; -} - -static inline int find_free_timer_index(void) -{ - /* - * Find a free timer-slot. - * This linear search should not be a performance problem with only a few timers - * available especially when these are typically created at startup. 
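- * (An unused slot is marked by odp_tmr_pool == ODP_TIMER_POOL_INVALID.)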
- * Assumes context is locked - */ - int i; - - for (i = 0; i < EM_ODP_MAX_TIMERS; i++) { - const event_timer_t *timer = &em_shm->timers.timer[i]; - - if (timer->odp_tmr_pool == ODP_TIMER_POOL_INVALID) /* marks unused entry */ - break; - } - return i; -} - -void em_timer_attr_init(em_timer_attr_t *tmr_attr) -{ - if (unlikely(EM_CHECK_LEVEL > 0 && tmr_attr == NULL)) - return; /* just ignore NULL here */ - - /* clear/invalidate unused ring timer */ - memset(&tmr_attr->ringparam, 0, sizeof(em_timer_ring_param_t)); - - /* strategy: first put default resolution, then validate based on that */ - tmr_attr->resparam.res_ns = EM_ODP_TIMER_RESOL_DEF_NS; - tmr_attr->resparam.res_hz = 0; - tmr_attr->resparam.clk_src = EM_TIMER_CLKSRC_DEFAULT; - tmr_attr->flags = EM_TIMER_FLAG_NONE; - - odp_timer_clk_src_t odp_clksrc; - odp_timer_capability_t odp_capa; - odp_timer_res_capability_t odp_res_capa; - int err; - - err = timer_clksrc_em2odp(tmr_attr->resparam.clk_src, &odp_clksrc); - if (unlikely(err)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_ATTR_INIT, - "Unsupported EM-timer clock source:%d", - tmr_attr->resparam.clk_src); - return; - } - err = odp_timer_capability(odp_clksrc, &odp_capa); - if (unlikely(err)) { - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_ATTR_INIT, - "Timer capability: ret %d, odp-clksrc:%d", - err, odp_clksrc); - return; - } - - TMR_DBG_PRINT("odp says highest res %lu\n", odp_capa.highest_res_ns); - if (unlikely(odp_capa.highest_res_ns > tmr_attr->resparam.res_ns)) { - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_ATTR_INIT, - "Timer capability: maxres %lu req %lu, odp-clksrc:%d!", - odp_capa.highest_res_ns, tmr_attr->resparam.res_ns, odp_clksrc); - return; - } - - memset(&odp_res_capa, 0, sizeof(odp_timer_res_capability_t)); - odp_res_capa.res_ns = tmr_attr->resparam.res_ns; - err = odp_timer_res_capability(odp_clksrc, &odp_res_capa); - if (unlikely(err)) { - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_ATTR_INIT, - "Timer res capability failed: ret %d, odp-clksrc:%d, res %lu", - err, odp_clksrc, tmr_attr->resparam.res_ns); - return; - } - - TMR_DBG_PRINT("res %lu -> ODP says min %lu, max %lu\n", - tmr_attr->resparam.res_ns, odp_res_capa.min_tmo, - odp_res_capa.max_tmo); - - tmr_attr->num_tmo = EM_ODP_DEFAULT_TMOS; - if (odp_capa.max_timers && odp_capa.max_timers < EM_ODP_DEFAULT_TMOS) - tmr_attr->num_tmo = odp_capa.max_timers; - - tmr_attr->resparam.min_tmo = odp_res_capa.min_tmo; - tmr_attr->resparam.max_tmo = odp_res_capa.max_tmo; - tmr_attr->name[0] = 0; /* timer_create will add default (no index available here) */ - tmr_attr->__internal_check = EM_CHECK_INIT_CALLED; -} - -em_status_t em_timer_ring_attr_init(em_timer_attr_t *ring_attr, - em_timer_clksrc_t clk_src, - uint64_t base_hz, - uint64_t max_mul, - uint64_t res_ns) -{ - if (unlikely(EM_CHECK_LEVEL > 0 && ring_attr == NULL)) - return EM_ERR_BAD_ARG; - - /* clear unused fields */ - memset(ring_attr, 0, sizeof(em_timer_attr_t)); - - ring_attr->ringparam.base_hz.integer = base_hz; - ring_attr->ringparam.clk_src = clk_src; - ring_attr->ringparam.max_mul = max_mul; - ring_attr->ringparam.res_ns = res_ns; /* 0 is legal and means odp default */ - ring_attr->num_tmo = EM_ODP_DEFAULT_RING_TMOS; - ring_attr->flags = EM_TIMER_FLAG_RING; - ring_attr->name[0] = 0; /* default at ring_create, index not known here */ - - odp_timer_clk_src_t odp_clksrc; - odp_timer_capability_t capa; - int rv = timer_clksrc_em2odp(ring_attr->ringparam.clk_src, &odp_clksrc); - - if (unlikely(rv)) - return EM_ERR_BAD_ARG; - if 
(unlikely(odp_timer_capability(odp_clksrc, &capa) != 0)) { - TMR_DBG_PRINT("odp_timer_capability returned error for clk_src %u\n", odp_clksrc); - return EM_ERR_BAD_ARG; /* assume clksrc not supported */ - } - - if (capa.periodic.max_pools == 0) /* no odp support */ - return EM_ERR_NOT_IMPLEMENTED; - - if (capa.periodic.max_timers < ring_attr->num_tmo) - ring_attr->num_tmo = capa.periodic.max_timers; - - odp_timer_periodic_capability_t pcapa; - - pcapa.base_freq_hz.integer = ring_attr->ringparam.base_hz.integer; - pcapa.base_freq_hz.numer = ring_attr->ringparam.base_hz.numer; - pcapa.base_freq_hz.denom = ring_attr->ringparam.base_hz.denom; - pcapa.max_multiplier = ring_attr->ringparam.max_mul; - pcapa.res_ns = ring_attr->ringparam.res_ns; - rv = odp_timer_periodic_capability(odp_clksrc, &pcapa); - ring_attr->ringparam.res_ns = pcapa.res_ns; /* update back */ - ring_attr->ringparam.base_hz.integer = pcapa.base_freq_hz.integer; - ring_attr->ringparam.base_hz.numer = pcapa.base_freq_hz.numer; - ring_attr->ringparam.base_hz.denom = pcapa.base_freq_hz.denom; - if (pcapa.max_multiplier < ring_attr->ringparam.max_mul) /* don't increase here */ - ring_attr->ringparam.max_mul = pcapa.max_multiplier; - if (rv != 1) /* 1 means all values supported */ - return EM_ERR_BAD_ARG; - - ring_attr->__internal_check = EM_CHECK_INIT_CALLED; - return EM_OK; -} - -em_status_t em_timer_capability(em_timer_capability_t *capa, em_timer_clksrc_t clk_src) -{ - if (EM_CHECK_LEVEL > 0 && unlikely(capa == NULL)) { - EM_LOG(EM_LOG_DBG, "%s(): NULL capa ptr!\n", __func__); - return EM_ERR_BAD_POINTER; - } - - odp_timer_clk_src_t odp_clksrc; - odp_timer_capability_t odp_capa; - - if (unlikely(timer_clksrc_em2odp(clk_src, &odp_clksrc) || - odp_timer_capability(odp_clksrc, &odp_capa))) { - EM_LOG(EM_LOG_DBG, "%s: Not supported clk_src %d\n", __func__, clk_src); - return EM_ERR_BAD_ARG; - } - - capa->max_timers = odp_capa.max_pools < EM_ODP_MAX_TIMERS ? 
- odp_capa.max_pools : EM_ODP_MAX_TIMERS; - capa->max_num_tmo = odp_capa.max_timers; - capa->max_res.clk_src = clk_src; - capa->max_res.res_ns = odp_capa.max_res.res_ns; - capa->max_res.res_hz = odp_capa.max_res.res_hz; - capa->max_res.min_tmo = odp_capa.max_res.min_tmo; - capa->max_res.max_tmo = odp_capa.max_res.max_tmo; - capa->max_tmo.clk_src = clk_src; - capa->max_tmo.res_ns = odp_capa.max_tmo.res_ns; - capa->max_tmo.res_hz = odp_capa.max_tmo.res_hz; - capa->max_tmo.min_tmo = odp_capa.max_tmo.min_tmo; - capa->max_tmo.max_tmo = odp_capa.max_tmo.max_tmo; - - /* ring timer basic capability */ - capa->ring.max_rings = odp_capa.periodic.max_pools; /* 0 if not supported */ - capa->ring.max_num_tmo = odp_capa.periodic.max_timers; - capa->ring.min_base_hz.integer = odp_capa.periodic.min_base_freq_hz.integer; - capa->ring.min_base_hz.numer = odp_capa.periodic.min_base_freq_hz.numer; - capa->ring.min_base_hz.denom = odp_capa.periodic.min_base_freq_hz.denom; - capa->ring.max_base_hz.integer = odp_capa.periodic.max_base_freq_hz.integer; - capa->ring.max_base_hz.numer = odp_capa.periodic.max_base_freq_hz.numer; - capa->ring.max_base_hz.denom = odp_capa.periodic.max_base_freq_hz.denom; - return EM_OK; -} - -em_status_t em_timer_res_capability(em_timer_res_param_t *res, em_timer_clksrc_t clk_src) -{ - if (EM_CHECK_LEVEL > 0 && unlikely(res == NULL)) { - EM_LOG(EM_LOG_DBG, "%s: NULL ptr res\n", __func__); - return EM_ERR_BAD_POINTER; - } - - odp_timer_clk_src_t odp_clksrc; - odp_timer_res_capability_t odp_res_capa; - int err; - - err = timer_clksrc_em2odp(clk_src, &odp_clksrc); - if (unlikely(err)) { - EM_LOG(EM_LOG_DBG, "%s: Not supported clk_src %d\n", __func__, clk_src); - return EM_ERR_BAD_ARG; - } - memset(&odp_res_capa, 0, sizeof(odp_timer_res_capability_t)); - odp_res_capa.res_ns = res->res_ns; - odp_res_capa.res_hz = res->res_hz; /* ODP will check if both were set */ - odp_res_capa.max_tmo = res->max_tmo; - err = odp_timer_res_capability(odp_clksrc, &odp_res_capa); - if (unlikely(err)) { - EM_LOG(EM_LOG_DBG, "%s: ODP res_capability failed (ret %d)!\n", __func__, err); - return EM_ERR_BAD_ARG; - } - res->min_tmo = odp_res_capa.min_tmo; - res->max_tmo = odp_res_capa.max_tmo; - res->res_ns = odp_res_capa.res_ns; - res->res_hz = odp_res_capa.res_hz; - res->clk_src = clk_src; - return EM_OK; -} - -em_status_t em_timer_ring_capability(em_timer_ring_param_t *ring) -{ - odp_timer_clk_src_t odp_clksrc; - odp_timer_periodic_capability_t pcapa; - - if (EM_CHECK_LEVEL > 0 && unlikely(ring == NULL)) { - EM_LOG(EM_LOG_DBG, "%s: NULL ptr ring\n", __func__); - return EM_ERR_BAD_POINTER; - } - - if (unlikely(timer_clksrc_em2odp(ring->clk_src, &odp_clksrc))) { - EM_LOG(EM_LOG_DBG, "%s: Invalid clk_src %d\n", __func__, ring->clk_src); - return EM_ERR_BAD_ARG; - } - - pcapa.base_freq_hz.integer = ring->base_hz.integer; - pcapa.base_freq_hz.numer = ring->base_hz.numer; - pcapa.base_freq_hz.denom = ring->base_hz.denom; - pcapa.max_multiplier = ring->max_mul; - pcapa.res_ns = ring->res_ns; - int rv = odp_timer_periodic_capability(odp_clksrc, &pcapa); - - ring->base_hz.integer = pcapa.base_freq_hz.integer; - ring->base_hz.numer = pcapa.base_freq_hz.numer; - ring->base_hz.denom = pcapa.base_freq_hz.denom; - ring->max_mul = pcapa.max_multiplier; - ring->res_ns = pcapa.res_ns; - - if (unlikely(rv < 0)) { - EM_LOG(EM_LOG_DBG, "%s: odp failed periodic capability for clk_src %d\n", - __func__, ring->clk_src); - return EM_ERR_LIB_FAILED; - } - if (rv == 0) - return EM_ERR_NOT_SUPPORTED; /* no error, but no exact support */ 
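- /* here rv == 1, i.e. the returned (possibly adjusted) values are fully supported */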
- - return EM_OK; /* meet or exceed */ -} - -em_timer_t em_timer_create(const em_timer_attr_t *tmr_attr) -{ - /* timers are initialized? */ - if (unlikely(em_shm->timers.init_check != EM_CHECK_INIT_CALLED)) { - INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_TIMER_CREATE, - "Timer is not initialized!"); - return EM_TIMER_UNDEF; - } - - if (EM_CHECK_LEVEL > 0) { - if (check_timer_attr(tmr_attr) == false) - return EM_TIMER_UNDEF; - } - - odp_timer_pool_param_t odp_tpool_param; - odp_timer_clk_src_t odp_clksrc; - - odp_timer_pool_param_init(&odp_tpool_param); - odp_tpool_param.res_ns = tmr_attr->resparam.res_ns; - odp_tpool_param.res_hz = tmr_attr->resparam.res_hz; - odp_tpool_param.min_tmo = tmr_attr->resparam.min_tmo; - odp_tpool_param.max_tmo = tmr_attr->resparam.max_tmo; - odp_tpool_param.num_timers = tmr_attr->num_tmo; - odp_tpool_param.priv = tmr_attr->flags & EM_TIMER_FLAG_PRIVATE ? 1 : 0; - if (unlikely(timer_clksrc_em2odp(tmr_attr->resparam.clk_src, &odp_clksrc))) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_CREATE, - "Unsupported EM-timer clock source:%d", - tmr_attr->resparam.clk_src); - return EM_TIMER_UNDEF; - } - odp_tpool_param.clk_src = odp_clksrc; - - /* check queue type support */ - odp_timer_capability_t capa; - - if (unlikely(odp_timer_capability(odp_clksrc, &capa))) { - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_CREATE, - "ODP timer capa failed for clk:%d", - tmr_attr->resparam.clk_src); - return EM_TIMER_UNDEF; - } - if (unlikely(!capa.queue_type_sched)) { /* must support scheduled queues */ - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_CREATE, - "ODP does not support scheduled q for clk:%d", - tmr_attr->resparam.clk_src); - return EM_TIMER_UNDEF; - } - - odp_ticketlock_lock(&em_shm->timers.timer_lock); - - int i = find_free_timer_index(); - - if (unlikely(i >= EM_ODP_MAX_TIMERS)) { - odp_ticketlock_unlock(&em_shm->timers.timer_lock); - INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TIMER_CREATE, - "No more timers available"); - return EM_TIMER_UNDEF; - } - - event_timer_t *timer = &em_shm->timers.timer[i]; - char timer_pool_name[ODP_TIMER_POOL_NAME_LEN]; - const char *name = tmr_attr->name; - const char *reason = ""; - - if (tmr_attr->name[0] == '\0') { /* replace NULL with default */ - snprintf(timer_pool_name, ODP_TIMER_POOL_NAME_LEN, - "EM-timer-%d", timer->idx); /* idx initialized by timer_init */ - name = timer_pool_name; - } - - TMR_DBG_PRINT("Creating ODP tmr pool: clk %d, res_ns %lu, res_hz %lu\n", - odp_tpool_param.clk_src, odp_tpool_param.res_ns, - odp_tpool_param.res_hz); - timer->odp_tmr_pool = odp_timer_pool_create(name, &odp_tpool_param); - if (unlikely(timer->odp_tmr_pool == ODP_TIMER_POOL_INVALID)) { - reason = "odp_timer_pool_create error"; - goto error_locked; - } - TMR_DBG_PRINT("Created timer: %s with idx: %d\n", name, timer->idx); - - /* tmo handle pool can be per-timer or shared */ - if (!em_shm->opt.timer.shared_tmo_pool_enable) { /* per-timer pool */ - odp_pool_t opool = create_tmo_handle_pool(tmr_attr->num_tmo, - em_shm->opt.timer.tmo_pool_cache, timer); - - if (unlikely(opool == ODP_POOL_INVALID)) { - reason = "Tmo handle buffer pool create failed"; - goto error_locked; - } - - timer->tmo_pool = opool; - TMR_DBG_PRINT("Created per-timer tmo handle pool\n"); - } else { - if (em_shm->timers.shared_tmo_pool == ODP_POOL_INVALID) { /* first timer */ - odp_pool_t opool = - create_tmo_handle_pool(em_shm->opt.timer.shared_tmo_pool_size, - em_shm->opt.timer.tmo_pool_cache, timer); - - if (unlikely(opool == ODP_POOL_INVALID)) { - 
reason = "Shared tmo handle buffer pool create failed"; - goto error_locked; - } - timer->tmo_pool = opool; - em_shm->timers.shared_tmo_pool = opool; - TMR_DBG_PRINT("Created shared tmo handle pool for total %u tmos\n", - em_shm->opt.timer.shared_tmo_pool_size); - } else { - timer->tmo_pool = em_shm->timers.shared_tmo_pool; - } - } - - timer->num_tmo_reserve = tmr_attr->num_tmo; - if (em_shm->opt.timer.shared_tmo_pool_enable) { /* check reservation */ - uint32_t left = em_shm->opt.timer.shared_tmo_pool_size - em_shm->timers.reserved; - - if (timer->num_tmo_reserve > left) { - TMR_DBG_PRINT("Not enough tmos left in shared pool (%u)\n", left); - reason = "Not enough tmos left in shared pool"; - goto error_locked; - } - em_shm->timers.reserved += timer->num_tmo_reserve; - TMR_DBG_PRINT("Updated shared tmo reserve by +%u to %u\n", - timer->num_tmo_reserve, em_shm->timers.reserved); - } - timer->flags = tmr_attr->flags; - timer->plain_q_ok = capa.queue_type_plain; - timer->is_ring = false; - -#if ODP_VERSION_API_NUM(1, 43, 0) <= ODP_VERSION_API - if (odp_timer_pool_start_multi(&timer->odp_tmr_pool, 1) != 1) { - reason = "odp_timer_pool_start_multi failed"; - goto error_locked; - } -#else - odp_timer_pool_start(); -#endif - em_shm->timers.num_timers++; - odp_ticketlock_unlock(&em_shm->timers.timer_lock); - - TMR_DBG_PRINT("ret %" PRI_TMR ", total timers %u\n", TMR_I2H(i), em_shm->timers.num_timers); - return TMR_I2H(i); - -error_locked: - cleanup_timer_create_fail(timer); - odp_ticketlock_unlock(&em_shm->timers.timer_lock); - - TMR_DBG_PRINT("ERR odp tmr pool in: clk %u, res %lu, min %lu, max %lu, num %u\n", - odp_tpool_param.clk_src, odp_tpool_param.res_ns, - odp_tpool_param.min_tmo, odp_tpool_param.max_tmo, odp_tpool_param.num_timers); - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_CREATE, - "Timer pool create failed, reason: ", reason); - return EM_TIMER_UNDEF; -} - -em_timer_t em_timer_ring_create(const em_timer_attr_t *ring_attr) -{ - /* timers are initialized? */ - if (unlikely(em_shm->timers.init_check != EM_CHECK_INIT_CALLED)) { - INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_TIMER_CREATE, - "Timer is disabled!"); - return EM_TIMER_UNDEF; - } - - if (EM_CHECK_LEVEL > 0 && unlikely(check_timer_attr_ring(ring_attr) == false)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_RING_CREATE, - "NULL or incorrect attribute"); - return EM_TIMER_UNDEF; - } - - odp_timer_pool_param_t odp_tpool_param; - odp_timer_clk_src_t odp_clksrc; - - odp_timer_pool_param_init(&odp_tpool_param); - odp_tpool_param.timer_type = ODP_TIMER_TYPE_PERIODIC; - odp_tpool_param.exp_mode = ODP_TIMER_EXP_AFTER; - odp_tpool_param.num_timers = ring_attr->num_tmo; - odp_tpool_param.priv = ring_attr->flags & EM_TIMER_FLAG_PRIVATE ? 
1 : 0; - if (unlikely(timer_clksrc_em2odp(ring_attr->ringparam.clk_src, &odp_clksrc))) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_RING_CREATE, - "Unsupported EM-timer clock source:%d", - ring_attr->ringparam.clk_src); - return EM_TIMER_UNDEF; - } - odp_tpool_param.clk_src = odp_clksrc; - odp_tpool_param.periodic.base_freq_hz.integer = ring_attr->ringparam.base_hz.integer; - odp_tpool_param.periodic.base_freq_hz.numer = ring_attr->ringparam.base_hz.numer; - odp_tpool_param.periodic.base_freq_hz.denom = ring_attr->ringparam.base_hz.denom; - odp_tpool_param.periodic.max_multiplier = ring_attr->ringparam.max_mul; - odp_tpool_param.res_hz = 0; - odp_tpool_param.res_ns = ring_attr->ringparam.res_ns; - - /* check queue type support */ - odp_timer_capability_t capa; - - if (unlikely(odp_timer_capability(odp_clksrc, &capa))) { - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_RING_CREATE, - "ODP timer capa failed for clk:%d", - ring_attr->ringparam.clk_src); - return EM_TIMER_UNDEF; - } - if (unlikely(!capa.queue_type_sched)) { /* must support scheduled queues */ - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_RING_CREATE, - "ODP does not support scheduled q for clk:%d", - ring_attr->ringparam.clk_src); - return EM_TIMER_UNDEF; - } - - /* lock context to find free slot and update it */ - timer_storage_t *const tmrs = &em_shm->timers; - - odp_ticketlock_lock(&tmrs->timer_lock); - - /* is there enough events left in shared pool ? */ - uint32_t left = em_shm->opt.timer.ring.timer_event_pool_size - tmrs->ring_reserved; - - if (ring_attr->num_tmo > left) { - odp_ticketlock_unlock(&tmrs->timer_lock); - INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TIMER_RING_CREATE, - "Too few ring timeout events left (req %u/%u)", - ring_attr->num_tmo, left); - return EM_TIMER_UNDEF; - } - - /* allocate timer */ - int i = find_free_timer_index(); - - if (unlikely(i >= EM_ODP_MAX_TIMERS)) { - odp_ticketlock_unlock(&tmrs->timer_lock); - INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TIMER_RING_CREATE, - "No more timers available"); - return EM_TIMER_UNDEF; - } - - event_timer_t *timer = &tmrs->timer[i]; - - /* then timer pool */ - char timer_pool_name[ODP_TIMER_POOL_NAME_LEN]; - const char *name = ring_attr->name; - const char *reason = ""; - - if (ring_attr->name[0] == '\0') { /* replace NULL with default */ - snprintf(timer_pool_name, ODP_TIMER_POOL_NAME_LEN, - "EM-timer-%d", timer->idx); /* idx initialized by timer_init */ - name = timer_pool_name; - } - - TMR_DBG_PRINT("Creating ODP periodic tmr pool: clk %d, res_ns %lu, base_hz %lu\n", - odp_tpool_param.clk_src, odp_tpool_param.res_ns, - odp_tpool_param.periodic.base_freq_hz.integer); - timer->odp_tmr_pool = odp_timer_pool_create(name, &odp_tpool_param); - if (unlikely(timer->odp_tmr_pool == ODP_TIMER_POOL_INVALID)) { - reason = "odp_timer_pool_create failed"; - goto error_locked; - } - TMR_DBG_PRINT("Created ring timer: %s with idx: %d\n", name, timer->idx); - - /* tmo handle pool can be per-timer or shared */ - if (!em_shm->opt.timer.shared_tmo_pool_enable) { /* per-timer pool */ - odp_pool_t opool = create_tmo_handle_pool(ring_attr->num_tmo, - em_shm->opt.timer.tmo_pool_cache, timer); - - if (unlikely(opool == ODP_POOL_INVALID)) { - reason = "tmo handle pool creation failed"; - goto error_locked; - } - - timer->tmo_pool = opool; - TMR_DBG_PRINT("Created per-timer tmo handle pool %p\n", opool); - } else { - if (em_shm->timers.shared_tmo_pool == ODP_POOL_INVALID) { /* first timer */ - odp_pool_t opool = - 
create_tmo_handle_pool(em_shm->opt.timer.shared_tmo_pool_size,
- em_shm->opt.timer.tmo_pool_cache, timer);
-
- if (unlikely(opool == ODP_POOL_INVALID)) {
- reason = "Shared tmo handle pool creation failed";
- goto error_locked;
- }
-
- timer->tmo_pool = opool;
- em_shm->timers.shared_tmo_pool = opool;
- TMR_DBG_PRINT("Created shared tmo handle pool %p\n", opool);
- } else {
- timer->tmo_pool = em_shm->timers.shared_tmo_pool;
- }
- }
-
- timer->num_tmo_reserve = ring_attr->num_tmo;
- if (em_shm->opt.timer.shared_tmo_pool_enable) { /* check reservation */
- left = em_shm->opt.timer.shared_tmo_pool_size - em_shm->timers.reserved;
-
- if (timer->num_tmo_reserve > left) {
- TMR_DBG_PRINT("Not enough tmos left in shared pool (%u)\n", left);
- reason = "Not enough tmos left in shared pool";
- goto error_locked;
- }
- em_shm->timers.reserved += timer->num_tmo_reserve;
- TMR_DBG_PRINT("Updated shared tmo reserve by +%u to %u\n",
- timer->num_tmo_reserve, em_shm->timers.reserved);
- }
-
- /* odp timeout event pool for ring tmo events is always shared for all ring timers */
- if (tmrs->ring_tmo_pool == ODP_POOL_INVALID) {
- odp_pool_param_t odp_tmo_pool_param;
- char pool_name[ODP_POOL_NAME_LEN];
-
- odp_pool_param_init(&odp_tmo_pool_param);
- odp_tmo_pool_param.type = ODP_POOL_TIMEOUT;
- odp_tmo_pool_param.tmo.cache_size = em_shm->opt.timer.ring.timer_event_pool_cache;
- TMR_DBG_PRINT("ring tmo event pool cache %u\n", odp_tmo_pool_param.tmo.cache_size);
- odp_tmo_pool_param.tmo.num = em_shm->opt.timer.ring.timer_event_pool_size;
- TMR_DBG_PRINT("ring tmo event pool size %u\n", odp_tmo_pool_param.tmo.num);
- odp_tmo_pool_param.tmo.uarea_size = sizeof(event_hdr_t);
- odp_tmo_pool_param.stats.all = 0;
- snprintf(pool_name, ODP_POOL_NAME_LEN, "Ring-%d-tmo-pool", timer->idx);
- tmrs->ring_tmo_pool = odp_pool_create(pool_name, &odp_tmo_pool_param);
- if (unlikely(tmrs->ring_tmo_pool == ODP_POOL_INVALID)) {
- reason = "odp timeout event pool creation failed";
- goto error_locked;
- }
- TMR_DBG_PRINT("Created ODP-timeout event pool %p: '%s'\n",
- tmrs->ring_tmo_pool, pool_name);
- }
-
- tmrs->ring_reserved += ring_attr->num_tmo;
- TMR_DBG_PRINT("Updated ring reserve by +%u to %u\n", ring_attr->num_tmo,
- tmrs->ring_reserved);
- tmrs->num_rings++;
- tmrs->num_timers++;
- timer->num_ring_reserve = ring_attr->num_tmo;
- timer->flags = ring_attr->flags;
- timer->plain_q_ok = capa.queue_type_plain;
- timer->is_ring = true;
- tmrs->num_ring_create_calls++;
-
-#if ODP_VERSION_API_NUM(1, 43, 0) <= ODP_VERSION_API
- if (odp_timer_pool_start_multi(&timer->odp_tmr_pool, 1) != 1) {
- reason = "odp_timer_pool_start_multi failed";
- goto error_locked;
- }
-#else
- odp_timer_pool_start();
-#endif
-
- odp_ticketlock_unlock(&em_shm->timers.timer_lock);
-
- TMR_DBG_PRINT("ret %" PRI_TMR ", total timers %u\n", TMR_I2H(i), tmrs->num_timers);
- return TMR_I2H(i);
-
-error_locked:
- cleanup_timer_create_fail(timer);
- odp_ticketlock_unlock(&tmrs->timer_lock);
-
- TMR_DBG_PRINT("ERR odp tmr ring pool in: clk %u, res %lu, base_hz %lu, max_mul %lu, num tmo %u\n",
- ring_attr->ringparam.clk_src,
- ring_attr->ringparam.res_ns,
- ring_attr->ringparam.base_hz.integer,
- ring_attr->ringparam.max_mul,
- ring_attr->num_tmo);
- INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_RING_CREATE,
- "Ring timer create failed, reason: %s", reason);
- return EM_TIMER_UNDEF;
-}
-
-em_status_t em_timer_delete(em_timer_t tmr)
-{
- timer_storage_t *const tmrs = &em_shm->timers;
- int i = TMR_H2I(tmr);
- em_status_t rv = EM_OK;
- odp_pool_t pool_fail =
ODP_POOL_INVALID; - - /* take lock before checking so nothing can change */ - odp_ticketlock_lock(&tmrs->timer_lock); - if (unlikely(!is_timer_valid(tmr))) { - odp_ticketlock_unlock(&tmrs->timer_lock); - return INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_DELETE, - "Invalid timer:%" PRI_TMR "", tmr); - } - - if (tmrs->timer[i].tmo_pool != tmrs->shared_tmo_pool) { /* don't delete shared pool */ - if (unlikely(odp_pool_destroy(tmrs->timer[i].tmo_pool) != 0)) { - rv = EM_ERR_LIB_FAILED; - pool_fail = tmrs->timer[i].tmo_pool; - } else { - TMR_DBG_PRINT("Deleted odp pool %p\n", tmrs->timer[i].tmo_pool); - } - } - tmrs->timer[i].tmo_pool = ODP_POOL_INVALID; - odp_timer_pool_destroy(tmrs->timer[i].odp_tmr_pool); - tmrs->timer[i].odp_tmr_pool = ODP_TIMER_POOL_INVALID; - - /* Ring delete. Don't remove shared event pool as user could still have event */ - if (tmrs->timer[i].is_ring && tmrs->num_rings) { - tmrs->num_rings--; - if (tmrs->num_rings < 1) - TMR_DBG_PRINT("Last ring deleted"); - tmrs->ring_reserved -= tmrs->timer[i].num_ring_reserve; - TMR_DBG_PRINT("Updated ring reserve by -%u to %u\n", - tmrs->timer[i].num_ring_reserve, tmrs->ring_reserved); - tmrs->timer[i].num_ring_reserve = 0; - } - - tmrs->num_timers--; - if (tmrs->shared_tmo_pool != ODP_POOL_INVALID) { /* shared pool in use */ - tmrs->reserved -= tmrs->timer[i].num_tmo_reserve; - TMR_DBG_PRINT("Updated tmo reserve by -%u to %u\n", - tmrs->timer[i].num_tmo_reserve, tmrs->reserved); - tmrs->timer[i].num_tmo_reserve = 0; - } - if (tmrs->num_timers == 0 && tmrs->shared_tmo_pool != ODP_POOL_INVALID) { - /* no more timers, delete shared tmo pool */ - if (unlikely(odp_pool_destroy(tmrs->shared_tmo_pool) != 0)) { - rv = EM_ERR_LIB_FAILED; - pool_fail = tmrs->shared_tmo_pool; - } else { - TMR_DBG_PRINT("Deleted shared tmo pool %p\n", tmrs->shared_tmo_pool); - tmrs->shared_tmo_pool = ODP_POOL_INVALID; - } - } - - odp_ticketlock_unlock(&tmrs->timer_lock); - if (unlikely(rv != EM_OK)) { - return INTERNAL_ERROR(rv, EM_ESCOPE_TIMER_DELETE, - "timer %p delete fail, odp pool %p fail\n", tmr, pool_fail); - } - TMR_DBG_PRINT("ok, deleted timer %p, num_timers %u\n", tmr, tmrs->num_timers); - return rv; -} - -em_timer_tick_t em_timer_current_tick(em_timer_t tmr) -{ - const timer_storage_t *const tmrs = &em_shm->timers; - int i = TMR_H2I(tmr); - - if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) - return 0; - - return odp_timer_current_tick(tmrs->timer[i].odp_tmr_pool); -} - -em_tmo_t em_tmo_create(em_timer_t tmr, em_tmo_flag_t flags, em_queue_t queue) -{ - return em_tmo_create_arg(tmr, flags, queue, NULL); -} - -em_tmo_t em_tmo_create_arg(em_timer_t tmr, em_tmo_flag_t flags, - em_queue_t queue, em_tmo_args_t *args) -{ - const queue_elem_t *const q_elem = queue_elem_get(queue); - - if (EM_CHECK_LEVEL > 0) { - if (unlikely(!is_timer_valid(tmr))) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE, - "Invalid timer:%" PRI_TMR "", tmr); - return EM_TMO_UNDEF; - } - if (unlikely(q_elem == NULL || !queue_allocated(q_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE, - "Tmr:%" PRI_TMR ": inv.Q:%" PRI_QUEUE "", - tmr, queue); - return EM_TMO_UNDEF; - } - if (unlikely(!is_queue_valid_type(tmr, q_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE, - "Tmr:%" PRI_TMR ": inv.Q (type):%" PRI_QUEUE "", - tmr, queue); - return EM_TMO_UNDEF; - } - if (unlikely(!check_tmo_flags(flags))) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE, - "Tmr:%" PRI_TMR ": inv. 
tmo-flags:0x%x", - tmr, flags); - return EM_TMO_UNDEF; - } - } - - int i = TMR_H2I(tmr); - - if (EM_CHECK_LEVEL > 1 && - em_shm->timers.timer[i].is_ring && - !(flags & EM_TMO_FLAG_PERIODIC)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE, - "Tmr:%" PRI_TMR ": asking oneshot with ring timer!", - tmr); - return EM_TMO_UNDEF; - } - - odp_buffer_t tmo_buf = odp_buffer_alloc(em_shm->timers.timer[i].tmo_pool); - - if (unlikely(tmo_buf == ODP_BUFFER_INVALID)) { - INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TMO_CREATE, - "Tmr:%" PRI_TMR ": tmo pool exhausted", tmr); - return EM_TMO_UNDEF; - } - - em_timer_timeout_t *tmo = odp_buffer_addr(tmo_buf); - odp_timer_pool_t odptmr = em_shm->timers.timer[i].odp_tmr_pool; - - const void *userptr = NULL; - - if (args != NULL) - userptr = args->userptr; - - tmo->odp_timer = odp_timer_alloc(odptmr, q_elem->odp_queue, userptr); - if (unlikely(tmo->odp_timer == ODP_TIMER_INVALID)) { - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_CREATE, - "Tmr:%" PRI_TMR ": odp_timer_alloc() failed", tmr); - odp_buffer_free(tmo_buf); - return EM_TMO_UNDEF; - } - - /* OK, init state. Some values copied for faster access runtime */ - tmo->period = 0; - tmo->odp_timer_pool = odptmr; - tmo->timer = tmr; - tmo->odp_buffer = tmo_buf; - tmo->flags = flags; - tmo->queue = queue; - tmo->is_ring = em_shm->timers.timer[i].is_ring; - tmo->odp_timeout = ODP_EVENT_INVALID; - tmo->ring_tmo_pool = em_shm->timers.ring_tmo_pool; - - if (tmo->is_ring) { /* pre-allocate timeout event to save time at start */ - odp_event_t odp_tmo_event = alloc_odp_timeout(tmo); - - if (unlikely(odp_tmo_event == ODP_EVENT_INVALID)) { - INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TMO_CREATE, - "Ring: odp timeout event allocation failed"); - odp_timer_free(tmo->odp_timer); - odp_buffer_free(tmo_buf); - return EM_TMO_UNDEF; - } - tmo->odp_timeout = odp_tmo_event; - TMR_DBG_PRINT("Ring: allocated odp timeout ev %p\n", tmo->odp_timeout); - } - - if (EM_TIMER_TMO_STATS) - memset(&tmo->stats, 0, sizeof(em_tmo_stats_t)); - - odp_atomic_init_u32(&tmo->state, EM_TMO_STATE_IDLE); - TMR_DBG_PRINT("ODP timer %p allocated\n", tmo->odp_timer); - TMR_DBG_PRINT("tmo %p created\n", tmo); - return tmo; -} - -em_status_t em_tmo_delete(em_tmo_t tmo, em_event_t *cur_event) -{ - if (EM_CHECK_LEVEL > 0) { - RETURN_ERROR_IF(tmo == EM_TMO_UNDEF || cur_event == NULL, - EM_ERR_BAD_ARG, EM_ESCOPE_TMO_DELETE, - "Invalid args: tmo:%" PRI_TMO " cur_event:%p", - tmo, cur_event); - } - - *cur_event = EM_EVENT_UNDEF; - em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); - - if (EM_CHECK_LEVEL > 1) { - /* check that tmo buf is valid before accessing other struct members */ - RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer), - EM_ERR_BAD_ID, EM_ESCOPE_TMO_DELETE, - "Invalid tmo buffer"); - - RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN, - EM_ERR_BAD_STATE, EM_ESCOPE_TMO_DELETE, - "Invalid tmo state:%d", tmo_state); - - RETURN_ERROR_IF(tmo->odp_timer == ODP_TIMER_INVALID, - EM_ERR_BAD_ID, EM_ESCOPE_TMO_DELETE, - "Invalid tmo odp_timer, deleted?"); - } - - TMR_DBG_PRINT("ODP timer %p\n", tmo->odp_timer); - - /* change this first to increase propability to catch e.g. 
a double delete */
- odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_UNKNOWN);
-
- odp_event_t odp_evt;
-
-#if ODP_VERSION_API_NUM(1, 43, 0) <= ODP_VERSION_API
- /* ODP 1.43 does not allow deleting an active timer, cancel it first */
- odp_evt = ODP_EVENT_INVALID;
- if (tmo_state == EM_TMO_STATE_ACTIVE) {
- int cret;
-
- if (tmo->is_ring) {
- cret = odp_timer_periodic_cancel(tmo->odp_timer);
- RETURN_ERROR_IF(cret != 0, EM_ERR_LIB_FAILED,
- EM_ESCOPE_TMO_DELETE,
- "ring active but odp timer cancel failed, rv %d\n", cret);
- } else {
- cret = odp_timer_cancel(tmo->odp_timer, &odp_evt);
- RETURN_ERROR_IF(cret == ODP_TIMER_FAIL, EM_ERR_LIB_FAILED,
- EM_ESCOPE_TMO_DELETE,
- "was active but odp timer cancel failed, rv %d\n", cret);
- }
-
- TMR_DBG_PRINT("tmo cancelled first, odp rv %d\n", cret);
- }
-
- int fret = odp_timer_free(tmo->odp_timer);
-
- RETURN_ERROR_IF(fret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_DELETE,
- "odp timer free failed!?, rv %d\n", fret);
-#else
- odp_evt = odp_timer_free(tmo->odp_timer);
-#endif
- odp_buffer_t tmp = tmo->odp_buffer;
- em_event_t tmo_ev = EM_EVENT_UNDEF;
-
- tmo->odp_timer = ODP_TIMER_INVALID;
- tmo->odp_buffer = ODP_BUFFER_INVALID;
- tmo->timer = EM_TIMER_UNDEF;
-
- if (tmo->is_ring && tmo->odp_timeout != ODP_EVENT_INVALID) {
- TMR_DBG_PRINT("ring: free unused ODP timeout ev %p\n", tmo->odp_timeout);
- free_odp_timeout(tmo->odp_timeout);
- tmo->odp_timeout = ODP_EVENT_INVALID;
- }
-
- if (odp_evt != ODP_EVENT_INVALID) {
- /* these errors do not free the buffer, to prevent potential further corruption */
- RETURN_ERROR_IF(EM_CHECK_LEVEL > 2 && !odp_event_is_valid(odp_evt),
- EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_DELETE,
- "Corrupted tmo event returned");
- RETURN_ERROR_IF(tmo->is_ring, EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_DELETE,
- "odp_timer_free returned event %p for a ring!\n", odp_evt);
-
- tmo_ev = event_odp2em(odp_evt);
- if (esv_enabled())
- tmo_ev = evstate_em2usr(tmo_ev, event_to_hdr(tmo_ev), EVSTATE__TMO_DELETE);
- }
-
- odp_buffer_free(tmp);
- *cur_event = tmo_ev;
- TMR_DBG_PRINT("tmo %p delete ok, event returned %p\n", tmo, tmo_ev);
- return EM_OK;
-}
-
-em_status_t em_tmo_set_abs(em_tmo_t tmo, em_timer_tick_t ticks_abs,
- em_event_t tmo_ev)
-{
- RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 &&
- (tmo == EM_TMO_UNDEF || tmo_ev == EM_EVENT_UNDEF),
- EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_ABS,
- "Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "",
- tmo, tmo_ev);
- /* check that tmo buf is valid before accessing other struct members */
- RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer),
- EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_ABS,
- "Invalid tmo buffer");
- RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 &&
- (tmo->flags & EM_TMO_FLAG_PERIODIC),
- EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_SET_ABS,
- "Cannot set periodic tmo, use _set_periodic()");
- RETURN_ERROR_IF(EM_CHECK_LEVEL > 2 &&
- !is_event_type_valid(tmo_ev),
- EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_ABS,
- "invalid event type");
- if (EM_CHECK_LEVEL > 1) {
- em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state);
-
- RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN,
- EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_ABS,
- "Invalid tmo state:%d", tmo_state);
- }
- RETURN_ERROR_IF(EM_CHECK_LEVEL > 2 &&
- tmo->odp_timer == ODP_TIMER_INVALID,
- EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_ABS,
- "Invalid tmo odp_timer");
-
- event_hdr_t *ev_hdr = event_to_hdr(tmo_ev);
- odp_event_t odp_ev = event_em2odp(tmo_ev);
- bool esv_ena = esv_enabled();
- odp_timer_start_t startp;
-
- RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 &&
- ev_hdr->event_type ==
EM_EVENT_TYPE_TIMER_IND, - EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_ABS, - "Invalid event type: timer-ring"); - - if (esv_ena) - evstate_usr2em(tmo_ev, ev_hdr, EVSTATE__TMO_SET_ABS); - - /* set tmo active and arm with absolute time */ - startp.tick_type = ODP_TIMER_TICK_ABS; - startp.tick = ticks_abs; - startp.tmo_ev = odp_ev; - ev_hdr->flags.tmo_type = EM_TMO_TYPE_ONESHOT; - ev_hdr->tmo = tmo; - odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE); - int odpret = odp_timer_start(tmo->odp_timer, &startp); - - if (unlikely(odpret != ODP_TIMER_SUCCESS)) { - ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE; - ev_hdr->tmo = EM_TMO_UNDEF; - odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE); - if (esv_ena) - evstate_usr2em_revert(tmo_ev, ev_hdr, EVSTATE__TMO_SET_ABS__FAIL); - - em_status_t retval = timer_rv_odp2em(odpret); - - if (retval == EM_ERR_TOONEAR) { /* skip errorhandler */ - TMR_DBG_PRINT("TOONEAR, skip ErrH\n"); - return retval; - } - - return INTERNAL_ERROR(retval, EM_ESCOPE_TMO_SET_ABS, - "odp_timer_start():%d", odpret); - } - TMR_DBG_PRINT("OK\n"); - return EM_OK; -} - -em_status_t em_tmo_set_rel(em_tmo_t tmo, em_timer_tick_t ticks_rel, - em_event_t tmo_ev) -{ - if (EM_CHECK_LEVEL > 0) { - RETURN_ERROR_IF(tmo == EM_TMO_UNDEF || tmo_ev == EM_EVENT_UNDEF, - EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_REL, - "Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "", - tmo, tmo_ev); - - RETURN_ERROR_IF(tmo->flags & EM_TMO_FLAG_PERIODIC, - EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_REL, - "%s: Periodic no longer supported", __func__); - } - if (EM_CHECK_LEVEL > 1) { - /* check that tmo buf is valid before accessing other struct members */ - RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer), - EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_REL, - "Invalid tmo buffer"); - - em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); - - RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN, - EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_REL, - "Invalid tmo state:%d", tmo_state); - } - RETURN_ERROR_IF(EM_CHECK_LEVEL > 2 && - !is_event_type_valid(tmo_ev), - EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_REL, - "invalid event type"); - - event_hdr_t *ev_hdr = event_to_hdr(tmo_ev); - odp_event_t odp_ev = event_em2odp(tmo_ev); - bool esv_ena = esv_enabled(); - odp_timer_start_t startp; - - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && - ev_hdr->event_type == EM_EVENT_TYPE_TIMER_IND, - EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_REL, - "Invalid event type: timer-ring"); - - if (esv_ena) - evstate_usr2em(tmo_ev, ev_hdr, EVSTATE__TMO_SET_REL); - - /* set tmo active and arm with relative time */ - startp.tick_type = ODP_TIMER_TICK_REL; - startp.tick = ticks_rel; - startp.tmo_ev = odp_ev; - ev_hdr->flags.tmo_type = EM_TMO_TYPE_ONESHOT; - ev_hdr->tmo = tmo; - odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE); - int odpret = odp_timer_start(tmo->odp_timer, &startp); - - if (unlikely(odpret != ODP_TIMER_SUCCESS)) { - ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE; - ev_hdr->tmo = EM_TMO_UNDEF; - odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE); - if (esv_ena) - evstate_usr2em_revert(tmo_ev, ev_hdr, EVSTATE__TMO_SET_REL__FAIL); - - em_status_t retval = timer_rv_odp2em(odpret); - - if (retval == EM_ERR_TOONEAR) { /* skip errorhandler */ - TMR_DBG_PRINT("TOONEAR, skip ErrH\n"); - return retval; - } - return INTERNAL_ERROR(retval, EM_ESCOPE_TMO_SET_REL, - "odp_timer_start():%d", odpret); - } - TMR_DBG_PRINT("OK\n"); - return EM_OK; -} - -em_status_t em_tmo_set_periodic(em_tmo_t tmo, - em_timer_tick_t start_abs, - em_timer_tick_t period, - em_event_t tmo_ev) -{ - 
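- /*
- * Usage sketch (illustrative; 'tmr', 'my_queue' and 'ev' are assumed
- * to exist): arm a periodic timeout, then re-arm it from the EO
- * receive function with em_tmo_ack() for every timeout event received:
- *
- *   em_tmo_t t = em_tmo_create(tmr, EM_TMO_FLAG_PERIODIC, my_queue);
- *   em_timer_tick_t period = em_timer_ns_to_tick(tmr, 10 * 1000 * 1000);
- *   em_tmo_set_periodic(t, 0, period, ev); // start_abs 0: start at now + period
- *   ...
- *   em_tmo_ack(t, event);                  // in EO receive, per timeout event
- */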
RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && - (tmo == EM_TMO_UNDEF || tmo_ev == EM_EVENT_UNDEF), - EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC, - "Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "", - tmo, tmo_ev); - /* check that tmo buf is valid before accessing other struct members */ - RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer), - EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_PERIODIC, - "Invalid tmo buffer"); - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !(tmo->flags & EM_TMO_FLAG_PERIODIC), - EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_SET_PERIODIC, - "Not periodic tmo"); - if (EM_CHECK_LEVEL > 1) { - em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); - - RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN, - EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_PERIODIC, - "Invalid tmo state:%d", tmo_state); - } - RETURN_ERROR_IF(EM_CHECK_LEVEL > 2 && - !is_event_type_valid(tmo_ev), - EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC, - "invalid event type"); - - event_hdr_t *ev_hdr = event_to_hdr(tmo_ev); - odp_event_t odp_ev = event_em2odp(tmo_ev); - bool esv_ena = esv_enabled(); - odp_timer_start_t startp; - - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && - ev_hdr->event_type == EM_EVENT_TYPE_TIMER_IND, - EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC, - "Invalid event type: timer-ring"); - - if (esv_ena) - evstate_usr2em(tmo_ev, ev_hdr, EVSTATE__TMO_SET_PERIODIC); - - TMR_DBG_PRINT("start %lu, period %lu\n", start_abs, period); - - tmo->period = period; - if (start_abs == 0) - start_abs = odp_timer_current_tick(tmo->odp_timer_pool) + period; - tmo->last_tick = start_abs; - TMR_DBG_PRINT("last_tick %lu, now %lu\n", tmo->last_tick, - odp_timer_current_tick(tmo->odp_timer_pool)); - - /* set tmo active and arm with absolute time */ - startp.tick_type = ODP_TIMER_TICK_ABS; - startp.tick = start_abs; - startp.tmo_ev = odp_ev; - ev_hdr->flags.tmo_type = EM_TMO_TYPE_PERIODIC; - ev_hdr->tmo = tmo; - odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE); - int odpret = odp_timer_start(tmo->odp_timer, &startp); - - if (unlikely(odpret != ODP_TIMER_SUCCESS)) { - ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE; - ev_hdr->tmo = EM_TMO_UNDEF; - odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE); - if (esv_ena) - evstate_usr2em_revert(tmo_ev, ev_hdr, EVSTATE__TMO_SET_PERIODIC__FAIL); - - TMR_DBG_PRINT("diff to tmo %ld\n", - (int64_t)tmo->last_tick - - (int64_t)odp_timer_current_tick(tmo->odp_timer_pool)); - - em_status_t retval = timer_rv_odp2em(odpret); - - if (retval == EM_ERR_TOONEAR) { /* skip errorhandler */ - TMR_DBG_PRINT("TOONEAR, skip ErrH\n"); - return retval; - } - return INTERNAL_ERROR(retval, - EM_ESCOPE_TMO_SET_PERIODIC, - "odp_timer_start():%d", odpret); - } - TMR_DBG_PRINT("OK\n"); - return EM_OK; -} - -em_status_t em_tmo_set_periodic_ring(em_tmo_t tmo, - em_timer_tick_t start_abs, - uint64_t multiplier, - em_event_t tmo_ev) -{ - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && tmo == EM_TMO_UNDEF, - EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC_RING, - "Inv.args: tmo UNDEF"); - /* check that tmo buf is valid before accessing other struct members */ - RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer), - EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_PERIODIC_RING, - "Invalid tmo buffer"); - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !(tmo->flags & EM_TMO_FLAG_PERIODIC), - EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_SET_PERIODIC_RING, - "Not periodic tmo"); - if (EM_CHECK_LEVEL > 1) { - em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); - - RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN, - EM_ERR_BAD_STATE, 
EM_ESCOPE_TMO_SET_PERIODIC_RING, - "Invalid tmo state:%d", tmo_state); - } - - odp_timer_periodic_start_t startp; - odp_event_t odp_ev = tmo->odp_timeout; /* pre-allocated */ - - if (tmo_ev != EM_EVENT_UNDEF) { /* user gave event to (re-)use */ - odp_ev = event_em2odp(tmo_ev); - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && - odp_event_type(odp_ev) != ODP_EVENT_TIMEOUT, - EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC_RING, - "Inv.args: not TIMER event given"); - odp_timeout_t odp_tmo = odp_timeout_from_event(odp_ev); - event_hdr_t *const ev_hdr = odp_timeout_user_area(odp_tmo); - - ev_hdr->flags.tmo_type = EM_TMO_TYPE_PERIODIC; - ev_hdr->tmo = tmo; - TMR_DBG_PRINT("user event %p\n", tmo_ev); - } else { - tmo->odp_timeout = ODP_EVENT_INVALID; /* now used */ - } - - if (odp_ev == ODP_EVENT_INVALID) { /* re-start, pre-alloc used */ - odp_event_t odp_tmo_event = alloc_odp_timeout(tmo); - - if (unlikely(odp_tmo_event == ODP_EVENT_INVALID)) - return INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TMO_SET_PERIODIC_RING, - "Ring: odp timeout event allocation failed"); - odp_ev = odp_tmo_event; - } - - TMR_DBG_PRINT("ring tmo start_abs %lu, M=%lu, odp ev=%p\n", start_abs, multiplier, odp_ev); - startp.first_tick = start_abs; - startp.freq_multiplier = multiplier; - startp.tmo_ev = odp_ev; - odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE); - int odpret = odp_timer_periodic_start(tmo->odp_timer, &startp); - - if (unlikely(odpret != ODP_TIMER_SUCCESS)) { - odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE); - - em_status_t retval = timer_rv_odp2em(odpret); - - if (retval == EM_ERR_TOONEAR) { /* skip errorhandler */ - TMR_DBG_PRINT("TOONEAR, skip ErrH\n"); - return retval; - } - return INTERNAL_ERROR(retval, - EM_ESCOPE_TMO_SET_PERIODIC_RING, - "odp_timer_periodic_start(): ret %d", odpret); - } - /* ok */ - TMR_DBG_PRINT("OK\n"); - return EM_OK; -} - -em_status_t em_tmo_cancel(em_tmo_t tmo, em_event_t *cur_event) -{ - if (EM_CHECK_LEVEL > 0) { - RETURN_ERROR_IF(tmo == EM_TMO_UNDEF || cur_event == NULL, - EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CANCEL, - "Invalid args: tmo:%" PRI_TMO " cur_event:%p", - tmo, cur_event); - } - *cur_event = EM_EVENT_UNDEF; - if (EM_CHECK_LEVEL > 1) { - RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer), - EM_ERR_BAD_ID, EM_ESCOPE_TMO_CANCEL, - "Invalid tmo buffer"); - RETURN_ERROR_IF(tmo->odp_timer == ODP_TIMER_INVALID, - EM_ERR_BAD_ID, EM_ESCOPE_TMO_CANCEL, - "Invalid tmo odp_timer"); - } - - /* check state: EM_TMO_STATE_UNKNOWN | EM_TMO_STATE_IDLE | EM_TMO_STATE_ACTIVE */ - em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); - - RETURN_ERROR_IF(tmo_state != EM_TMO_STATE_ACTIVE, - EM_ERR_BAD_STATE, EM_ESCOPE_TMO_CANCEL, - "Invalid tmo state:%d (!%d)", tmo_state, EM_TMO_STATE_ACTIVE); - - TMR_DBG_PRINT("ODP tmo %p\n", tmo->odp_timer); - - odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE); - - if (tmo->is_ring) { /* periodic ring never returns event here */ - RETURN_ERROR_IF(odp_timer_periodic_cancel(tmo->odp_timer) != 0, - EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_CANCEL, - "odp periodic cancel fail"); - return EM_ERR_TOONEAR; /* ack will tell when no more coming */ - } - - /* not ring, cancel*/ - odp_event_t odp_ev = ODP_EVENT_INVALID; - int ret = odp_timer_cancel(tmo->odp_timer, &odp_ev); - - if (ret != 0) { /* speculative, odp does not today separate fail and too late */ - if (EM_CHECK_LEVEL > 1) { - RETURN_ERROR_IF(odp_ev != ODP_EVENT_INVALID, - EM_ERR_BAD_STATE, EM_ESCOPE_TMO_CANCEL, - "Bug? 
ODP timer cancel fail but return event!"); - } - TMR_DBG_PRINT("fail, odpret %d. Assume TOONEAR\n", ret); - return EM_ERR_TOONEAR; /* expired, other cases caught above */ - } - - /* - * Cancel successful (ret == 0): odp_ev contains the canceled tmo event - */ - - if (EM_CHECK_LEVEL > 2) { - RETURN_ERROR_IF(!odp_event_is_valid(odp_ev), - EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_CANCEL, - "Invalid tmo event from odp_timer_cancel"); - } - - em_event_t tmo_ev = event_odp2em(odp_ev); - event_hdr_t *ev_hdr = event_to_hdr(tmo_ev); - - /* successful cancel also resets the event tmo type */ - ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE; - ev_hdr->tmo = EM_TMO_UNDEF; - - if (esv_enabled()) - tmo_ev = evstate_em2usr(tmo_ev, ev_hdr, EVSTATE__TMO_CANCEL); - - *cur_event = tmo_ev; - TMR_DBG_PRINT("OK\n"); - return EM_OK; -} - -em_status_t em_tmo_ack(em_tmo_t tmo, em_event_t next_tmo_ev) -{ - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && - (tmo == EM_TMO_UNDEF || next_tmo_ev == EM_EVENT_UNDEF), - EM_ERR_BAD_ARG, EM_ESCOPE_TMO_ACK, - "Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "", - tmo, next_tmo_ev); - /* check that tmo buf is valid before accessing other struct members */ - RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer), - EM_ERR_BAD_ID, EM_ESCOPE_TMO_ACK, - "Tmo ACK: invalid tmo buffer"); - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !(tmo->flags & EM_TMO_FLAG_PERIODIC), - EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_ACK, - "Tmo ACK: Not a periodic tmo"); - - if (EM_TIMER_TMO_STATS) - tmo->stats.num_acks++; - - em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); - event_hdr_t *ev_hdr = event_to_hdr(next_tmo_ev); - odp_event_t odp_ev = event_em2odp(next_tmo_ev); - - if (tmo->is_ring) /* ring timer */ - return ack_ring_timeout_event(tmo, next_tmo_ev, tmo_state, ev_hdr, odp_ev); - - /* not periodic ring, set next timeout */ - if (unlikely(tmo_state != EM_TMO_STATE_ACTIVE)) { - ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE; - ev_hdr->tmo = EM_TMO_UNDEF; - - if (tmo_state == EM_TMO_STATE_IDLE) /* canceled, skip errorhandler */ - return EM_ERR_CANCELED; - - return INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_TMO_ACK, - "Tmo ACK: invalid tmo state:%d", tmo_state); - } - - bool esv_ena = esv_enabled(); - - if (esv_ena) - evstate_usr2em(next_tmo_ev, ev_hdr, EVSTATE__TMO_ACK); - /* - * The periodic timer will silently stop if ack fails! Attempt to - * handle exceptions and if the tmo cannot be renewed, call - * the errorhandler so the application may recover. - */ - tmo->last_tick += tmo->period; /* maintain absolute time */ - int ret; - int tries = EM_TIMER_ACK_TRIES; - em_status_t err; - odp_timer_start_t startp; - - startp.tick_type = ODP_TIMER_TICK_ABS; - startp.tmo_ev = odp_ev; - ev_hdr->flags.tmo_type = EM_TMO_TYPE_PERIODIC; /* could be new event */ - ev_hdr->tmo = tmo; - - /* try to set tmo EM_TIMER_ACK_TRIES times */ - do { - /* ask new timeout for next period */ - startp.tick = tmo->last_tick; - ret = odp_timer_start(tmo->odp_timer, &startp); - /* - * Calling ack() was delayed over next period if 'ret' is - * ODP_TIMER_TOO_NEAR, i.e. now in past. 
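In that case the code below skips
- * the missed period(s) (unless EM_TMO_FLAG_NOSKIP is set) and retries.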
Other errors - * should not happen, fatal for this tmo - */ - if (likely(ret != ODP_TIMER_TOO_NEAR)) { - if (ret != ODP_TIMER_SUCCESS) { - TMR_DBG_PRINT("ODP return %d\n" - "tmo tgt/tick now %lu/%lu\n", - ret, tmo->last_tick, - odp_timer_current_tick(tmo->odp_timer_pool)); - } - break; /* ok */ - } - - /* ODP_TIMER_TOO_NEAR: ack() delayed beyond next time slot */ - if (EM_TIMER_TMO_STATS) - tmo->stats.num_late_ack++; - TMR_DBG_PRINT("late, tgt/now %lu/%lu\n", tmo->last_tick, - odp_timer_current_tick(tmo->odp_timer_pool)); - - if (tmo->flags & EM_TMO_FLAG_NOSKIP) /* not allowed to skip, send immediately */ - return handle_ack_noskip(next_tmo_ev, ev_hdr, tmo->queue); - - /* skip already passed periods and try again */ - handle_ack_skip(tmo); - - tries--; - if (unlikely(tries < 1)) { - err = INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, - EM_ESCOPE_TMO_ACK, - "Tmo ACK: too many retries:%u", - EM_TIMER_ACK_TRIES); - goto ack_err; - } - } while (ret != ODP_TIMER_SUCCESS); - - if (unlikely(ret != ODP_TIMER_SUCCESS)) { - err = INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_ACK, - "Tmo ACK: failed to renew tmo (odp ret %d)", - ret); - goto ack_err; - } - return EM_OK; - -ack_err: - /* fail, restore event state */ - ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE; - ev_hdr->tmo = EM_TMO_UNDEF; - if (esv_ena) - evstate_usr2em_revert(next_tmo_ev, ev_hdr, EVSTATE__TMO_ACK__FAIL); - return err; -} - -int em_timer_get_all(em_timer_t *tmr_list, int max) -{ - odp_ticketlock_lock(&em_shm->timers.timer_lock); - - const uint32_t num_timers = em_shm->timers.num_timers; - - if (tmr_list && max > 0 && num_timers > 0) { - int num = 0; - - for (int i = 0; i < EM_ODP_MAX_TIMERS; i++) { - if (em_shm->timers.timer[i].odp_tmr_pool != ODP_TIMER_POOL_INVALID) { - tmr_list[num] = TMR_I2H(i); - num++; - if (num >= max) - break; - } - } - } - - odp_ticketlock_unlock(&em_shm->timers.timer_lock); - - return num_timers; -} - -em_status_t em_timer_get_attr(em_timer_t tmr, em_timer_attr_t *tmr_attr) -{ - odp_timer_pool_info_t poolinfo; - int i = TMR_H2I(tmr); - int ret; - em_timer_clksrc_t clk = EM_TIMER_CLKSRC_DEFAULT; - - if (EM_CHECK_LEVEL > 0) - RETURN_ERROR_IF(!is_timer_valid(tmr) || tmr_attr == NULL, - EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_GET_ATTR, - "Inv.args: timer:%" PRI_TMR " tmr_attr:%p", - tmr, tmr_attr); - - /* get current values from ODP */ - ret = odp_timer_pool_info(em_shm->timers.timer[i].odp_tmr_pool, &poolinfo); - RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_GET_ATTR, - "ODP timer pool info failed"); - - timer_clksrc_odp2em(poolinfo.param.clk_src, &clk); - - if (poolinfo.param.timer_type == ODP_TIMER_TYPE_SINGLE) { - tmr_attr->resparam.res_ns = poolinfo.param.res_ns; - tmr_attr->resparam.res_hz = poolinfo.param.res_hz; - tmr_attr->resparam.max_tmo = poolinfo.param.max_tmo; - tmr_attr->resparam.min_tmo = poolinfo.param.min_tmo; - tmr_attr->resparam.clk_src = clk; - memset(&tmr_attr->ringparam, 0, sizeof(em_timer_ring_param_t)); - } else { - tmr_attr->ringparam.base_hz.integer = poolinfo.param.periodic.base_freq_hz.integer; - tmr_attr->ringparam.base_hz.numer = poolinfo.param.periodic.base_freq_hz.numer; - tmr_attr->ringparam.base_hz.denom = poolinfo.param.periodic.base_freq_hz.denom; - tmr_attr->ringparam.max_mul = poolinfo.param.periodic.max_multiplier; - tmr_attr->ringparam.res_ns = poolinfo.param.res_ns; - tmr_attr->ringparam.clk_src = clk; - memset(&tmr_attr->resparam, 0, sizeof(em_timer_res_param_t)); - } - - tmr_attr->num_tmo = poolinfo.param.num_timers; - tmr_attr->flags = 
em_shm->timers.timer[i].flags; - - strncpy(tmr_attr->name, poolinfo.name, EM_TIMER_NAME_LEN - 1); - tmr_attr->name[EM_TIMER_NAME_LEN - 1] = '\0'; - return EM_OK; -} - -uint64_t em_timer_get_freq(em_timer_t tmr) -{ - const timer_storage_t *const tmrs = &em_shm->timers; - - if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_GET_FREQ, - "Invalid timer:%" PRI_TMR "", tmr); - return 0; - } - - return odp_timer_ns_to_tick(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool, - 1000ULL * 1000ULL * 1000ULL); /* 1 sec */ -} - -uint64_t em_timer_tick_to_ns(em_timer_t tmr, em_timer_tick_t ticks) -{ - const timer_storage_t *const tmrs = &em_shm->timers; - - if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_TICK_TO_NS, - "Invalid timer:%" PRI_TMR "", tmr); - return 0; - } - - return odp_timer_tick_to_ns(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool, ticks); -} - -em_timer_tick_t em_timer_ns_to_tick(em_timer_t tmr, uint64_t ns) -{ - const timer_storage_t *const tmrs = &em_shm->timers; - - if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_NS_TO_TICK, - "Invalid timer:%" PRI_TMR "", tmr); - return 0; - } - - return odp_timer_ns_to_tick(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool, ns); -} - -em_tmo_state_t em_tmo_get_state(em_tmo_t tmo) -{ - if (EM_CHECK_LEVEL > 0 && unlikely(tmo == EM_TMO_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_STATE, "Invalid tmo"); - return EM_TMO_STATE_UNKNOWN; - } - if (EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer)) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_TMO_GET_STATE, "Invalid tmo buffer"); - return EM_TMO_STATE_UNKNOWN; - } - - return odp_atomic_load_acq_u32(&tmo->state); -} - -em_status_t em_tmo_get_stats(em_tmo_t tmo, em_tmo_stats_t *stat) -{ - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && tmo == EM_TMO_UNDEF, - EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_STATS, - "Invalid tmo"); - /* check that tmo buf is valid before accessing other struct members */ - RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer), - EM_ERR_BAD_ID, EM_ESCOPE_TMO_GET_STATS, - "Invalid tmo buffer"); - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && tmo->odp_timer == ODP_TIMER_INVALID, - EM_ERR_BAD_STATE, EM_ESCOPE_TMO_GET_STATS, - "tmo deleted?"); - - if (EM_TIMER_TMO_STATS) { - if (stat) - *stat = tmo->stats; - } else { - return EM_ERR_NOT_IMPLEMENTED; - } - - return EM_OK; -} - -em_tmo_type_t em_tmo_get_type(em_event_t event, em_tmo_t *tmo, bool reset) -{ - if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_STATE, "Invalid event given"); - return EM_TMO_TYPE_NONE; - } - - event_hdr_t *ev_hdr = event_to_hdr(event); - em_tmo_type_t type = (em_tmo_type_t)ev_hdr->flags.tmo_type; - - if (EM_CHECK_LEVEL > 1 && unlikely(!can_have_tmo_type(event))) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_STATE, - "Invalid event type"); - return EM_TMO_TYPE_NONE; - } - - if (EM_CHECK_LEVEL > 2 && unlikely(type > EM_TMO_TYPE_PERIODIC)) { - INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_TMO_GET_STATE, - "Invalid tmo event type, header corrupted?"); - return EM_TMO_TYPE_NONE; - } - - if (tmo) - *tmo = (type == EM_TMO_TYPE_NONE) ? 
EM_TMO_UNDEF : ev_hdr->tmo; - - if (reset && ev_hdr->event_type != EM_EVENT_TYPE_TIMER_IND) { - ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE; - ev_hdr->tmo = EM_TMO_UNDEF; - } - - return type; -} - -void *em_tmo_get_userptr(em_event_t event, em_tmo_t *tmo) -{ - if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_USERPTR, "Invalid event given"); - return NULL; - } - - odp_event_t odp_event = event_em2odp(event); - odp_event_type_t evtype = odp_event_type(odp_event); - - if (unlikely(evtype != ODP_EVENT_TIMEOUT)) /* no errorhandler for other events */ - return NULL; - - event_hdr_t *ev_hdr = event_to_hdr(event); /* will not return on error */ - - if (tmo) /* always periodic timeout here */ - *tmo = ev_hdr->tmo; - - return odp_timeout_user_ptr(odp_timeout_from_event(odp_event)); -} - -em_timer_t em_tmo_get_timer(em_tmo_t tmo) -{ - if (EM_CHECK_LEVEL > 0 && unlikely(tmo == EM_TMO_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_TIMER, "Invalid tmo given"); - return EM_TIMER_UNDEF; - } - if (EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_TIMER, "Corrupted tmo?"); - return EM_TIMER_UNDEF; - } - - return tmo->timer; -} - -uint64_t em_timer_to_u64(em_timer_t timer) -{ - return (uint64_t)timer; -} - -uint64_t em_tmo_to_u64(em_tmo_t tmo) -{ - return (uint64_t)tmo; -} +/* + * Copyright (c) 2016, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * --------------------------------------------------------------------- + * Some notes about the implementation: + * + * EM Timer API is close to ODP timer, but there are issues + * making this code a bit more complex than it could be: + * + * 1) there is no generic periodic timer in ODP + * 2) unless using the pre-defined timeout event there is no way to access + * all necessary information runtime to implement a periodic timer + * + * Point 2 is solved by creating a timeout pool. 
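+ * (In this implementation the pool is an ODP buffer pool and an
+ * em_tmo_t handle is simply the address of one such buffer, roughly:
+ *
+ *     odp_buffer_t buf = odp_buffer_alloc(timer->tmo_pool);
+ *     em_timer_timeout_t *tmo = odp_buffer_addr(buf);
+ *
+ * see create_tmo_handle_pool() and em_tmo_create_arg() below.)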
When user allocates + * EM timeout, a new minimum size buffer is allocated to store all the needed + * information. Timer handle is a pointer to such buffer so all data is + * available via the handle (ack() is the most problematic case). This does + * create performance penalty, but so far it looks like the penalty is not + * too large and does simplify the code otherwise. Also timeouts could be + * pre-allocated as the API separates creation and arming. + * Most of the synchronization is handled by ODP timer, a ticketlock is used + * for high level management API. + * + */ +#include "em_include.h" + +/* timer handle = index + 1 (UNDEF 0) */ +#define TMR_I2H(x) ((em_timer_t)(uintptr_t)((x) + 1)) +#define TMR_H2I(x) ((int)((uintptr_t)(x) - 1)) + +static inline em_status_t timer_rv_odp2em(int odpret) +{ + switch (odpret) { + case ODP_TIMER_SUCCESS: + return EM_OK; + case ODP_TIMER_TOO_NEAR: + return EM_ERR_TOONEAR; + case ODP_TIMER_TOO_FAR: + return EM_ERR_TOOFAR; + default: + break; + } + + return EM_ERR_LIB_FAILED; +} + +static inline int is_queue_valid_type(em_timer_t tmr, const queue_elem_t *q_elem) +{ + unsigned int tmridx = (unsigned int)TMR_H2I(tmr); + + /* implementation specific */ + if (em_shm->timers.timer[tmridx].plain_q_ok && q_elem->type == EM_QUEUE_TYPE_UNSCHEDULED) + return 1; + /* EM assumes scheduled always supported */ + return (q_elem->type == EM_QUEUE_TYPE_ATOMIC || + q_elem->type == EM_QUEUE_TYPE_PARALLEL || + q_elem->type == EM_QUEUE_TYPE_PARALLEL_ORDERED) ? 1 : 0; + + /* LOCAL or OUTPUT queues not supported */ +} + +static inline bool is_event_type_valid(em_event_t event) +{ + em_event_type_t etype = em_event_type_major(em_event_get_type(event)); + + if (etype == EM_EVENT_TYPE_PACKET || + etype == EM_EVENT_TYPE_SW || + etype == EM_EVENT_TYPE_TIMER) + return true; + + /* limitations mainly set by odp spec, e.g. no vectors */ + return false; +} + +/* Helper for em_tmo_get_type() */ +static inline bool can_have_tmo_type(em_event_t event) +{ + em_event_type_t etype = em_event_type_major(em_event_get_type(event)); + + if (etype == EM_EVENT_TYPE_PACKET || + etype == EM_EVENT_TYPE_SW || + etype == EM_EVENT_TYPE_TIMER || + etype == EM_EVENT_TYPE_TIMER_IND) + return true; + + return false; +} + +static inline int is_timer_valid(em_timer_t tmr) +{ + unsigned int i; + const timer_storage_t *const tmrs = &em_shm->timers; + + if (unlikely(tmr == EM_TIMER_UNDEF)) + return 0; + + i = (unsigned int)TMR_H2I(tmr); + if (unlikely(i >= EM_ODP_MAX_TIMERS)) + return 0; + + if (unlikely(tmrs->timer[i].odp_tmr_pool == ODP_TIMER_POOL_INVALID || + tmrs->timer[i].tmo_pool == ODP_POOL_INVALID)) + return 0; + return 1; +} + +static inline em_status_t ack_ring_timeout_event(em_tmo_t tmo, + em_event_t ev, + em_tmo_state_t tmo_state, + event_hdr_t *ev_hdr, + odp_event_t odp_ev) +{ + (void)ev; + (void)tmo_state; + + if (EM_CHECK_LEVEL > 0 && unlikely(ev_hdr->event_type != EM_EVENT_TYPE_TIMER_IND)) + return INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_ACK, + "Invalid event type:%u, expected timer-ring:%u", + ev_hdr->event_type, EM_EVENT_TYPE_TIMER_IND); + + if (EM_CHECK_LEVEL > 0 && unlikely(tmo != ev_hdr->tmo)) + return INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_ACK, + "Wrong event returned? 
tmo %p->%p", tmo, ev_hdr->tmo); + + int ret = odp_timer_periodic_ack(tmo->odp_timer, odp_ev); + + if (unlikely(ret < 0)) { /* failure */ + ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE; + return INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_ACK, + "Tmo ACK: ring timer odp ack fail, rv %d", ret); + } + + if (unlikely(ret == 2)) { /* cancelled, no more events coming */ + ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE; /* allows em_free */ + ev_hdr->tmo = EM_TMO_UNDEF; + atomic_thread_fence(memory_order_release); + TMR_DBG_PRINT("last periodic event %p\n", odp_ev); + return EM_ERR_CANCELED; + } + + /* ret = 1 would mean timer is cancelled, but more coming still. + * return ok to make ring and normal periodic behave the same + * e.g. CANCELED means tmo can now be deleted + */ + return EM_OK; +} + +static void cleanup_timer_create_fail(event_timer_t *timer) +{ + if (timer->tmo_pool != ODP_POOL_INVALID && + timer->tmo_pool != em_shm->timers.shared_tmo_pool) /* don't kill shared pool */ + odp_pool_destroy(timer->tmo_pool); + if (timer->odp_tmr_pool != ODP_TIMER_POOL_INVALID) + odp_timer_pool_destroy(timer->odp_tmr_pool); + timer->tmo_pool = ODP_POOL_INVALID; + timer->odp_tmr_pool = ODP_TIMER_POOL_INVALID; + TMR_DBG_PRINT("cleaned up failed timer create\n"); +} + +static odp_pool_t create_tmo_handle_pool(uint32_t num_buf, uint32_t cache, const event_timer_t *tmr) +{ + odp_pool_param_t odp_pool_param; + odp_pool_t pool; + char tmo_pool_name[ODP_POOL_NAME_LEN]; + + odp_pool_param_init(&odp_pool_param); + odp_pool_param.type = ODP_POOL_BUFFER; + odp_pool_param.buf.size = sizeof(em_timer_timeout_t); + odp_pool_param.buf.align = ODP_CACHE_LINE_SIZE; + odp_pool_param.buf.cache_size = cache; + odp_pool_param.stats.all = 0; + TMR_DBG_PRINT("tmo handle pool cache %d\n", odp_pool_param.buf.cache_size); + + /* local pool caching may cause out of buffers situation on a core. 
Adjust */ + uint32_t num = num_buf + ((em_core_count() - 1) * odp_pool_param.buf.cache_size); + + if (num_buf != num) { + TMR_DBG_PRINT("Adjusted pool size %d->%d due to local caching (%d)\n", + num_buf, num, odp_pool_param.buf.cache_size); + } + odp_pool_param.buf.num = num; + snprintf(tmo_pool_name, ODP_POOL_NAME_LEN, "Tmo-pool-%d", tmr->idx); + pool = odp_pool_create(tmo_pool_name, &odp_pool_param); + if (pool != ODP_POOL_INVALID) { + TMR_DBG_PRINT("Created ODP-pool: %s for %d timeouts\n", + tmo_pool_name, odp_pool_param.buf.num); + } + return pool; +} + +static inline odp_event_t alloc_odp_timeout(em_tmo_t tmo) +{ + odp_timeout_t odp_tmo = odp_timeout_alloc(tmo->ring_tmo_pool); + + if (unlikely(odp_tmo == ODP_TIMEOUT_INVALID)) + return ODP_EVENT_INVALID; + + /* init EM event header */ + event_hdr_t *const ev_hdr = odp_timeout_user_area(odp_tmo); + odp_event_t odp_event = odp_timeout_to_event(odp_tmo); + em_event_t event = event_odp2em(odp_event); + + if (unlikely(!ev_hdr)) { + odp_timeout_free(odp_tmo); + return ODP_EVENT_INVALID; + } + + if (esv_enabled()) + event = evstate_alloc_tmo(event, ev_hdr); + ev_hdr->flags.all = 0; + ev_hdr->flags.tmo_type = EM_TMO_TYPE_PERIODIC; + ev_hdr->tmo = tmo; + ev_hdr->event_type = EM_EVENT_TYPE_TIMER_IND; + ev_hdr->event_size = 0; + ev_hdr->egrp = EM_EVENT_GROUP_UNDEF; + ev_hdr->user_area.all = 0; + ev_hdr->user_area.isinit = 1; + + return odp_event; +} + +static inline void free_odp_timeout(odp_event_t odp_event) +{ + if (esv_enabled()) { + em_event_t event = event_odp2em(odp_event); + event_hdr_t *const ev_hdr = event_to_hdr(event); + + event = ev_hdr->event; + evstate_free(event, ev_hdr, EVSTATE__TMO_DELETE); + } + + odp_event_free(odp_event); +} + +static inline em_status_t handle_ack_noskip(em_event_t next_tmo_ev, + event_hdr_t *ev_hdr, + em_queue_t queue) +{ + if (esv_enabled()) + evstate_usr2em_revert(next_tmo_ev, ev_hdr, EVSTATE__TMO_ACK__NOSKIP); + + em_status_t err = em_send(next_tmo_ev, queue); + + if (unlikely(err != EM_OK)) { + err = INTERNAL_ERROR(err, EM_ESCOPE_TMO_ACK, "Tmo ACK: noskip em_send fail"); + ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE; + ev_hdr->tmo = EM_TMO_UNDEF; + } + + return err; /* EM_OK or send-failure */ +} + +static inline void handle_ack_skip(em_tmo_t tmo) +{ + uint64_t odpt = odp_timer_current_tick(tmo->odp_timer_pool); + uint64_t skips; + + if (odpt > tmo->last_tick) /* late, over next period */ + skips = ((odpt - tmo->last_tick) / tmo->period) + 1; + else + skips = 1; /* not yet over next period, but late for setting */ + + tmo->last_tick += skips * tmo->period; + TMR_DBG_PRINT("%lu skips * %lu ticks => new tgt %lu\n", + skips, tmo->period, tmo->last_tick); + if (EM_TIMER_TMO_STATS) + tmo->stats.num_period_skips += skips; +} + +static inline bool check_tmo_flags(em_tmo_flag_t flags) +{ + /* Check for valid tmo flags (oneshot OR periodic mainly) */ + if (unlikely(!(flags & (EM_TMO_FLAG_ONESHOT | EM_TMO_FLAG_PERIODIC)))) + return false; + + if (unlikely((flags & EM_TMO_FLAG_ONESHOT) && (flags & EM_TMO_FLAG_PERIODIC))) + return false; + + if (EM_CHECK_LEVEL > 1) { + em_tmo_flag_t inv_flags = ~(EM_TMO_FLAG_ONESHOT | EM_TMO_FLAG_PERIODIC | + EM_TMO_FLAG_NOSKIP); + if (unlikely(flags & inv_flags)) + return false; + } + return true; +} + +static inline bool check_timer_attr(const em_timer_attr_t *tmr_attr) +{ + if (unlikely(tmr_attr == NULL)) { + INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_TIMER_CREATE, + "NULL ptr given"); + return false; + } + if (unlikely(tmr_attr->__internal_check != EM_CHECK_INIT_CALLED)) { + 
INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_TIMER_CREATE, + "Not initialized: em_timer_attr_init(tmr_attr) not called"); + return false; + } + if (unlikely(tmr_attr->resparam.res_ns && tmr_attr->resparam.res_hz)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_CREATE, + "Only res_ns OR res_hz allowed"); + return false; + } + return true; +} + +static inline bool check_timer_attr_ring(const em_timer_attr_t *ring_attr) +{ + if (unlikely(ring_attr == NULL)) { + INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_TIMER_RING_CREATE, + "NULL attr given"); + return false; + } + if (EM_CHECK_LEVEL > 0 && unlikely(ring_attr->__internal_check != EM_CHECK_INIT_CALLED)) { + INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_TIMER_RING_CREATE, + "Not initialized: em_timer_ring_attr_init(ring_attr) not called"); + return false; + } + + if (EM_CHECK_LEVEL > 1 && + unlikely(ring_attr->ringparam.base_hz.integer < 1 || + ring_attr->ringparam.max_mul < 1 || + (ring_attr->flags & EM_TIMER_FLAG_RING) == 0)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_RING_CREATE, + "Invalid attr values for ring timer"); + return false; + } + + return true; +} + +static inline int find_free_timer_index(void) +{ + /* + * Find a free timer-slot. + * This linear search should not be a performance problem with only a few timers + * available especially when these are typically created at startup. + * Assumes context is locked + */ + int i; + + for (i = 0; i < EM_ODP_MAX_TIMERS; i++) { + const event_timer_t *timer = &em_shm->timers.timer[i]; + + if (timer->odp_tmr_pool == ODP_TIMER_POOL_INVALID) /* marks unused entry */ + break; + } + return i; +} + +void em_timer_attr_init(em_timer_attr_t *tmr_attr) +{ + if (unlikely(EM_CHECK_LEVEL > 0 && tmr_attr == NULL)) + return; /* just ignore NULL here */ + + /* clear/invalidate unused ring timer */ + memset(&tmr_attr->ringparam, 0, sizeof(em_timer_ring_param_t)); + + /* strategy: first put default resolution, then validate based on that */ + tmr_attr->resparam.res_ns = EM_ODP_TIMER_RESOL_DEF_NS; + tmr_attr->resparam.res_hz = 0; + tmr_attr->resparam.clk_src = EM_TIMER_CLKSRC_DEFAULT; + tmr_attr->flags = EM_TIMER_FLAG_NONE; + + odp_timer_clk_src_t odp_clksrc; + odp_timer_capability_t odp_capa; + odp_timer_res_capability_t odp_res_capa; + int err; + + err = timer_clksrc_em2odp(tmr_attr->resparam.clk_src, &odp_clksrc); + if (unlikely(err)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_ATTR_INIT, + "Unsupported EM-timer clock source:%d", + tmr_attr->resparam.clk_src); + return; + } + err = odp_timer_capability(odp_clksrc, &odp_capa); + if (unlikely(err)) { + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_ATTR_INIT, + "Timer capability: ret %d, odp-clksrc:%d", + err, odp_clksrc); + return; + } + + TMR_DBG_PRINT("odp says highest res %lu\n", odp_capa.highest_res_ns); + if (unlikely(odp_capa.highest_res_ns > tmr_attr->resparam.res_ns)) { + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_ATTR_INIT, + "Timer capability: maxres %lu req %lu, odp-clksrc:%d!", + odp_capa.highest_res_ns, tmr_attr->resparam.res_ns, odp_clksrc); + return; + } + + memset(&odp_res_capa, 0, sizeof(odp_timer_res_capability_t)); + odp_res_capa.res_ns = tmr_attr->resparam.res_ns; + err = odp_timer_res_capability(odp_clksrc, &odp_res_capa); + if (unlikely(err)) { + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_ATTR_INIT, + "Timer res capability failed: ret %d, odp-clksrc:%d, res %lu", + err, odp_clksrc, tmr_attr->resparam.res_ns); + return; + } + + TMR_DBG_PRINT("res %lu -> ODP says min %lu, max %lu\n", + 
tmr_attr->resparam.res_ns, odp_res_capa.min_tmo, + odp_res_capa.max_tmo); + + tmr_attr->num_tmo = EM_ODP_DEFAULT_TMOS; + if (odp_capa.max_timers && odp_capa.max_timers < EM_ODP_DEFAULT_TMOS) + tmr_attr->num_tmo = odp_capa.max_timers; + + tmr_attr->resparam.min_tmo = odp_res_capa.min_tmo; + tmr_attr->resparam.max_tmo = odp_res_capa.max_tmo; + tmr_attr->name[0] = 0; /* timer_create will add default (no index available here) */ + tmr_attr->__internal_check = EM_CHECK_INIT_CALLED; +} + +em_status_t em_timer_ring_attr_init(em_timer_attr_t *ring_attr, + em_timer_clksrc_t clk_src, + uint64_t base_hz, + uint64_t max_mul, + uint64_t res_ns) +{ + if (unlikely(EM_CHECK_LEVEL > 0 && ring_attr == NULL)) + return EM_ERR_BAD_ARG; + + /* clear unused fields */ + memset(ring_attr, 0, sizeof(em_timer_attr_t)); + + ring_attr->ringparam.base_hz.integer = base_hz; + ring_attr->ringparam.clk_src = clk_src; + ring_attr->ringparam.max_mul = max_mul; + ring_attr->ringparam.res_ns = res_ns; /* 0 is legal and means odp default */ + ring_attr->num_tmo = EM_ODP_DEFAULT_RING_TMOS; + ring_attr->flags = EM_TIMER_FLAG_RING; + ring_attr->name[0] = 0; /* default at ring_create, index not known here */ + + odp_timer_clk_src_t odp_clksrc; + odp_timer_capability_t capa; + int rv = timer_clksrc_em2odp(ring_attr->ringparam.clk_src, &odp_clksrc); + + if (unlikely(rv)) + return EM_ERR_BAD_ARG; + if (unlikely(odp_timer_capability(odp_clksrc, &capa) != 0)) { + TMR_DBG_PRINT("odp_timer_capability returned error for clk_src %u\n", odp_clksrc); + return EM_ERR_BAD_ARG; /* assume clksrc not supported */ + } + + if (capa.periodic.max_pools == 0) /* no odp support */ + return EM_ERR_NOT_IMPLEMENTED; + + if (capa.periodic.max_timers < ring_attr->num_tmo) + ring_attr->num_tmo = capa.periodic.max_timers; + + odp_timer_periodic_capability_t pcapa; + + pcapa.base_freq_hz.integer = ring_attr->ringparam.base_hz.integer; + pcapa.base_freq_hz.numer = ring_attr->ringparam.base_hz.numer; + pcapa.base_freq_hz.denom = ring_attr->ringparam.base_hz.denom; + pcapa.max_multiplier = ring_attr->ringparam.max_mul; + pcapa.res_ns = ring_attr->ringparam.res_ns; + rv = odp_timer_periodic_capability(odp_clksrc, &pcapa); + ring_attr->ringparam.res_ns = pcapa.res_ns; /* update back */ + ring_attr->ringparam.base_hz.integer = pcapa.base_freq_hz.integer; + ring_attr->ringparam.base_hz.numer = pcapa.base_freq_hz.numer; + ring_attr->ringparam.base_hz.denom = pcapa.base_freq_hz.denom; + if (pcapa.max_multiplier < ring_attr->ringparam.max_mul) /* don't increase here */ + ring_attr->ringparam.max_mul = pcapa.max_multiplier; + if (rv != 1) /* 1 means all values supported */ + return EM_ERR_BAD_ARG; + + ring_attr->__internal_check = EM_CHECK_INIT_CALLED; + return EM_OK; +} + +em_status_t em_timer_capability(em_timer_capability_t *capa, em_timer_clksrc_t clk_src) +{ + if (EM_CHECK_LEVEL > 0 && unlikely(capa == NULL)) { + EM_LOG(EM_LOG_DBG, "%s(): NULL capa ptr!\n", __func__); + return EM_ERR_BAD_POINTER; + } + + odp_timer_clk_src_t odp_clksrc; + odp_timer_capability_t odp_capa; + + if (unlikely(timer_clksrc_em2odp(clk_src, &odp_clksrc) || + odp_timer_capability(odp_clksrc, &odp_capa))) { + EM_LOG(EM_LOG_DBG, "%s: Not supported clk_src %d\n", __func__, clk_src); + return EM_ERR_BAD_ARG; + } + + capa->max_timers = odp_capa.max_pools < EM_ODP_MAX_TIMERS ? 
+ odp_capa.max_pools : EM_ODP_MAX_TIMERS; + capa->max_num_tmo = odp_capa.max_timers; + capa->max_res.clk_src = clk_src; + capa->max_res.res_ns = odp_capa.max_res.res_ns; + capa->max_res.res_hz = odp_capa.max_res.res_hz; + capa->max_res.min_tmo = odp_capa.max_res.min_tmo; + capa->max_res.max_tmo = odp_capa.max_res.max_tmo; + capa->max_tmo.clk_src = clk_src; + capa->max_tmo.res_ns = odp_capa.max_tmo.res_ns; + capa->max_tmo.res_hz = odp_capa.max_tmo.res_hz; + capa->max_tmo.min_tmo = odp_capa.max_tmo.min_tmo; + capa->max_tmo.max_tmo = odp_capa.max_tmo.max_tmo; + + /* ring timer basic capability */ + capa->ring.max_rings = odp_capa.periodic.max_pools; /* 0 if not supported */ + capa->ring.max_num_tmo = odp_capa.periodic.max_timers; + capa->ring.min_base_hz.integer = odp_capa.periodic.min_base_freq_hz.integer; + capa->ring.min_base_hz.numer = odp_capa.periodic.min_base_freq_hz.numer; + capa->ring.min_base_hz.denom = odp_capa.periodic.min_base_freq_hz.denom; + capa->ring.max_base_hz.integer = odp_capa.periodic.max_base_freq_hz.integer; + capa->ring.max_base_hz.numer = odp_capa.periodic.max_base_freq_hz.numer; + capa->ring.max_base_hz.denom = odp_capa.periodic.max_base_freq_hz.denom; + return EM_OK; +} + +em_status_t em_timer_res_capability(em_timer_res_param_t *res, em_timer_clksrc_t clk_src) +{ + if (EM_CHECK_LEVEL > 0 && unlikely(res == NULL)) { + EM_LOG(EM_LOG_DBG, "%s: NULL ptr res\n", __func__); + return EM_ERR_BAD_POINTER; + } + + odp_timer_clk_src_t odp_clksrc; + odp_timer_res_capability_t odp_res_capa; + int err; + + err = timer_clksrc_em2odp(clk_src, &odp_clksrc); + if (unlikely(err)) { + EM_LOG(EM_LOG_DBG, "%s: Not supported clk_src %d\n", __func__, clk_src); + return EM_ERR_BAD_ARG; + } + memset(&odp_res_capa, 0, sizeof(odp_timer_res_capability_t)); + odp_res_capa.res_ns = res->res_ns; + odp_res_capa.res_hz = res->res_hz; /* ODP will check if both were set */ + odp_res_capa.max_tmo = res->max_tmo; + err = odp_timer_res_capability(odp_clksrc, &odp_res_capa); + if (unlikely(err)) { + EM_LOG(EM_LOG_DBG, "%s: ODP res_capability failed (ret %d)!\n", __func__, err); + return EM_ERR_BAD_ARG; + } + res->min_tmo = odp_res_capa.min_tmo; + res->max_tmo = odp_res_capa.max_tmo; + res->res_ns = odp_res_capa.res_ns; + res->res_hz = odp_res_capa.res_hz; + res->clk_src = clk_src; + return EM_OK; +} + +em_status_t em_timer_ring_capability(em_timer_ring_param_t *ring) +{ + odp_timer_clk_src_t odp_clksrc; + odp_timer_periodic_capability_t pcapa; + + if (EM_CHECK_LEVEL > 0 && unlikely(ring == NULL)) { + EM_LOG(EM_LOG_DBG, "%s: NULL ptr ring\n", __func__); + return EM_ERR_BAD_POINTER; + } + + if (unlikely(timer_clksrc_em2odp(ring->clk_src, &odp_clksrc))) { + EM_LOG(EM_LOG_DBG, "%s: Invalid clk_src %d\n", __func__, ring->clk_src); + return EM_ERR_BAD_ARG; + } + + pcapa.base_freq_hz.integer = ring->base_hz.integer; + pcapa.base_freq_hz.numer = ring->base_hz.numer; + pcapa.base_freq_hz.denom = ring->base_hz.denom; + pcapa.max_multiplier = ring->max_mul; + pcapa.res_ns = ring->res_ns; + int rv = odp_timer_periodic_capability(odp_clksrc, &pcapa); + + ring->base_hz.integer = pcapa.base_freq_hz.integer; + ring->base_hz.numer = pcapa.base_freq_hz.numer; + ring->base_hz.denom = pcapa.base_freq_hz.denom; + ring->max_mul = pcapa.max_multiplier; + ring->res_ns = pcapa.res_ns; + + if (unlikely(rv < 0)) { + EM_LOG(EM_LOG_DBG, "%s: odp failed periodic capability for clk_src %d\n", + __func__, ring->clk_src); + return EM_ERR_LIB_FAILED; + } + if (rv == 0) + return EM_ERR_NOT_SUPPORTED; /* no error, but no exact support */ 
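+
+	/* The possibly adjusted values were already written back into 'ring'
+	 * above, so on EM_ERR_NOT_SUPPORTED the caller can inspect them and
+	 * retry with values the platform can actually provide.
+	 */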
+ + return EM_OK; /* meet or exceed */ +} + +em_timer_t em_timer_create(const em_timer_attr_t *tmr_attr) +{ + /* timers are initialized? */ + if (unlikely(em_shm->timers.init_check != EM_CHECK_INIT_CALLED)) { + INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_TIMER_CREATE, + "Timer is not initialized!"); + return EM_TIMER_UNDEF; + } + + if (EM_CHECK_LEVEL > 0) { + if (check_timer_attr(tmr_attr) == false) + return EM_TIMER_UNDEF; + } + + odp_timer_pool_param_t odp_tpool_param; + odp_timer_clk_src_t odp_clksrc; + + odp_timer_pool_param_init(&odp_tpool_param); + odp_tpool_param.res_ns = tmr_attr->resparam.res_ns; + odp_tpool_param.res_hz = tmr_attr->resparam.res_hz; + odp_tpool_param.min_tmo = tmr_attr->resparam.min_tmo; + odp_tpool_param.max_tmo = tmr_attr->resparam.max_tmo; + odp_tpool_param.num_timers = tmr_attr->num_tmo; + odp_tpool_param.priv = tmr_attr->flags & EM_TIMER_FLAG_PRIVATE ? 1 : 0; + if (unlikely(timer_clksrc_em2odp(tmr_attr->resparam.clk_src, &odp_clksrc))) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_CREATE, + "Unsupported EM-timer clock source:%d", + tmr_attr->resparam.clk_src); + return EM_TIMER_UNDEF; + } + odp_tpool_param.clk_src = odp_clksrc; + + /* check queue type support */ + odp_timer_capability_t capa; + + if (unlikely(odp_timer_capability(odp_clksrc, &capa))) { + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_CREATE, + "ODP timer capa failed for clk:%d", + tmr_attr->resparam.clk_src); + return EM_TIMER_UNDEF; + } + if (unlikely(!capa.queue_type_sched)) { /* must support scheduled queues */ + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_CREATE, + "ODP does not support scheduled q for clk:%d", + tmr_attr->resparam.clk_src); + return EM_TIMER_UNDEF; + } + + odp_ticketlock_lock(&em_shm->timers.timer_lock); + + int i = find_free_timer_index(); + + if (unlikely(i >= EM_ODP_MAX_TIMERS)) { + odp_ticketlock_unlock(&em_shm->timers.timer_lock); + INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TIMER_CREATE, + "No more timers available"); + return EM_TIMER_UNDEF; + } + + event_timer_t *timer = &em_shm->timers.timer[i]; + char timer_pool_name[ODP_TIMER_POOL_NAME_LEN]; + const char *name = tmr_attr->name; + const char *reason = ""; + + if (tmr_attr->name[0] == '\0') { /* replace NULL with default */ + snprintf(timer_pool_name, ODP_TIMER_POOL_NAME_LEN, + "EM-timer-%d", timer->idx); /* idx initialized by timer_init */ + name = timer_pool_name; + } + + TMR_DBG_PRINT("Creating ODP tmr pool: clk %d, res_ns %lu, res_hz %lu\n", + odp_tpool_param.clk_src, odp_tpool_param.res_ns, + odp_tpool_param.res_hz); + timer->odp_tmr_pool = odp_timer_pool_create(name, &odp_tpool_param); + if (unlikely(timer->odp_tmr_pool == ODP_TIMER_POOL_INVALID)) { + reason = "odp_timer_pool_create error"; + goto error_locked; + } + TMR_DBG_PRINT("Created timer: %s with idx: %d\n", name, timer->idx); + + /* tmo handle pool can be per-timer or shared */ + if (!em_shm->opt.timer.shared_tmo_pool_enable) { /* per-timer pool */ + odp_pool_t opool = create_tmo_handle_pool(tmr_attr->num_tmo, + em_shm->opt.timer.tmo_pool_cache, timer); + + if (unlikely(opool == ODP_POOL_INVALID)) { + reason = "Tmo handle buffer pool create failed"; + goto error_locked; + } + + timer->tmo_pool = opool; + TMR_DBG_PRINT("Created per-timer tmo handle pool\n"); + } else { + if (em_shm->timers.shared_tmo_pool == ODP_POOL_INVALID) { /* first timer */ + odp_pool_t opool = + create_tmo_handle_pool(em_shm->opt.timer.shared_tmo_pool_size, + em_shm->opt.timer.tmo_pool_cache, timer); + + if (unlikely(opool == ODP_POOL_INVALID)) { + 
reason = "Shared tmo handle buffer pool create failed"; + goto error_locked; + } + timer->tmo_pool = opool; + em_shm->timers.shared_tmo_pool = opool; + TMR_DBG_PRINT("Created shared tmo handle pool for total %u tmos\n", + em_shm->opt.timer.shared_tmo_pool_size); + } else { + timer->tmo_pool = em_shm->timers.shared_tmo_pool; + } + } + + timer->num_tmo_reserve = tmr_attr->num_tmo; + if (em_shm->opt.timer.shared_tmo_pool_enable) { /* check reservation */ + uint32_t left = em_shm->opt.timer.shared_tmo_pool_size - em_shm->timers.reserved; + + if (timer->num_tmo_reserve > left) { + TMR_DBG_PRINT("Not enough tmos left in shared pool (%u)\n", left); + reason = "Not enough tmos left in shared pool"; + goto error_locked; + } + em_shm->timers.reserved += timer->num_tmo_reserve; + TMR_DBG_PRINT("Updated shared tmo reserve by +%u to %u\n", + timer->num_tmo_reserve, em_shm->timers.reserved); + } + timer->flags = tmr_attr->flags; + timer->plain_q_ok = capa.queue_type_plain; + timer->is_ring = false; + +#if ODP_VERSION_API_NUM(1, 43, 0) <= ODP_VERSION_API + if (odp_timer_pool_start_multi(&timer->odp_tmr_pool, 1) != 1) { + reason = "odp_timer_pool_start_multi failed"; + goto error_locked; + } +#else + odp_timer_pool_start(); +#endif + em_shm->timers.num_timers++; + odp_ticketlock_unlock(&em_shm->timers.timer_lock); + + TMR_DBG_PRINT("ret %" PRI_TMR ", total timers %u\n", TMR_I2H(i), em_shm->timers.num_timers); + return TMR_I2H(i); + +error_locked: + cleanup_timer_create_fail(timer); + odp_ticketlock_unlock(&em_shm->timers.timer_lock); + + TMR_DBG_PRINT("ERR odp tmr pool in: clk %u, res %lu, min %lu, max %lu, num %u\n", + odp_tpool_param.clk_src, odp_tpool_param.res_ns, + odp_tpool_param.min_tmo, odp_tpool_param.max_tmo, odp_tpool_param.num_timers); + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_CREATE, + "Timer pool create failed, reason: ", reason); + return EM_TIMER_UNDEF; +} + +em_timer_t em_timer_ring_create(const em_timer_attr_t *ring_attr) +{ + /* timers are initialized? */ + if (unlikely(em_shm->timers.init_check != EM_CHECK_INIT_CALLED)) { + INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_TIMER_CREATE, + "Timer is disabled!"); + return EM_TIMER_UNDEF; + } + + if (EM_CHECK_LEVEL > 0 && unlikely(check_timer_attr_ring(ring_attr) == false)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_RING_CREATE, + "NULL or incorrect attribute"); + return EM_TIMER_UNDEF; + } + + odp_timer_pool_param_t odp_tpool_param; + odp_timer_clk_src_t odp_clksrc; + + odp_timer_pool_param_init(&odp_tpool_param); + odp_tpool_param.timer_type = ODP_TIMER_TYPE_PERIODIC; + odp_tpool_param.exp_mode = ODP_TIMER_EXP_AFTER; + odp_tpool_param.num_timers = ring_attr->num_tmo; + odp_tpool_param.priv = ring_attr->flags & EM_TIMER_FLAG_PRIVATE ? 
1 : 0; + if (unlikely(timer_clksrc_em2odp(ring_attr->ringparam.clk_src, &odp_clksrc))) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_RING_CREATE, + "Unsupported EM-timer clock source:%d", + ring_attr->ringparam.clk_src); + return EM_TIMER_UNDEF; + } + odp_tpool_param.clk_src = odp_clksrc; + odp_tpool_param.periodic.base_freq_hz.integer = ring_attr->ringparam.base_hz.integer; + odp_tpool_param.periodic.base_freq_hz.numer = ring_attr->ringparam.base_hz.numer; + odp_tpool_param.periodic.base_freq_hz.denom = ring_attr->ringparam.base_hz.denom; + odp_tpool_param.periodic.max_multiplier = ring_attr->ringparam.max_mul; + odp_tpool_param.res_hz = 0; + odp_tpool_param.res_ns = ring_attr->ringparam.res_ns; + + /* check queue type support */ + odp_timer_capability_t capa; + + if (unlikely(odp_timer_capability(odp_clksrc, &capa))) { + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_RING_CREATE, + "ODP timer capa failed for clk:%d", + ring_attr->ringparam.clk_src); + return EM_TIMER_UNDEF; + } + if (unlikely(!capa.queue_type_sched)) { /* must support scheduled queues */ + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_RING_CREATE, + "ODP does not support scheduled q for clk:%d", + ring_attr->ringparam.clk_src); + return EM_TIMER_UNDEF; + } + + /* lock context to find free slot and update it */ + timer_storage_t *const tmrs = &em_shm->timers; + + odp_ticketlock_lock(&tmrs->timer_lock); + + /* is there enough events left in shared pool ? */ + uint32_t left = em_shm->opt.timer.ring.timer_event_pool_size - tmrs->ring_reserved; + + if (ring_attr->num_tmo > left) { + odp_ticketlock_unlock(&tmrs->timer_lock); + INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TIMER_RING_CREATE, + "Too few ring timeout events left (req %u/%u)", + ring_attr->num_tmo, left); + return EM_TIMER_UNDEF; + } + + /* allocate timer */ + int i = find_free_timer_index(); + + if (unlikely(i >= EM_ODP_MAX_TIMERS)) { + odp_ticketlock_unlock(&tmrs->timer_lock); + INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TIMER_RING_CREATE, + "No more timers available"); + return EM_TIMER_UNDEF; + } + + event_timer_t *timer = &tmrs->timer[i]; + + /* then timer pool */ + char timer_pool_name[ODP_TIMER_POOL_NAME_LEN]; + const char *name = ring_attr->name; + const char *reason = ""; + + if (ring_attr->name[0] == '\0') { /* replace NULL with default */ + snprintf(timer_pool_name, ODP_TIMER_POOL_NAME_LEN, + "EM-timer-%d", timer->idx); /* idx initialized by timer_init */ + name = timer_pool_name; + } + + TMR_DBG_PRINT("Creating ODP periodic tmr pool: clk %d, res_ns %lu, base_hz %lu\n", + odp_tpool_param.clk_src, odp_tpool_param.res_ns, + odp_tpool_param.periodic.base_freq_hz.integer); + timer->odp_tmr_pool = odp_timer_pool_create(name, &odp_tpool_param); + if (unlikely(timer->odp_tmr_pool == ODP_TIMER_POOL_INVALID)) { + reason = "odp_timer_pool_create failed"; + goto error_locked; + } + TMR_DBG_PRINT("Created ring timer: %s with idx: %d\n", name, timer->idx); + + /* tmo handle pool can be per-timer or shared */ + if (!em_shm->opt.timer.shared_tmo_pool_enable) { /* per-timer pool */ + odp_pool_t opool = create_tmo_handle_pool(ring_attr->num_tmo, + em_shm->opt.timer.tmo_pool_cache, timer); + + if (unlikely(opool == ODP_POOL_INVALID)) { + reason = "tmo handle pool creation failed"; + goto error_locked; + } + + timer->tmo_pool = opool; + TMR_DBG_PRINT("Created per-timer tmo handle pool %p\n", opool); + } else { + if (em_shm->timers.shared_tmo_pool == ODP_POOL_INVALID) { /* first timer */ + odp_pool_t opool = + 
create_tmo_handle_pool(em_shm->opt.timer.shared_tmo_pool_size, + em_shm->opt.timer.tmo_pool_cache, timer); + + if (unlikely(opool == ODP_POOL_INVALID)) { + reason = "Shared tmo handle pool creation failed"; + goto error_locked; + } + + timer->tmo_pool = opool; + em_shm->timers.shared_tmo_pool = opool; + TMR_DBG_PRINT("Created shared tmo handle pool %p\n", opool); + } else { + timer->tmo_pool = em_shm->timers.shared_tmo_pool; + } + } + + timer->num_tmo_reserve = ring_attr->num_tmo; + if (em_shm->opt.timer.shared_tmo_pool_enable) { /* check reservation */ + left = em_shm->opt.timer.shared_tmo_pool_size - em_shm->timers.reserved; + + if (timer->num_tmo_reserve > left) { + TMR_DBG_PRINT("Not enough tmos left in shared pool (%u)\n", left); + reason = "Not enough tmos left in shared pool"; + goto error_locked; + } + em_shm->timers.reserved += timer->num_tmo_reserve; + TMR_DBG_PRINT("Updated shared tmo reserve by +%u to %u\n", + timer->num_tmo_reserve, em_shm->timers.reserved); + } + + /* odp timeout event pool for ring tmo events is always shared for all ring timers*/ + if (tmrs->ring_tmo_pool == ODP_POOL_INVALID) { + odp_pool_param_t odp_tmo_pool_param; + char pool_name[ODP_POOL_NAME_LEN]; + + odp_pool_param_init(&odp_tmo_pool_param); + odp_tmo_pool_param.type = ODP_POOL_TIMEOUT; + odp_tmo_pool_param.tmo.cache_size = em_shm->opt.timer.ring.timer_event_pool_cache; + TMR_DBG_PRINT("ring tmo event pool cache %u\n", odp_tmo_pool_param.tmo.cache_size); + odp_tmo_pool_param.tmo.num = em_shm->opt.timer.ring.timer_event_pool_size; + TMR_DBG_PRINT("ring tmo event pool size %u\n", odp_tmo_pool_param.tmo.num); + odp_tmo_pool_param.tmo.uarea_size = sizeof(event_hdr_t); + odp_tmo_pool_param.stats.all = 0; + snprintf(pool_name, ODP_POOL_NAME_LEN, "Ring-%d-tmo-pool", timer->idx); + tmrs->ring_tmo_pool = odp_pool_create(pool_name, &odp_tmo_pool_param); + if (unlikely(tmrs->ring_tmo_pool == ODP_POOL_INVALID)) { + reason = "odp timeout event pool creation failed"; + goto error_locked; + } + TMR_DBG_PRINT("Created ODP-timeout event pool %p: '%s'\n", + tmrs->ring_tmo_pool, pool_name); + } + + tmrs->ring_reserved += ring_attr->num_tmo; + TMR_DBG_PRINT("Updated ring reserve by +%u to %u\n", ring_attr->num_tmo, + tmrs->ring_reserved); + tmrs->num_rings++; + tmrs->num_timers++; + timer->num_ring_reserve = ring_attr->num_tmo; + timer->flags = ring_attr->flags; + timer->plain_q_ok = capa.queue_type_plain; + timer->is_ring = true; + tmrs->num_ring_create_calls++; + +#if ODP_VERSION_API_NUM(1, 43, 0) <= ODP_VERSION_API + if (odp_timer_pool_start_multi(&timer->odp_tmr_pool, 1) != 1) { + reason = "odp_timer_pool_start_multi failed"; + goto error_locked; + } +#else + odp_timer_pool_start(); +#endif + + odp_ticketlock_unlock(&em_shm->timers.timer_lock); + + TMR_DBG_PRINT("ret %" PRI_TMR ", total timers %u\n", TMR_I2H(i), tmrs->num_timers); + return TMR_I2H(i); + +error_locked: + cleanup_timer_create_fail(timer); + odp_ticketlock_unlock(&tmrs->timer_lock); + + TMR_DBG_PRINT("ERR odp tmr ring pool in: clk %u, res %lu, base_hz %lu, max_mul %lu, num tmo %u\n", + ring_attr->ringparam.clk_src, + ring_attr->ringparam.res_ns, + ring_attr->ringparam.base_hz.integer, + ring_attr->ringparam.max_mul, + ring_attr->num_tmo); + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_RING_CREATE, + "Ring timer create failed, reason: ", reason); + return EM_TIMER_UNDEF; +} + +em_status_t em_timer_delete(em_timer_t tmr) +{ + timer_storage_t *const tmrs = &em_shm->timers; + int i = TMR_H2I(tmr); + em_status_t rv = EM_OK; + odp_pool_t pool_fail = 
ODP_POOL_INVALID; + + /* take lock before checking so nothing can change */ + odp_ticketlock_lock(&tmrs->timer_lock); + if (unlikely(!is_timer_valid(tmr))) { + odp_ticketlock_unlock(&tmrs->timer_lock); + return INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_DELETE, + "Invalid timer:%" PRI_TMR "", tmr); + } + + if (tmrs->timer[i].tmo_pool != tmrs->shared_tmo_pool) { /* don't delete shared pool */ + if (unlikely(odp_pool_destroy(tmrs->timer[i].tmo_pool) != 0)) { + rv = EM_ERR_LIB_FAILED; + pool_fail = tmrs->timer[i].tmo_pool; + } else { + TMR_DBG_PRINT("Deleted odp pool %p\n", tmrs->timer[i].tmo_pool); + } + } + tmrs->timer[i].tmo_pool = ODP_POOL_INVALID; + odp_timer_pool_destroy(tmrs->timer[i].odp_tmr_pool); + tmrs->timer[i].odp_tmr_pool = ODP_TIMER_POOL_INVALID; + + /* Ring delete. Don't remove shared event pool as user could still have event */ + if (tmrs->timer[i].is_ring && tmrs->num_rings) { + tmrs->num_rings--; + if (tmrs->num_rings < 1) + TMR_DBG_PRINT("Last ring deleted"); + tmrs->ring_reserved -= tmrs->timer[i].num_ring_reserve; + TMR_DBG_PRINT("Updated ring reserve by -%u to %u\n", + tmrs->timer[i].num_ring_reserve, tmrs->ring_reserved); + tmrs->timer[i].num_ring_reserve = 0; + } + + tmrs->num_timers--; + if (tmrs->shared_tmo_pool != ODP_POOL_INVALID) { /* shared pool in use */ + tmrs->reserved -= tmrs->timer[i].num_tmo_reserve; + TMR_DBG_PRINT("Updated tmo reserve by -%u to %u\n", + tmrs->timer[i].num_tmo_reserve, tmrs->reserved); + tmrs->timer[i].num_tmo_reserve = 0; + } + if (tmrs->num_timers == 0 && tmrs->shared_tmo_pool != ODP_POOL_INVALID) { + /* no more timers, delete shared tmo pool */ + if (unlikely(odp_pool_destroy(tmrs->shared_tmo_pool) != 0)) { + rv = EM_ERR_LIB_FAILED; + pool_fail = tmrs->shared_tmo_pool; + } else { + TMR_DBG_PRINT("Deleted shared tmo pool %p\n", tmrs->shared_tmo_pool); + tmrs->shared_tmo_pool = ODP_POOL_INVALID; + } + } + + odp_ticketlock_unlock(&tmrs->timer_lock); + if (unlikely(rv != EM_OK)) { + return INTERNAL_ERROR(rv, EM_ESCOPE_TIMER_DELETE, + "timer %p delete fail, odp pool %p fail\n", tmr, pool_fail); + } + TMR_DBG_PRINT("ok, deleted timer %p, num_timers %u\n", tmr, tmrs->num_timers); + return rv; +} + +em_timer_tick_t em_timer_current_tick(em_timer_t tmr) +{ + const timer_storage_t *const tmrs = &em_shm->timers; + int i = TMR_H2I(tmr); + + if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) + return 0; + + return odp_timer_current_tick(tmrs->timer[i].odp_tmr_pool); +} + +em_tmo_t em_tmo_create(em_timer_t tmr, em_tmo_flag_t flags, em_queue_t queue) +{ + return em_tmo_create_arg(tmr, flags, queue, NULL); +} + +em_tmo_t em_tmo_create_arg(em_timer_t tmr, em_tmo_flag_t flags, + em_queue_t queue, em_tmo_args_t *args) +{ + const queue_elem_t *const q_elem = queue_elem_get(queue); + + if (EM_CHECK_LEVEL > 0) { + if (unlikely(!is_timer_valid(tmr))) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE, + "Invalid timer:%" PRI_TMR "", tmr); + return EM_TMO_UNDEF; + } + if (unlikely(q_elem == NULL || !queue_allocated(q_elem))) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE, + "Tmr:%" PRI_TMR ": inv.Q:%" PRI_QUEUE "", + tmr, queue); + return EM_TMO_UNDEF; + } + if (unlikely(!is_queue_valid_type(tmr, q_elem))) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE, + "Tmr:%" PRI_TMR ": inv.Q (type):%" PRI_QUEUE "", + tmr, queue); + return EM_TMO_UNDEF; + } + if (unlikely(!check_tmo_flags(flags))) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE, + "Tmr:%" PRI_TMR ": inv. 
tmo-flags:0x%x", + tmr, flags); + return EM_TMO_UNDEF; + } + } + + int i = TMR_H2I(tmr); + + if (EM_CHECK_LEVEL > 1 && + em_shm->timers.timer[i].is_ring && + !(flags & EM_TMO_FLAG_PERIODIC)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE, + "Tmr:%" PRI_TMR ": asking oneshot with ring timer!", + tmr); + return EM_TMO_UNDEF; + } + + odp_buffer_t tmo_buf = odp_buffer_alloc(em_shm->timers.timer[i].tmo_pool); + + if (unlikely(tmo_buf == ODP_BUFFER_INVALID)) { + INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TMO_CREATE, + "Tmr:%" PRI_TMR ": tmo pool exhausted", tmr); + return EM_TMO_UNDEF; + } + + em_timer_timeout_t *tmo = odp_buffer_addr(tmo_buf); + odp_timer_pool_t odptmr = em_shm->timers.timer[i].odp_tmr_pool; + + const void *userptr = NULL; + + if (args != NULL) + userptr = args->userptr; + + tmo->odp_timer = odp_timer_alloc(odptmr, q_elem->odp_queue, userptr); + if (unlikely(tmo->odp_timer == ODP_TIMER_INVALID)) { + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_CREATE, + "Tmr:%" PRI_TMR ": odp_timer_alloc() failed", tmr); + odp_buffer_free(tmo_buf); + return EM_TMO_UNDEF; + } + + /* OK, init state. Some values copied for faster access runtime */ + tmo->period = 0; + tmo->odp_timer_pool = odptmr; + tmo->timer = tmr; + tmo->odp_buffer = tmo_buf; + tmo->flags = flags; + tmo->queue = queue; + tmo->is_ring = em_shm->timers.timer[i].is_ring; + tmo->odp_timeout = ODP_EVENT_INVALID; + tmo->ring_tmo_pool = em_shm->timers.ring_tmo_pool; + + if (tmo->is_ring) { /* pre-allocate timeout event to save time at start */ + odp_event_t odp_tmo_event = alloc_odp_timeout(tmo); + + if (unlikely(odp_tmo_event == ODP_EVENT_INVALID)) { + INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TMO_CREATE, + "Ring: odp timeout event allocation failed"); + odp_timer_free(tmo->odp_timer); + odp_buffer_free(tmo_buf); + return EM_TMO_UNDEF; + } + tmo->odp_timeout = odp_tmo_event; + TMR_DBG_PRINT("Ring: allocated odp timeout ev %p\n", tmo->odp_timeout); + } + + if (EM_TIMER_TMO_STATS) + memset(&tmo->stats, 0, sizeof(em_tmo_stats_t)); + + odp_atomic_init_u32(&tmo->state, EM_TMO_STATE_IDLE); + TMR_DBG_PRINT("ODP timer %p allocated\n", tmo->odp_timer); + TMR_DBG_PRINT("tmo %p created\n", tmo); + return tmo; +} + +em_status_t em_tmo_delete(em_tmo_t tmo) +{ + if (EM_CHECK_LEVEL > 0) { + RETURN_ERROR_IF(tmo == EM_TMO_UNDEF, + EM_ERR_BAD_ARG, EM_ESCOPE_TMO_DELETE, + "Invalid args: tmo:%" PRI_TMO, tmo); + } + + em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); + + if (EM_CHECK_LEVEL > 1) { + /* check that tmo buf is valid before accessing other struct members */ + RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer), + EM_ERR_BAD_ID, EM_ESCOPE_TMO_DELETE, + "Invalid tmo buffer"); + + RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN, + EM_ERR_BAD_STATE, EM_ESCOPE_TMO_DELETE, + "Invalid tmo state:%d", tmo_state); + + RETURN_ERROR_IF(tmo->odp_timer == ODP_TIMER_INVALID, + EM_ERR_BAD_ID, EM_ESCOPE_TMO_DELETE, + "Invalid tmo odp_timer, deleted?"); + } + + TMR_DBG_PRINT("ODP timer %p\n", tmo->odp_timer); + + odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_UNKNOWN); + +#if ODP_VERSION_API_NUM(1, 43, 0) <= ODP_VERSION_API + /* ODP 1.43 odp_timer_free() returns status */ + int fret = odp_timer_free(tmo->odp_timer); + + RETURN_ERROR_IF(fret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_DELETE, + "odp timer free failed!?, rv %d\n", fret); +#else + /* Older than ODP 1.43 odp_timer_free() returns an event */ + odp_event_t odp_evt; + + odp_evt = ODP_EVENT_INVALID; + odp_evt = odp_timer_free(tmo->odp_timer); + + 
RETURN_ERROR_IF(odp_evt != ODP_EVENT_INVALID, EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_DELETE, + "odp timer free returned an event %p\n", odp_evt); +#endif + + odp_buffer_t tmp = tmo->odp_buffer; + + tmo->odp_timer = ODP_TIMER_INVALID; + tmo->odp_buffer = ODP_BUFFER_INVALID; + + if (tmo->is_ring && tmo->odp_timeout != ODP_EVENT_INVALID) { + TMR_DBG_PRINT("ring: free unused ODP timeout ev %p\n", tmo->odp_timeout); + free_odp_timeout(tmo->odp_timeout); + tmo->odp_timeout = ODP_EVENT_INVALID; + } + + odp_buffer_free(tmp); + + TMR_DBG_PRINT("tmo %p delete ok\n", tmo); + + return EM_OK; +} + +em_status_t em_tmo_set_abs(em_tmo_t tmo, em_timer_tick_t ticks_abs, + em_event_t tmo_ev) +{ + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && + (tmo == EM_TMO_UNDEF || tmo_ev == EM_EVENT_UNDEF), + EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_ABS, + "Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "", + tmo, tmo_ev); + /* check that tmo buf is valid before accessing other struct members */ + RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer), + EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_ABS, + "Invalid tmo buffer"); + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && + (tmo->flags & EM_TMO_FLAG_PERIODIC), + EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_SET_ABS, + "Cannot set periodic tmo, use _set_periodic()"); + RETURN_ERROR_IF(EM_CHECK_LEVEL > 2 && + !is_event_type_valid(tmo_ev), + EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_ABS, + "invalid event type"); + if (EM_CHECK_LEVEL > 1) { + em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); + + RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN, + EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_ABS, + "Invalid tmo state:%d", tmo_state); + } + RETURN_ERROR_IF(EM_CHECK_LEVEL > 2 && + tmo->odp_timer == ODP_TIMER_INVALID, + EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_ABS, + "Invalid tmo odp_timer"); + + event_hdr_t *ev_hdr = event_to_hdr(tmo_ev); + odp_event_t odp_ev = event_em2odp(tmo_ev); + bool esv_ena = esv_enabled(); + odp_timer_start_t startp; + + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && + ev_hdr->event_type == EM_EVENT_TYPE_TIMER_IND, + EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_ABS, + "Invalid event type: timer-ring"); + + if (esv_ena) + evstate_usr2em(tmo_ev, ev_hdr, EVSTATE__TMO_SET_ABS); + + /* set tmo active and arm with absolute time */ + startp.tick_type = ODP_TIMER_TICK_ABS; + startp.tick = ticks_abs; + startp.tmo_ev = odp_ev; + ev_hdr->flags.tmo_type = EM_TMO_TYPE_ONESHOT; + ev_hdr->tmo = tmo; + odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE); + int odpret = odp_timer_start(tmo->odp_timer, &startp); + + if (unlikely(odpret != ODP_TIMER_SUCCESS)) { + ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE; + ev_hdr->tmo = EM_TMO_UNDEF; + odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE); + if (esv_ena) + evstate_usr2em_revert(tmo_ev, ev_hdr, EVSTATE__TMO_SET_ABS__FAIL); + + em_status_t retval = timer_rv_odp2em(odpret); + + if (retval == EM_ERR_TOONEAR) { /* skip errorhandler */ + TMR_DBG_PRINT("TOONEAR, skip ErrH\n"); + return retval; + } + + return INTERNAL_ERROR(retval, EM_ESCOPE_TMO_SET_ABS, + "odp_timer_start():%d", odpret); + } + TMR_DBG_PRINT("OK\n"); + return EM_OK; +} + +em_status_t em_tmo_set_rel(em_tmo_t tmo, em_timer_tick_t ticks_rel, + em_event_t tmo_ev) +{ + if (EM_CHECK_LEVEL > 0) { + RETURN_ERROR_IF(tmo == EM_TMO_UNDEF || tmo_ev == EM_EVENT_UNDEF, + EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_REL, + "Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "", + tmo, tmo_ev); + + RETURN_ERROR_IF(tmo->flags & EM_TMO_FLAG_PERIODIC, + EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_REL, + "%s: Periodic no longer supported", __func__); + } + if 
(EM_CHECK_LEVEL > 1) { + /* check that tmo buf is valid before accessing other struct members */ + RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer), + EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_REL, + "Invalid tmo buffer"); + + em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); + + RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN, + EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_REL, + "Invalid tmo state:%d", tmo_state); + } + RETURN_ERROR_IF(EM_CHECK_LEVEL > 2 && + !is_event_type_valid(tmo_ev), + EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_REL, + "invalid event type"); + + event_hdr_t *ev_hdr = event_to_hdr(tmo_ev); + odp_event_t odp_ev = event_em2odp(tmo_ev); + bool esv_ena = esv_enabled(); + odp_timer_start_t startp; + + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && + ev_hdr->event_type == EM_EVENT_TYPE_TIMER_IND, + EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_REL, + "Invalid event type: timer-ring"); + + if (esv_ena) + evstate_usr2em(tmo_ev, ev_hdr, EVSTATE__TMO_SET_REL); + + /* set tmo active and arm with relative time */ + startp.tick_type = ODP_TIMER_TICK_REL; + startp.tick = ticks_rel; + startp.tmo_ev = odp_ev; + ev_hdr->flags.tmo_type = EM_TMO_TYPE_ONESHOT; + ev_hdr->tmo = tmo; + odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE); + int odpret = odp_timer_start(tmo->odp_timer, &startp); + + if (unlikely(odpret != ODP_TIMER_SUCCESS)) { + ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE; + ev_hdr->tmo = EM_TMO_UNDEF; + odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE); + if (esv_ena) + evstate_usr2em_revert(tmo_ev, ev_hdr, EVSTATE__TMO_SET_REL__FAIL); + + em_status_t retval = timer_rv_odp2em(odpret); + + if (retval == EM_ERR_TOONEAR) { /* skip errorhandler */ + TMR_DBG_PRINT("TOONEAR, skip ErrH\n"); + return retval; + } + return INTERNAL_ERROR(retval, EM_ESCOPE_TMO_SET_REL, + "odp_timer_start():%d", odpret); + } + TMR_DBG_PRINT("OK\n"); + return EM_OK; +} + +em_status_t em_tmo_set_periodic(em_tmo_t tmo, + em_timer_tick_t start_abs, + em_timer_tick_t period, + em_event_t tmo_ev) +{ + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && + (tmo == EM_TMO_UNDEF || tmo_ev == EM_EVENT_UNDEF), + EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC, + "Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "", + tmo, tmo_ev); + /* check that tmo buf is valid before accessing other struct members */ + RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer), + EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_PERIODIC, + "Invalid tmo buffer"); + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !(tmo->flags & EM_TMO_FLAG_PERIODIC), + EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_SET_PERIODIC, + "Not periodic tmo"); + if (EM_CHECK_LEVEL > 1) { + em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); + + RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN, + EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_PERIODIC, + "Invalid tmo state:%d", tmo_state); + } + RETURN_ERROR_IF(EM_CHECK_LEVEL > 2 && + !is_event_type_valid(tmo_ev), + EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC, + "invalid event type"); + + event_hdr_t *ev_hdr = event_to_hdr(tmo_ev); + odp_event_t odp_ev = event_em2odp(tmo_ev); + bool esv_ena = esv_enabled(); + odp_timer_start_t startp; + + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && + ev_hdr->event_type == EM_EVENT_TYPE_TIMER_IND, + EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC, + "Invalid event type: timer-ring"); + + if (esv_ena) + evstate_usr2em(tmo_ev, ev_hdr, EVSTATE__TMO_SET_PERIODIC); + + TMR_DBG_PRINT("start %lu, period %lu\n", start_abs, period); + + tmo->period = period; + if (start_abs == 0) + start_abs = odp_timer_current_tick(tmo->odp_timer_pool) + period; + 
+	tmo->last_tick = start_abs;
+	TMR_DBG_PRINT("last_tick %lu, now %lu\n", tmo->last_tick,
+		      odp_timer_current_tick(tmo->odp_timer_pool));
+
+	/* set tmo active and arm with absolute time */
+	startp.tick_type = ODP_TIMER_TICK_ABS;
+	startp.tick = start_abs;
+	startp.tmo_ev = odp_ev;
+	ev_hdr->flags.tmo_type = EM_TMO_TYPE_PERIODIC;
+	ev_hdr->tmo = tmo;
+	odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE);
+	int odpret = odp_timer_start(tmo->odp_timer, &startp);
+
+	if (unlikely(odpret != ODP_TIMER_SUCCESS)) {
+		ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
+		ev_hdr->tmo = EM_TMO_UNDEF;
+		odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE);
+		if (esv_ena)
+			evstate_usr2em_revert(tmo_ev, ev_hdr, EVSTATE__TMO_SET_PERIODIC__FAIL);
+
+		TMR_DBG_PRINT("diff to tmo %ld\n",
+			      (int64_t)tmo->last_tick -
+			      (int64_t)odp_timer_current_tick(tmo->odp_timer_pool));
+
+		em_status_t retval = timer_rv_odp2em(odpret);
+
+		if (retval == EM_ERR_TOONEAR) { /* skip errorhandler */
+			TMR_DBG_PRINT("TOONEAR, skip ErrH\n");
+			return retval;
+		}
+		return INTERNAL_ERROR(retval,
+				      EM_ESCOPE_TMO_SET_PERIODIC,
+				      "odp_timer_start():%d", odpret);
+	}
+	TMR_DBG_PRINT("OK\n");
+	return EM_OK;
+}
+
+em_status_t em_tmo_set_periodic_ring(em_tmo_t tmo,
+				     em_timer_tick_t start_abs,
+				     uint64_t multiplier,
+				     em_event_t tmo_ev)
+{
+	RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && tmo == EM_TMO_UNDEF,
+			EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC_RING,
+			"Inv.args: tmo UNDEF");
+	/* check that tmo buf is valid before accessing other struct members */
+	RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer),
+			EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_PERIODIC_RING,
+			"Invalid tmo buffer");
+	RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !(tmo->flags & EM_TMO_FLAG_PERIODIC),
+			EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_SET_PERIODIC_RING,
+			"Not periodic tmo");
+	if (EM_CHECK_LEVEL > 1) {
+		em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state);
+
+		RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN,
+				EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_PERIODIC_RING,
+				"Invalid tmo state:%d", tmo_state);
+	}
+
+	odp_timer_periodic_start_t startp;
+	odp_event_t odp_ev = tmo->odp_timeout; /* pre-allocated */
+
+	if (tmo_ev != EM_EVENT_UNDEF) { /* user gave event to (re-)use */
+		odp_ev = event_em2odp(tmo_ev);
+		RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 &&
+				odp_event_type(odp_ev) != ODP_EVENT_TIMEOUT,
+				EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC_RING,
+				"Inv.args: given event is not a TIMER event");
+		odp_timeout_t odp_tmo = odp_timeout_from_event(odp_ev);
+		event_hdr_t *const ev_hdr = odp_timeout_user_area(odp_tmo);
+
+		ev_hdr->flags.tmo_type = EM_TMO_TYPE_PERIODIC;
+		ev_hdr->tmo = tmo;
+		TMR_DBG_PRINT("user event %p\n", tmo_ev);
+	} else {
+		tmo->odp_timeout = ODP_EVENT_INVALID; /* now used */
+	}
+
+	if (odp_ev == ODP_EVENT_INVALID) { /* restart: pre-allocated timeout already consumed */
+		odp_event_t odp_tmo_event = alloc_odp_timeout(tmo);
+
+		if (unlikely(odp_tmo_event == ODP_EVENT_INVALID))
+			return INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TMO_SET_PERIODIC_RING,
+					      "Ring: odp timeout event allocation failed");
+		odp_ev = odp_tmo_event;
+	}
+
+	TMR_DBG_PRINT("ring tmo start_abs %lu, M=%lu, odp ev=%p\n", start_abs, multiplier, odp_ev);
+	startp.first_tick = start_abs;
+	startp.freq_multiplier = multiplier;
+	startp.tmo_ev = odp_ev;
+	odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE);
+	int odpret = odp_timer_periodic_start(tmo->odp_timer, &startp);
+
+	if (unlikely(odpret != ODP_TIMER_SUCCESS)) {
+		odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE);
+
+		em_status_t retval = timer_rv_odp2em(odpret);
+
+		if (retval == EM_ERR_TOONEAR) { /* skip errorhandler */
+			TMR_DBG_PRINT("TOONEAR, skip ErrH\n");
+			return retval;
+		}
+		return INTERNAL_ERROR(retval,
+				      EM_ESCOPE_TMO_SET_PERIODIC_RING,
+				      "odp_timer_periodic_start(): ret %d", odpret);
+	}
+	/* ok */
+	TMR_DBG_PRINT("OK\n");
+	return EM_OK;
+}
+
+em_status_t em_tmo_cancel(em_tmo_t tmo, em_event_t *cur_event)
+{
+	if (EM_CHECK_LEVEL > 0) {
+		RETURN_ERROR_IF(tmo == EM_TMO_UNDEF || cur_event == NULL,
+				EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CANCEL,
+				"Invalid args: tmo:%" PRI_TMO " cur_event:%p",
+				tmo, cur_event);
+	}
+	*cur_event = EM_EVENT_UNDEF;
+	if (EM_CHECK_LEVEL > 1) {
+		RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer),
+				EM_ERR_BAD_ID, EM_ESCOPE_TMO_CANCEL,
+				"Invalid tmo buffer");
+		RETURN_ERROR_IF(tmo->odp_timer == ODP_TIMER_INVALID,
+				EM_ERR_BAD_ID, EM_ESCOPE_TMO_CANCEL,
+				"Invalid tmo odp_timer");
+	}
+
+	/* check state: EM_TMO_STATE_UNKNOWN | EM_TMO_STATE_IDLE | EM_TMO_STATE_ACTIVE */
+	em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state);
+
+	RETURN_ERROR_IF(tmo_state != EM_TMO_STATE_ACTIVE,
+			EM_ERR_BAD_STATE, EM_ESCOPE_TMO_CANCEL,
+			"Invalid tmo state:%d (!%d)", tmo_state, EM_TMO_STATE_ACTIVE);
+
+	TMR_DBG_PRINT("ODP tmo %p\n", tmo->odp_timer);
+
+	odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE);
+
+	if (tmo->is_ring) { /* periodic ring never returns an event here */
+		RETURN_ERROR_IF(odp_timer_periodic_cancel(tmo->odp_timer) != 0,
+				EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_CANCEL,
+				"odp periodic cancel fail");
+		return EM_ERR_TOONEAR; /* ack will tell when no more events are coming */
+	}
+
+	/* not a ring timer, cancel */
+	odp_event_t odp_ev = ODP_EVENT_INVALID;
+	int ret = odp_timer_cancel(tmo->odp_timer, &odp_ev);
+
+	if (ret == ODP_TIMER_TOO_NEAR) {
+		if (EM_CHECK_LEVEL > 1) {
+			RETURN_ERROR_IF(odp_ev != ODP_EVENT_INVALID,
+					EM_ERR_BAD_STATE, EM_ESCOPE_TMO_CANCEL,
+					"ODP timer cancel returned TOONEAR but also an event!");
+		}
+		TMR_DBG_PRINT("ODP returned TOONEAR\n");
+		return EM_ERR_TOONEAR;
+	}
+
+	RETURN_ERROR_IF(ret == ODP_TIMER_FAIL,
+			EM_ERR_BAD_STATE, EM_ESCOPE_TMO_CANCEL,
+			"ODP timer cancel fail!");
+
+	/*
+	 * Cancel successful (ret == ODP_TIMER_SUCCESS): odp_ev contains the canceled tmo event
+	 */
+
+	if (EM_CHECK_LEVEL > 2) {
+		RETURN_ERROR_IF(!odp_event_is_valid(odp_ev),
+				EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_CANCEL,
+				"Invalid tmo event from odp_timer_cancel");
+	}
+
+	em_event_t tmo_ev = event_odp2em(odp_ev);
+	event_hdr_t *ev_hdr = event_to_hdr(tmo_ev);
+
+	/* successful cancel also resets the event tmo type */
+	ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
+	ev_hdr->tmo = EM_TMO_UNDEF;
+
+	if (esv_enabled())
+		tmo_ev = evstate_em2usr(tmo_ev, ev_hdr, EVSTATE__TMO_CANCEL);
+
+	*cur_event = tmo_ev;
+	TMR_DBG_PRINT("OK\n");
+	return EM_OK;
+}
+
+em_status_t em_tmo_ack(em_tmo_t tmo, em_event_t next_tmo_ev)
+{
+	RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 &&
+			(tmo == EM_TMO_UNDEF || next_tmo_ev == EM_EVENT_UNDEF),
+			EM_ERR_BAD_ARG, EM_ESCOPE_TMO_ACK,
+			"Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "",
+			tmo, next_tmo_ev);
+	/* check that tmo buf is valid before accessing other struct members */
+	RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer),
+			EM_ERR_BAD_ID, EM_ESCOPE_TMO_ACK,
+			"Tmo ACK: invalid tmo buffer");
+	RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !(tmo->flags & EM_TMO_FLAG_PERIODIC),
+			EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_ACK,
+			"Tmo ACK: Not a periodic tmo");
+
+	if (EM_TIMER_TMO_STATS)
+		tmo->stats.num_acks++;
+
+	em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state);
+	event_hdr_t *ev_hdr = event_to_hdr(next_tmo_ev);
+	odp_event_t odp_ev = event_em2odp(next_tmo_ev);
+
+	if (tmo->is_ring) /* ring timer */
+		return ack_ring_timeout_event(tmo, next_tmo_ev, tmo_state, ev_hdr, odp_ev);
+
+	/* not periodic ring, set next timeout */
+	if (unlikely(tmo_state != EM_TMO_STATE_ACTIVE)) {
+		ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
+		ev_hdr->tmo = EM_TMO_UNDEF;
+
+		if (tmo_state == EM_TMO_STATE_IDLE) /* canceled, skip errorhandler */
+			return EM_ERR_CANCELED;
+
+		return INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_TMO_ACK,
+				      "Tmo ACK: invalid tmo state:%d", tmo_state);
+	}
+
+	bool esv_ena = esv_enabled();
+
+	if (esv_ena)
+		evstate_usr2em(next_tmo_ev, ev_hdr, EVSTATE__TMO_ACK);
+	/*
+	 * The periodic timer will silently stop if ack fails! Attempt to
+	 * handle exceptions and if the tmo cannot be renewed, call
+	 * the errorhandler so the application may recover.
+	 */
+	tmo->last_tick += tmo->period; /* maintain absolute time */
+	int ret;
+	int tries = EM_TIMER_ACK_TRIES;
+	em_status_t err;
+	odp_timer_start_t startp;
+
+	startp.tick_type = ODP_TIMER_TICK_ABS;
+	startp.tmo_ev = odp_ev;
+	ev_hdr->flags.tmo_type = EM_TMO_TYPE_PERIODIC; /* could be a new event */
+	ev_hdr->tmo = tmo;
+
+	/* try to set tmo EM_TIMER_ACK_TRIES times */
+	do {
+		/* ask new timeout for next period */
+		startp.tick = tmo->last_tick;
+		ret = odp_timer_start(tmo->odp_timer, &startp);
+		/*
+		 * ODP_TIMER_TOO_NEAR means this ack() was delayed past the
+		 * next period, i.e. the target tick is already in the past.
+		 * Other errors should not happen and are fatal for this tmo.
+		 */
+		if (likely(ret != ODP_TIMER_TOO_NEAR)) {
+			if (ret != ODP_TIMER_SUCCESS) {
+				TMR_DBG_PRINT("ODP return %d\n"
+					      "tmo tgt/tick now %lu/%lu\n",
+					      ret, tmo->last_tick,
+					      odp_timer_current_tick(tmo->odp_timer_pool));
+			}
+			break; /* ok */
+		}
+
+		/* ODP_TIMER_TOO_NEAR: ack() delayed beyond next time slot */
+		if (EM_TIMER_TMO_STATS)
+			tmo->stats.num_late_ack++;
+		TMR_DBG_PRINT("late, tgt/now %lu/%lu\n", tmo->last_tick,
+			      odp_timer_current_tick(tmo->odp_timer_pool));
+
+		if (tmo->flags & EM_TMO_FLAG_NOSKIP) /* not allowed to skip, send immediately */
+			return handle_ack_noskip(next_tmo_ev, ev_hdr, tmo->queue);
+
+		/* skip already passed periods and try again */
+		handle_ack_skip(tmo);
+
+		tries--;
+		if (unlikely(tries < 1)) {
+			err = INTERNAL_ERROR(EM_ERR_OPERATION_FAILED,
+					     EM_ESCOPE_TMO_ACK,
+					     "Tmo ACK: too many retries:%u",
+					     EM_TIMER_ACK_TRIES);
+			goto ack_err;
+		}
+	} while (ret != ODP_TIMER_SUCCESS);
+
+	if (unlikely(ret != ODP_TIMER_SUCCESS)) {
+		err = INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_ACK,
+				     "Tmo ACK: failed to renew tmo (odp ret %d)",
+				     ret);
+		goto ack_err;
+	}
+	return EM_OK;
+
+ack_err:
+	/* fail, restore event state */
+	ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
+	ev_hdr->tmo = EM_TMO_UNDEF;
+	if (esv_ena)
+		evstate_usr2em_revert(next_tmo_ev, ev_hdr, EVSTATE__TMO_ACK__FAIL);
+	return err;
+}
+
+int em_timer_get_all(em_timer_t *tmr_list, int max)
+{
+	odp_ticketlock_lock(&em_shm->timers.timer_lock);
+
+	const uint32_t num_timers = em_shm->timers.num_timers;
+
+	if (tmr_list && max > 0 && num_timers > 0) {
+		int num = 0;
+
+		for (int i = 0; i < EM_ODP_MAX_TIMERS; i++) {
+			if (em_shm->timers.timer[i].odp_tmr_pool != ODP_TIMER_POOL_INVALID) {
+				tmr_list[num] = TMR_I2H(i);
+				num++;
+				if (num >= max)
+					break;
+			}
+		}
+	}
+
+	odp_ticketlock_unlock(&em_shm->timers.timer_lock);
+
+	return num_timers;
+}
+
+em_status_t em_timer_get_attr(em_timer_t tmr, em_timer_attr_t *tmr_attr)
+{
+	odp_timer_pool_info_t poolinfo;
+	int i = TMR_H2I(tmr);
+	int ret;
+	em_timer_clksrc_t clk = EM_TIMER_CLKSRC_DEFAULT;
+
+	if (EM_CHECK_LEVEL > 0)
+		RETURN_ERROR_IF(!is_timer_valid(tmr) || tmr_attr == NULL,
+				EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_GET_ATTR,
+				"Inv.args: timer:%" PRI_TMR " tmr_attr:%p",
+				tmr, tmr_attr);
+
+	/* get current values from ODP */
+	ret = odp_timer_pool_info(em_shm->timers.timer[i].odp_tmr_pool, &poolinfo);
+	RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_GET_ATTR,
+			"ODP timer pool info failed");
+
+	timer_clksrc_odp2em(poolinfo.param.clk_src, &clk);
+
+	if (poolinfo.param.timer_type == ODP_TIMER_TYPE_SINGLE) {
+		tmr_attr->resparam.res_ns = poolinfo.param.res_ns;
+		tmr_attr->resparam.res_hz = poolinfo.param.res_hz;
+		tmr_attr->resparam.max_tmo = poolinfo.param.max_tmo;
+		tmr_attr->resparam.min_tmo = poolinfo.param.min_tmo;
+		tmr_attr->resparam.clk_src = clk;
+		memset(&tmr_attr->ringparam, 0, sizeof(em_timer_ring_param_t));
+	} else {
+		tmr_attr->ringparam.base_hz.integer = poolinfo.param.periodic.base_freq_hz.integer;
+		tmr_attr->ringparam.base_hz.numer = poolinfo.param.periodic.base_freq_hz.numer;
+		tmr_attr->ringparam.base_hz.denom = poolinfo.param.periodic.base_freq_hz.denom;
+		tmr_attr->ringparam.max_mul = poolinfo.param.periodic.max_multiplier;
+		tmr_attr->ringparam.res_ns = poolinfo.param.res_ns;
+		tmr_attr->ringparam.clk_src = clk;
+		memset(&tmr_attr->resparam, 0, sizeof(em_timer_res_param_t));
+	}
+
+	tmr_attr->num_tmo = poolinfo.param.num_timers;
+	tmr_attr->flags = em_shm->timers.timer[i].flags;
+
+	strncpy(tmr_attr->name, poolinfo.name, EM_TIMER_NAME_LEN - 1);
+	tmr_attr->name[EM_TIMER_NAME_LEN - 1] = '\0';
+	return EM_OK;
+}
+
+uint64_t em_timer_get_freq(em_timer_t tmr)
+{
+	const timer_storage_t *const tmrs = &em_shm->timers;
+
+	if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) {
+		INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_GET_FREQ,
+			       "Invalid timer:%" PRI_TMR "", tmr);
+		return 0;
+	}
+
+	return odp_timer_ns_to_tick(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool,
+				    1000ULL * 1000ULL * 1000ULL); /* 1 sec */
+}
+
+uint64_t em_timer_tick_to_ns(em_timer_t tmr, em_timer_tick_t ticks)
+{
+	const timer_storage_t *const tmrs = &em_shm->timers;
+
+	if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) {
+		INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_TICK_TO_NS,
+			       "Invalid timer:%" PRI_TMR "", tmr);
+		return 0;
+	}
+
+	return odp_timer_tick_to_ns(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool, ticks);
+}
+
+em_timer_tick_t em_timer_ns_to_tick(em_timer_t tmr, uint64_t ns)
+{
+	const timer_storage_t *const tmrs = &em_shm->timers;
+
+	if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) {
+		INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_NS_TO_TICK,
+			       "Invalid timer:%" PRI_TMR "", tmr);
+		return 0;
+	}
+
+	return odp_timer_ns_to_tick(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool, ns);
+}
+
+em_tmo_state_t em_tmo_get_state(em_tmo_t tmo)
+{
+	if (EM_CHECK_LEVEL > 0 && unlikely(tmo == EM_TMO_UNDEF)) {
+		INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_STATE, "Invalid tmo");
+		return EM_TMO_STATE_UNKNOWN;
+	}
+	if (EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer)) {
+		INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_TMO_GET_STATE, "Invalid tmo buffer");
+		return EM_TMO_STATE_UNKNOWN;
+	}
+
+	return odp_atomic_load_acq_u32(&tmo->state);
+}
+
+em_status_t em_tmo_get_stats(em_tmo_t tmo, em_tmo_stats_t *stat)
+{
+	RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && tmo == EM_TMO_UNDEF,
+			EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_STATS,
+			"Invalid tmo");
+	/* check that tmo buf is valid before accessing other struct members */
+	RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer),
+			EM_ERR_BAD_ID, EM_ESCOPE_TMO_GET_STATS,
+			"Invalid tmo buffer");
+	RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && tmo->odp_timer == ODP_TIMER_INVALID,
+			EM_ERR_BAD_STATE, EM_ESCOPE_TMO_GET_STATS,
+			"tmo deleted?");
+
+	if (EM_TIMER_TMO_STATS) {
+		if (stat)
+			*stat = tmo->stats;
+	} else {
+		return EM_ERR_NOT_IMPLEMENTED;
+	}
+
+	return EM_OK;
+}
+
+em_tmo_type_t em_tmo_get_type(em_event_t event, em_tmo_t *tmo, bool reset)
+{
+	if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) {
+		INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_TYPE, "Invalid event given");
+		return EM_TMO_TYPE_NONE;
+	}
+
+	event_hdr_t *ev_hdr = event_to_hdr(event);
+	em_tmo_type_t type = (em_tmo_type_t)ev_hdr->flags.tmo_type;
+
+	if (EM_CHECK_LEVEL > 1 && unlikely(!can_have_tmo_type(event))) {
+		INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_TYPE,
+			       "Invalid event type");
+		return EM_TMO_TYPE_NONE;
+	}
+
+	if (EM_CHECK_LEVEL > 2 && unlikely(type > EM_TMO_TYPE_PERIODIC)) {
+		INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_TMO_GET_TYPE,
+			       "Invalid tmo event type, header corrupted?");
+		return EM_TMO_TYPE_NONE;
+	}
+
+	if (tmo)
+		*tmo = (type == EM_TMO_TYPE_NONE) ? EM_TMO_UNDEF : ev_hdr->tmo;
+
+	if (reset && ev_hdr->event_type != EM_EVENT_TYPE_TIMER_IND) {
+		ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
+		ev_hdr->tmo = EM_TMO_UNDEF;
+	}
+
+	return type;
+}
+
+void *em_tmo_get_userptr(em_event_t event, em_tmo_t *tmo)
+{
+	if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) {
+		INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_USERPTR, "Invalid event given");
+		return NULL;
+	}
+
+	odp_event_t odp_event = event_em2odp(event);
+	odp_event_type_t evtype = odp_event_type(odp_event);
+
+	if (unlikely(evtype != ODP_EVENT_TIMEOUT)) /* no errorhandler for other events */
+		return NULL;
+
+	event_hdr_t *ev_hdr = event_to_hdr(event); /* will not return on error */
+
+	if (tmo) /* always a periodic (ring) timeout here */
+		*tmo = ev_hdr->tmo;
+
+	return odp_timeout_user_ptr(odp_timeout_from_event(odp_event));
+}
+
+em_timer_t em_tmo_get_timer(em_tmo_t tmo)
+{
+	if (EM_CHECK_LEVEL > 0 && unlikely(tmo == EM_TMO_UNDEF)) {
+		INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_TIMER, "Invalid tmo given");
+		return EM_TIMER_UNDEF;
+	}
+	if (EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer)) {
+		INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_TIMER, "Corrupted tmo?");
+		return EM_TIMER_UNDEF;
+	}
+
+	return tmo->timer;
+}
+
+uint64_t em_timer_to_u64(em_timer_t timer)
+{
+	return (uint64_t)timer;
+}
+
+uint64_t em_tmo_to_u64(em_tmo_t tmo)
+{
+	return (uint64_t)tmo;
+}
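
Usage note (not part of the patch above): a minimal one-shot timeout sketch
against the em_tmo_set_rel() path shown. The names 'my_timer', 'my_queue' and
'my_pool' are hypothetical placeholders assumed to be created elsewhere; error
handling is reduced to the bare minimum.

	#include <event_machine.h>
	#include <event_machine/add-ons/event_machine_timer.h>

	/* Arm a one-shot timeout to fire ~1s from now into 'my_queue' (sketch) */
	static em_tmo_t start_oneshot(em_timer_t my_timer, em_queue_t my_queue,
				      em_pool_t my_pool)
	{
		em_tmo_t tmo = em_tmo_create(my_timer, EM_TMO_FLAG_ONESHOT, my_queue);
		em_event_t ev = em_alloc(sizeof(uint64_t), EM_EVENT_TYPE_SW, my_pool);

		if (tmo == EM_TMO_UNDEF || ev == EM_EVENT_UNDEF)
			return EM_TMO_UNDEF; /* creation/allocation failed */

		em_timer_tick_t ticks = em_timer_ns_to_tick(my_timer,
							    1000ULL * 1000ULL * 1000ULL);

		/* as seen above, EM_ERR_TOONEAR is returned without invoking the
		 * error handler when the requested deadline is too close */
		if (em_tmo_set_rel(tmo, ticks, ev) != EM_OK) {
			em_free(ev);
			em_tmo_delete(tmo);
			return EM_TMO_UNDEF;
		}
		return tmo;
	}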
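
Likewise a sketch of the receive-side pattern for periodic timeouts, showing the
EM_ERR_CANCELED return visible in em_tmo_ack() above (hypothetical EO receive
function; on any ack failure the event state is reverted to the application,
so the event is freed here):

	/* EO receive: re-arm a periodic tmo by reusing the received timeout event */
	static void my_receive(void *eo_ctx, em_event_t event, em_event_type_t type,
			       em_queue_t queue, void *q_ctx)
	{
		em_tmo_t tmo;

		if (em_tmo_get_type(event, &tmo, false) == EM_TMO_TYPE_PERIODIC) {
			em_status_t stat = em_tmo_ack(tmo, event);

			if (stat != EM_OK) {
				/* e.g. EM_ERR_CANCELED: the event was not re-armed,
				 * ownership stays with the application -> free it */
				em_free(event);
			}
			/* on EM_OK the event is owned by the timer again */
			return;
		}

		/* ... normal (non-timeout) event handling would go here ... */
		em_free(event);
	}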
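
And a sketch of canceling a one-shot tmo, reflecting the odp_timer_cancel()
return-value handling fixed above (hypothetical helper; the ownership comments
follow the em_tmo_cancel() code shown):

	/* Cancel an armed one-shot tmo; TOONEAR means the timeout fires anyway */
	static void stop_oneshot(em_tmo_t tmo)
	{
		em_event_t ev = EM_EVENT_UNDEF;

		if (em_tmo_cancel(tmo, &ev) == EM_OK) {
			/* canceled in time: the armed event is returned to us */
			if (ev != EM_EVENT_UNDEF)
				em_free(ev);
			em_tmo_delete(tmo);
			return;
		}
		/*
		 * EM_ERR_TOONEAR: too late to cancel, no event returned here;
		 * the timeout is still delivered to the tmo queue and can be
		 * freed (and the tmo deleted) once received.
		 */
	}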