LCOV - code coverage report
Current view: top level - lib/timer - rte_timer.c (source / functions)
Test: Code coverage
Date: 2025-05-01 17:49:45
Coverage: Lines: 209 / 306 (68.3 %) | Functions: 22 / 27 (81.5 %) | Branches: 128 / 232 (55.2 %)
Legend: Lines: hit not hit | Branches: + taken - not taken # not executed

           Branch data     Line data    Source code
       1                 :            : /* SPDX-License-Identifier: BSD-3-Clause
       2                 :            :  * Copyright(c) 2010-2014 Intel Corporation
       3                 :            :  */
       4                 :            : 
       5                 :            : #include <stdio.h>
       6                 :            : #include <stdint.h>
       7                 :            : #include <stdbool.h>
       8                 :            : #include <inttypes.h>
       9                 :            : #include <assert.h>
      10                 :            : 
      11                 :            : #include <eal_export.h>
      12                 :            : #include <rte_common.h>
      13                 :            : #include <rte_cycles.h>
      14                 :            : #include <rte_eal_memconfig.h>
      15                 :            : #include <rte_memory.h>
      16                 :            : #include <rte_lcore.h>
      17                 :            : #include <rte_branch_prediction.h>
      18                 :            : #include <rte_spinlock.h>
      19                 :            : #include <rte_random.h>
      20                 :            : #include <rte_pause.h>
      21                 :            : #include <rte_memzone.h>
      22                 :            : 
      23                 :            : #include "rte_timer.h"
      24                 :            : 
      25                 :            : /**
      26                 :            :  * Per-lcore info for timers.
      27                 :            :  */
      28                 :            : struct __rte_cache_aligned priv_timer {
      29                 :            :         struct rte_timer pending_head;  /**< dummy timer instance to head up list */
      30                 :            :         rte_spinlock_t list_lock;       /**< lock to protect list access */
      31                 :            : 
       32                 :            :         /** per-core variable that is true if a timer was updated on this
       33                 :            :          *  core since the last reset of the variable */
      34                 :            :         int updated;
      35                 :            : 
      36                 :            :         /** track the current depth of the skiplist */
      37                 :            :         unsigned curr_skiplist_depth;
      38                 :            : 
      39                 :            :         unsigned prev_lcore;              /**< used for lcore round robin */
      40                 :            : 
      41                 :            :         /** running timer on this lcore now */
      42                 :            :         struct rte_timer *running_tim;
      43                 :            : 
      44                 :            : #ifdef RTE_LIBRTE_TIMER_DEBUG
      45                 :            :         /** per-lcore statistics */
      46                 :            :         struct rte_timer_debug_stats stats;
      47                 :            : #endif
      48                 :            : };
      49                 :            : 
      50                 :            : #define FL_ALLOCATED    (1 << 0)
      51                 :            : struct rte_timer_data {
      52                 :            :         struct priv_timer priv_timer[RTE_MAX_LCORE];
      53                 :            :         uint8_t internal_flags;
      54                 :            : };
      55                 :            : 
      56                 :            : #define RTE_MAX_DATA_ELS 64
      57                 :            : static const struct rte_memzone *rte_timer_data_mz;
      58                 :            : static int *volatile rte_timer_mz_refcnt;
      59                 :            : static struct rte_timer_data *rte_timer_data_arr;
      60                 :            : static const uint32_t default_data_id;
      61                 :            : static uint32_t rte_timer_subsystem_initialized;
      62                 :            : 
      63                 :            : /* when debug is enabled, store some statistics */
      64                 :            : #ifdef RTE_LIBRTE_TIMER_DEBUG
      65                 :            : #define __TIMER_STAT_ADD(priv_timer, name, n) do {                      \
      66                 :            :                 unsigned __lcore_id = rte_lcore_id();                   \
      67                 :            :                 if (__lcore_id < RTE_MAX_LCORE)                              \
      68                 :            :                         priv_timer[__lcore_id].stats.name += (n);       \
      69                 :            :         } while(0)
      70                 :            : #else
      71                 :            : #define __TIMER_STAT_ADD(priv_timer, name, n) do {} while (0)
      72                 :            : #endif
      73                 :            : 
      74                 :            : static inline int
      75                 :            : timer_data_valid(uint32_t id)
      76                 :            : {
      77   [ +  -  -  -  :     334815 :         return rte_timer_data_arr &&
          -  -  -  -  +  
             -  +  -  -  
                      - ]
      78   [ +  -  -  -  :     811509 :                 (rte_timer_data_arr[id].internal_flags & FL_ALLOCATED);
          -  -  -  -  +  
          #  +  +  +  #  
                   -  - ]
      79                 :            : }
      80                 :            : 
      81                 :            : /* validate ID and retrieve timer data pointer, or return error value */
      82                 :            : #define TIMER_DATA_VALID_GET_OR_ERR_RET(id, timer_data, retval) do {    \
      83                 :            :         if (id >= RTE_MAX_DATA_ELS || !timer_data_valid(id))         \
      84                 :            :                 return retval;                                          \
      85                 :            :         timer_data = &rte_timer_data_arr[id];                               \
      86                 :            : } while (0)
      87                 :            : 
      88                 :            : RTE_EXPORT_SYMBOL(rte_timer_data_alloc)
      89                 :            : int
      90                 :          0 : rte_timer_data_alloc(uint32_t *id_ptr)
      91                 :            : {
      92                 :            :         int i;
      93                 :            :         struct rte_timer_data *data;
      94                 :            : 
      95         [ #  # ]:          0 :         if (!rte_timer_subsystem_initialized)
      96                 :            :                 return -ENOMEM;
      97                 :            : 
      98         [ #  # ]:          0 :         for (i = 0; i < RTE_MAX_DATA_ELS; i++) {
      99                 :          0 :                 data = &rte_timer_data_arr[i];
     100         [ #  # ]:          0 :                 if (!(data->internal_flags & FL_ALLOCATED)) {
     101                 :          0 :                         data->internal_flags |= FL_ALLOCATED;
     102                 :            : 
     103         [ #  # ]:          0 :                         if (id_ptr)
     104                 :          0 :                                 *id_ptr = i;
     105                 :            : 
     106                 :          0 :                         return 0;
     107                 :            :                 }
     108                 :            :         }
     109                 :            : 
     110                 :            :         return -ENOSPC;
     111                 :            : }
     112                 :            : 
     113                 :            : RTE_EXPORT_SYMBOL(rte_timer_data_dealloc)
     114                 :            : int
     115                 :          0 : rte_timer_data_dealloc(uint32_t id)
     116                 :            : {
     117                 :            :         struct rte_timer_data *timer_data;
     118         [ #  # ]:          0 :         TIMER_DATA_VALID_GET_OR_ERR_RET(id, timer_data, -EINVAL);
     119                 :            : 
     120                 :          0 :         timer_data->internal_flags &= ~(FL_ALLOCATED);
     121                 :            : 
     122                 :          0 :         return 0;
     123                 :            : }
     124                 :            : 
     125                 :            : /* Init the timer library. Allocate an array of timer data structs in shared
     126                 :            :  * memory, and allocate the zeroth entry for use with original timer
     127                 :            :  * APIs. Since the intersection of the sets of lcore ids in primary and
     128                 :            :  * secondary processes should be empty, the zeroth entry can be shared by
     129                 :            :  * multiple processes.
     130                 :            :  */
     131                 :            : RTE_EXPORT_SYMBOL(rte_timer_subsystem_init)
     132                 :            : int
     133                 :        180 : rte_timer_subsystem_init(void)
     134                 :            : {
     135                 :            :         const struct rte_memzone *mz;
     136                 :            :         struct rte_timer_data *data;
     137                 :            :         int i, lcore_id;
     138                 :            :         static const char *mz_name = "rte_timer_mz";
     139                 :            :         const size_t data_arr_size =
     140                 :            :                         RTE_MAX_DATA_ELS * sizeof(*rte_timer_data_arr);
     141                 :            :         const size_t mem_size = data_arr_size + sizeof(*rte_timer_mz_refcnt);
     142                 :            :         bool do_full_init = true;
     143                 :            : 
     144                 :        180 :         rte_mcfg_timer_lock();
     145                 :            : 
     146         [ -  + ]:        180 :         if (rte_timer_subsystem_initialized) {
     147                 :          0 :                 rte_mcfg_timer_unlock();
     148                 :          0 :                 return -EALREADY;
     149                 :            :         }
     150                 :            : 
     151                 :        180 :         mz = rte_memzone_lookup(mz_name);
     152         [ +  + ]:        180 :         if (mz == NULL) {
     153                 :        155 :                 mz = rte_memzone_reserve_aligned(mz_name, mem_size,
     154                 :            :                                 SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
     155         [ -  + ]:        155 :                 if (mz == NULL) {
     156                 :          0 :                         rte_mcfg_timer_unlock();
     157                 :          0 :                         return -ENOMEM;
     158                 :            :                 }
     159                 :            :                 do_full_init = true;
     160                 :            :         } else
     161                 :            :                 do_full_init = false;
     162                 :            : 
     163                 :        180 :         rte_timer_data_mz = mz;
     164                 :        180 :         rte_timer_data_arr = mz->addr;
     165                 :        180 :         rte_timer_mz_refcnt = (void *)((char *)mz->addr + data_arr_size);
     166                 :            : 
     167         [ +  + ]:        180 :         if (do_full_init) {
     168         [ +  + ]:      10075 :                 for (i = 0; i < RTE_MAX_DATA_ELS; i++) {
     169                 :       9920 :                         data = &rte_timer_data_arr[i];
     170                 :            : 
     171         [ +  + ]:    1279680 :                         for (lcore_id = 0; lcore_id < RTE_MAX_LCORE;
     172                 :    1269760 :                              lcore_id++) {
     173                 :            :                                 rte_spinlock_init(
     174                 :            :                                         &data->priv_timer[lcore_id].list_lock);
     175                 :    1269760 :                                 data->priv_timer[lcore_id].prev_lcore =
     176                 :            :                                         lcore_id;
     177                 :            :                         }
     178                 :            :                 }
     179                 :            :         }
     180                 :            : 
     181                 :        180 :         rte_timer_data_arr[default_data_id].internal_flags |= FL_ALLOCATED;
     182                 :        180 :         (*rte_timer_mz_refcnt)++;
     183                 :            : 
     184                 :        180 :         rte_timer_subsystem_initialized = 1;
     185                 :            : 
     186                 :        180 :         rte_mcfg_timer_unlock();
     187                 :            : 
     188                 :        180 :         return 0;
     189                 :            : }
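/* Editor's note: an illustrative usage sketch, not part of the measured
 * source. It shows the subsystem lifecycle implemented above: initialize the
 * shared state once per process, optionally allocate a private timer-data
 * instance for the rte_timer_alt_*() calls, and release everything at exit.
 * Error handling is reduced to the bare minimum. */
static int
example_timer_subsystem_lifecycle(void)
{
        uint32_t timer_data_id = 0;     /* 0 is the default, pre-allocated id */
        int ret;

        ret = rte_timer_subsystem_init();      /* reserves or looks up the memzone */
        if (ret != 0 && ret != -EALREADY)
                return ret;

        /* optional: a dedicated instance for the rte_timer_alt_*() API */
        if (rte_timer_data_alloc(&timer_data_id) != 0)
                timer_data_id = 0;              /* fall back to the default instance */

        /* ... arm timers and run rte_timer_manage() on worker lcores ... */

        if (timer_data_id != 0)
                rte_timer_data_dealloc(timer_data_id);
        rte_timer_subsystem_finalize();         /* drops the memzone reference count */
        return 0;
}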
     190                 :            : 
     191                 :            : RTE_EXPORT_SYMBOL(rte_timer_subsystem_finalize)
     192                 :            : void
     193                 :        247 : rte_timer_subsystem_finalize(void)
     194                 :            : {
     195                 :        247 :         rte_mcfg_timer_lock();
     196                 :            : 
     197         [ +  + ]:        247 :         if (!rte_timer_subsystem_initialized) {
     198                 :         72 :                 rte_mcfg_timer_unlock();
     199                 :         72 :                 return;
     200                 :            :         }
     201                 :            : 
     202         [ +  + ]:        175 :         if (--(*rte_timer_mz_refcnt) == 0)
     203                 :        150 :                 rte_memzone_free(rte_timer_data_mz);
     204                 :            : 
     205                 :        175 :         rte_timer_subsystem_initialized = 0;
     206                 :            : 
     207                 :        175 :         rte_mcfg_timer_unlock();
     208                 :            : }
     209                 :            : 
     210                 :            : /* Initialize the timer handle tim for use */
     211                 :            : RTE_EXPORT_SYMBOL(rte_timer_init)
     212                 :            : void
     213                 :       8196 : rte_timer_init(struct rte_timer *tim)
     214                 :            : {
     215                 :            :         union rte_timer_status status;
     216                 :            : 
     217                 :            :         status.state = RTE_TIMER_STOP;
     218                 :            :         status.owner = RTE_TIMER_NO_OWNER;
     219                 :       8196 :         rte_atomic_store_explicit(&tim->status.u32, status.u32, rte_memory_order_relaxed);
     220                 :       8196 : }
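/* Editor's note: an illustrative sketch of the normal handle lifecycle, not
 * part of the measured source. The callback name, the counter, and the
 * keep_running flag are placeholders: init the handle, arm it periodically
 * on the current lcore, fire it from that lcore's service loop via
 * rte_timer_manage(), then stop it synchronously. */
static void
example_timer_cb(struct rte_timer *tim, void *arg)
{
        unsigned int *counter = arg;

        (*counter)++;                   /* runs on the lcore owning the timer */
        RTE_SET_USED(tim);
}

static void
example_timer_lifecycle(volatile int *keep_running)
{
        static struct rte_timer tim;
        static unsigned int counter;
        const uint64_t hz = rte_get_timer_hz();

        rte_timer_init(&tim);
        /* fire once per second on this lcore until stopped */
        rte_timer_reset_sync(&tim, hz, PERIODICAL, rte_lcore_id(),
                             example_timer_cb, &counter);

        while (*keep_running)
                rte_timer_manage();     /* expires and runs pending timers */

        rte_timer_stop_sync(&tim);
}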
     221                 :            : 
     222                 :            : /*
      223                 :            :  * if the timer is pending or stopped (or running on the same core as
      224                 :            :  * us), mark the timer as being configured, and on success return the
      225                 :            :  * previous status of the timer
     226                 :            :  */
     227                 :            : static int
     228                 :     334856 : timer_set_config_state(struct rte_timer *tim,
     229                 :            :                        union rte_timer_status *ret_prev_status,
     230                 :            :                        struct priv_timer *priv_timer)
     231                 :            : {
     232                 :            :         union rte_timer_status prev_status, status;
     233                 :            :         int success = 0;
     234                 :            :         unsigned lcore_id;
     235                 :            : 
     236                 :            :         lcore_id = rte_lcore_id();
     237                 :            : 
      238                 :            :         /* wait until the timer is in a correct status before updating it,
      239                 :            :          * and mark it as being configured */
     240                 :     334856 :         prev_status.u32 = rte_atomic_load_explicit(&tim->status.u32, rte_memory_order_relaxed);
     241                 :            : 
     242         [ +  + ]:     668292 :         while (success == 0) {
     243                 :            :                 /* timer is running on another core
     244                 :            :                  * or ready to run on local core, exit
     245                 :            :                  */
     246         [ +  + ]:     334469 :                 if (prev_status.state == RTE_TIMER_RUNNING &&
     247         [ +  + ]:         21 :                     (prev_status.owner != (uint16_t)lcore_id ||
     248         [ +  - ]:          9 :                      tim != priv_timer[lcore_id].running_tim))
     249                 :            :                         return -1;
     250                 :            : 
     251                 :            :                 /* timer is being configured on another core */
     252         [ +  + ]:     334457 :                 if (prev_status.state == RTE_TIMER_CONFIG)
     253                 :            :                         return -1;
     254                 :            : 
      255                 :            :                 /* here, we know that the timer is stopped or pending,
     256                 :            :                  * mark it atomically as being configured */
     257                 :     333436 :                 status.state = RTE_TIMER_CONFIG;
     258                 :     333436 :                 status.owner = (int16_t)lcore_id;
     259                 :            :                 /* CONFIG states are acting as locked states. If the
     260                 :            :                  * timer is in CONFIG state, the state cannot be changed
     261                 :            :                  * by other threads. So, we should use ACQUIRE here.
     262                 :            :                  */
     263                 :     333436 :                 success = rte_atomic_compare_exchange_strong_explicit(&tim->status.u32,
     264                 :            :                                               (uint32_t *)(uintptr_t)&prev_status.u32,
     265                 :            :                                               status.u32,
     266                 :            :                                               rte_memory_order_acquire,
     267                 :            :                                               rte_memory_order_relaxed);
     268                 :            :         }
     269                 :            : 
     270                 :     333823 :         ret_prev_status->u32 = prev_status.u32;
     271                 :     333823 :         return 0;
     272                 :            : }
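/* Editor's note: an illustrative sketch, not part of the measured source, of
 * why a single compare-and-swap is sufficient above. The status word is
 * assumed (per rte_timer.h) to pack the state and the owner lcore into one
 * 32-bit value, so both fields are always observed and updated together: */
union example_timer_status {
        struct {
                uint16_t state;         /* RTE_TIMER_STOP/PENDING/RUNNING/CONFIG */
                int16_t owner;          /* owning lcore, or RTE_TIMER_NO_OWNER */
        };
        uint32_t u32;                   /* value used for atomic load/CAS/store */
};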
     273                 :            : 
     274                 :            : /*
     275                 :            :  * if timer is pending, mark timer as running
     276                 :            :  */
     277                 :            : static int
     278                 :      17247 : timer_set_running_state(struct rte_timer *tim)
     279                 :            : {
     280                 :            :         union rte_timer_status prev_status, status;
     281                 :            :         unsigned lcore_id = rte_lcore_id();
     282                 :            :         int success = 0;
     283                 :            : 
      284                 :            :         /* wait until the timer is in a correct status before updating it,
      285                 :            :          * and mark it as running */
     286                 :      17247 :         prev_status.u32 = rte_atomic_load_explicit(&tim->status.u32, rte_memory_order_relaxed);
     287                 :            : 
     288         [ +  + ]:      34489 :         while (success == 0) {
     289                 :            :                 /* timer is not pending anymore */
     290         [ +  + ]:      17245 :                 if (prev_status.state != RTE_TIMER_PENDING)
     291                 :            :                         return -1;
     292                 :            : 
     293                 :            :                 /* we know that the timer will be pending at this point
     294                 :            :                  * mark it atomically as being running
     295                 :            :                  */
     296                 :      17242 :                 status.state = RTE_TIMER_RUNNING;
     297                 :      17242 :                 status.owner = (int16_t)lcore_id;
     298                 :            :                 /* RUNNING states are acting as locked states. If the
     299                 :            :                  * timer is in RUNNING state, the state cannot be changed
     300                 :            :                  * by other threads. So, we should use ACQUIRE here.
     301                 :            :                  */
     302                 :      17242 :                 success = rte_atomic_compare_exchange_strong_explicit(&tim->status.u32,
     303                 :            :                                               (uint32_t *)(uintptr_t)&prev_status.u32,
     304                 :            :                                               status.u32,
     305                 :            :                                               rte_memory_order_acquire,
     306                 :            :                                               rte_memory_order_relaxed);
     307                 :            :         }
     308                 :            : 
     309                 :            :         return 0;
     310                 :            : }
     311                 :            : 
     312                 :            : /*
     313                 :            :  * Return a skiplist level for a new entry.
      314                 :            :  * This probabilistically chooses a level such that an entry at level n
      315                 :            :  * will also appear at level n+1 with probability p = 1/4.
     316                 :            :  */
     317                 :            : static uint32_t
     318                 :            : timer_get_skiplist_level(unsigned curr_depth)
     319                 :            : {
     320                 :            : #ifdef RTE_LIBRTE_TIMER_DEBUG
     321                 :            :         static uint32_t i, count = 0;
     322                 :            :         static uint32_t levels[MAX_SKIPLIST_DEPTH] = {0};
     323                 :            : #endif
     324                 :            : 
     325                 :            :         /* probability value is 1/4, i.e. all at level 0, 1 in 4 is at level 1,
     326                 :            :          * 1 in 16 at level 2, 1 in 64 at level 3, etc. Calculated using lowest
     327                 :            :          * bit position of a (pseudo)random number.
     328                 :            :          */
     329                 :     465748 :         uint32_t rand = rte_rand() & (UINT32_MAX - 1);
     330         [ +  - ]:     232874 :         uint32_t level = rand == 0 ? MAX_SKIPLIST_DEPTH : (rte_bsf32(rand)-1) / 2;
     331                 :            : 
     332                 :            :         /* limit the levels used to one above our current level, so we don't,
     333                 :            :          * for instance, have a level 0 and a level 7 without anything between
     334                 :            :          */
     335                 :            :         if (level > curr_depth)
     336                 :            :                 level = curr_depth;
     337                 :            :         if (level >= MAX_SKIPLIST_DEPTH)
     338                 :            :                 level = MAX_SKIPLIST_DEPTH-1;
     339                 :            : #ifdef RTE_LIBRTE_TIMER_DEBUG
     340                 :            :         count ++;
     341                 :            :         levels[level]++;
     342                 :            :         if (count % 10000 == 0)
     343                 :            :                 for (i = 0; i < MAX_SKIPLIST_DEPTH; i++)
     344                 :            :                         printf("Level %u: %u\n", (unsigned)i, (unsigned)levels[i]);
     345                 :            : #endif
     346                 :            :         return level;
     347                 :            : }
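/* Editor's note: a worked example of the distribution produced above, not
 * part of the measured source. With bit 0 masked off, the lowest set bit of
 * a uniform random word is bit 1 with probability 1/2, bit 2 with 1/4,
 * bit 3 with 1/8, ...; (rte_bsf32(rand) - 1) / 2 maps bits 1-2 to level 0,
 * bits 3-4 to level 1, bits 5-6 to level 2, and so on, giving
 *
 *   P(level == 0) = 1/2 + 1/4  = 3/4
 *   P(level == 1) = 1/8 + 1/16 = 3/16
 *   P(level == 2) = 1/32 + 1/64 = 3/64      ...      P(level >= k) = 4^-k
 *
 * i.e. an entry at level n also appears at level n+1 with probability 1/4.
 * The same logic with a generic ctz builtin, for experimenting outside DPDK
 * (the clamping against MAX_SKIPLIST_DEPTH is simplified away): */
static inline uint32_t
example_skiplist_level(uint32_t random_word, unsigned int curr_depth)
{
        uint32_t r = random_word & (UINT32_MAX - 1);    /* clear bit 0 */
        uint32_t level = (r == 0) ? curr_depth : (__builtin_ctz(r) - 1) / 2;

        return level > curr_depth ? curr_depth : level; /* keep levels contiguous */
}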
     348                 :            : 
     349                 :            : /*
     350                 :            :  * For a given time value, get the entries at each level which
     351                 :            :  * are <= that time value.
     352                 :            :  */
     353                 :            : static void
     354                 :     449373 : timer_get_prev_entries(uint64_t time_val, unsigned tim_lcore,
     355                 :            :                        struct rte_timer **prev, struct priv_timer *priv_timer)
     356                 :            : {
     357                 :     449373 :         unsigned lvl = priv_timer[tim_lcore].curr_skiplist_depth;
     358                 :     449373 :         prev[lvl] = &priv_timer[tim_lcore].pending_head;
     359         [ +  + ]:    3674125 :         while(lvl != 0) {
     360                 :    3224752 :                 lvl--;
     361                 :    3224752 :                 prev[lvl] = prev[lvl+1];
     362         [ +  + ]:   11148773 :                 while (prev[lvl]->sl_next[lvl] &&
     363         [ +  + ]:    9401256 :                                 prev[lvl]->sl_next[lvl]->expire <= time_val)
     364                 :    7924021 :                         prev[lvl] = prev[lvl]->sl_next[lvl];
     365                 :            :         }
     366                 :     449373 : }
     367                 :            : 
     368                 :            : /*
     369                 :            :  * Given a timer node in the skiplist, find the previous entries for it at
     370                 :            :  * all skiplist levels.
     371                 :            :  */
     372                 :            : static void
     373                 :     215636 : timer_get_prev_entries_for_node(struct rte_timer *tim, unsigned tim_lcore,
     374                 :            :                                 struct rte_timer **prev,
     375                 :            :                                 struct priv_timer *priv_timer)
     376                 :            : {
     377                 :            :         int i;
     378                 :            : 
      379                 :            :         /* to get a specific entry in the list, look for the entries just lower
      380                 :            :          * than its time value, then advance on each level individually if necessary
     381                 :            :          */
     382                 :     215636 :         timer_get_prev_entries(tim->expire - 1, tim_lcore, prev, priv_timer);
     383         [ +  + ]:    1776507 :         for (i = priv_timer[tim_lcore].curr_skiplist_depth - 1; i >= 0; i--) {
     384         [ +  + ]:    1441504 :                 while (prev[i]->sl_next[i] != NULL &&
     385         [ +  + ]:    1560871 :                                 prev[i]->sl_next[i] != tim &&
     386         [ -  + ]:    1154591 :                                 prev[i]->sl_next[i]->expire <= tim->expire)
     387                 :          0 :                         prev[i] = prev[i]->sl_next[i];
     388                 :            :         }
     389                 :     215636 : }
     390                 :            : 
     391                 :            : /* call with lock held as necessary
     392                 :            :  * add in list
     393                 :            :  * timer must be in config state
     394                 :            :  * timer must not be in a list
     395                 :            :  */
     396                 :            : static void
     397                 :     232874 : timer_add(struct rte_timer *tim, unsigned int tim_lcore,
     398                 :            :           struct priv_timer *priv_timer)
     399                 :            : {
     400                 :            :         unsigned lvl;
     401                 :            :         struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];
     402                 :            : 
     403                 :            :         /* find where exactly this element goes in the list of elements
     404                 :            :          * for each depth. */
     405                 :     232874 :         timer_get_prev_entries(tim->expire, tim_lcore, prev, priv_timer);
     406                 :            : 
     407                 :            :         /* now assign it a new level and add at that level */
     408                 :     232874 :         const unsigned tim_level = timer_get_skiplist_level(
     409                 :     232874 :                         priv_timer[tim_lcore].curr_skiplist_depth);
     410         [ +  + ]:     232874 :         if (tim_level == priv_timer[tim_lcore].curr_skiplist_depth)
     411                 :       1134 :                 priv_timer[tim_lcore].curr_skiplist_depth++;
     412                 :            : 
     413                 :            :         lvl = tim_level;
     414         [ +  + ]:     309679 :         while (lvl > 0) {
     415                 :      76805 :                 tim->sl_next[lvl] = prev[lvl]->sl_next[lvl];
     416                 :      76805 :                 prev[lvl]->sl_next[lvl] = tim;
     417                 :      76805 :                 lvl--;
     418                 :            :         }
     419                 :     232874 :         tim->sl_next[0] = prev[0]->sl_next[0];
     420                 :     232874 :         prev[0]->sl_next[0] = tim;
     421                 :            : 
     422                 :            :         /* save the lowest list entry into the expire field of the dummy hdr
      423                 :            :          * NOTE: this is not atomic on 32-bit */
     424                 :     232874 :         priv_timer[tim_lcore].pending_head.expire = priv_timer[tim_lcore].\
     425                 :     232874 :                         pending_head.sl_next[0]->expire;
     426                 :     232874 : }
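/* Editor's note: an illustrative picture of the per-lcore skiplist that
 * timer_add() maintains, not part of the measured source. For timers
 * expiring at t=10, 20, 30 and 40, where t=20 was assigned level 1 and
 * t=40 level 2, the links look like:
 *
 *   level 2:  pending_head --------------------------------> t=40
 *   level 1:  pending_head ------------> t=20 -------------> t=40
 *   level 0:  pending_head -> t=10 ----> t=20 ----> t=30 ---> t=40
 *
 * pending_head.expire mirrors the first level-0 entry (here t=10); on
 * 64-bit this is the value rte_timer_manage() reads without taking the
 * list lock for its early-exit check. */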
     427                 :            : 
     428                 :            : /*
     429                 :            :  * del from list, lock if needed
     430                 :            :  * timer must be in config state
     431                 :            :  * timer must be in a list
     432                 :            :  */
     433                 :            : static void
     434         [ +  + ]:     215579 : timer_del(struct rte_timer *tim, union rte_timer_status prev_status,
     435                 :            :           int local_is_locked, struct priv_timer *priv_timer)
     436                 :            : {
     437                 :            :         unsigned lcore_id = rte_lcore_id();
     438                 :     215579 :         unsigned prev_owner = prev_status.owner;
     439                 :            :         int i;
     440                 :            :         struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];
     441                 :            : 
      442                 :            :         /* if the timer is pending on another core, we need to lock that
      443                 :            :          * core's list; if it is on the local core, we only need to lock
      444                 :            :          * if we are not called from rte_timer_manage() */
     445         [ +  + ]:     215579 :         if (prev_owner != lcore_id || !local_is_locked)
     446                 :     215574 :                 rte_spinlock_lock(&priv_timer[prev_owner].list_lock);
     447                 :            : 
     448                 :            :         /* save the lowest list entry into the expire field of the dummy hdr.
     449                 :            :          * NOTE: this is not atomic on 32-bit */
     450         [ +  + ]:     215635 :         if (tim == priv_timer[prev_owner].pending_head.sl_next[0])
     451                 :        265 :                 priv_timer[prev_owner].pending_head.expire =
     452         [ +  + ]:        265 :                                 ((tim->sl_next[0] == NULL) ? 0 : tim->sl_next[0]->expire);
     453                 :            : 
     454                 :            :         /* adjust pointers from previous entries to point past this */
     455                 :     215635 :         timer_get_prev_entries_for_node(tim, prev_owner, prev, priv_timer);
     456         [ +  + ]:    1776507 :         for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--) {
     457         [ +  + ]:    1560871 :                 if (prev[i]->sl_next[i] == tim)
     458                 :     286913 :                         prev[i]->sl_next[i] = tim->sl_next[i];
     459                 :            :         }
     460                 :            : 
     461                 :            :         /* in case we deleted last entry at a level, adjust down max level */
     462         [ +  + ]:     215898 :         for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--)
     463         [ +  + ]:     215646 :                 if (priv_timer[prev_owner].pending_head.sl_next[i] == NULL)
     464                 :        262 :                         priv_timer[prev_owner].curr_skiplist_depth --;
     465                 :            :                 else
     466                 :            :                         break;
     467                 :            : 
     468         [ +  + ]:     215636 :         if (prev_owner != lcore_id || !local_is_locked)
     469                 :     215630 :                 rte_spinlock_unlock(&priv_timer[prev_owner].list_lock);
     470                 :     215635 : }
     471                 :            : 
     472                 :            : /* Reset and start the timer associated with the timer handle (private func) */
     473                 :            : static int
     474         [ -  + ]:     233758 : __rte_timer_reset(struct rte_timer *tim, uint64_t expire,
     475                 :            :                   uint64_t period, unsigned tim_lcore,
     476                 :            :                   rte_timer_cb_t fct, void *arg,
     477                 :            :                   int local_is_locked,
     478                 :            :                   struct rte_timer_data *timer_data)
     479                 :            : {
     480                 :            :         union rte_timer_status prev_status, status;
     481                 :            :         int ret;
     482                 :            :         unsigned lcore_id = rte_lcore_id();
     483                 :     233758 :         struct priv_timer *priv_timer = timer_data->priv_timer;
     484                 :            : 
     485                 :            :         /* round robin for tim_lcore */
     486         [ -  + ]:     233758 :         if (tim_lcore == (unsigned)LCORE_ID_ANY) {
     487         [ #  # ]:          0 :                 if (lcore_id < RTE_MAX_LCORE) {
     488                 :            :                         /* EAL thread with valid lcore_id */
     489                 :          0 :                         tim_lcore = rte_get_next_lcore(
     490                 :          0 :                                 priv_timer[lcore_id].prev_lcore,
     491                 :            :                                 0, 1);
     492                 :          0 :                         priv_timer[lcore_id].prev_lcore = tim_lcore;
     493                 :            :                 } else
     494                 :            :                         /* non-EAL thread do not run rte_timer_manage(),
     495                 :            :                          * so schedule the timer on the first enabled lcore. */
     496                 :          0 :                         tim_lcore = rte_get_next_lcore(LCORE_ID_ANY, 0, 1);
     497                 :            :         }
     498                 :            : 
      499                 :            :         /* wait until the timer is in a correct status before updating it,
      500                 :            :          * and mark it as being configured */
     501                 :     233758 :         ret = timer_set_config_state(tim, &prev_status, priv_timer);
     502         [ +  + ]:     233758 :         if (ret < 0)
     503                 :            :                 return -1;
     504                 :            : 
     505                 :            :         __TIMER_STAT_ADD(priv_timer, reset, 1);
     506   [ +  +  +  - ]:     232736 :         if (prev_status.state == RTE_TIMER_RUNNING &&
     507                 :            :             lcore_id < RTE_MAX_LCORE) {
     508                 :          6 :                 priv_timer[lcore_id].updated = 1;
     509                 :            :         }
     510                 :            : 
     511                 :            :         /* remove it from list */
     512         [ +  + ]:     232736 :         if (prev_status.state == RTE_TIMER_PENDING) {
     513                 :     115508 :                 timer_del(tim, prev_status, local_is_locked, priv_timer);
     514                 :            :                 __TIMER_STAT_ADD(priv_timer, pending, -1);
     515                 :            :         }
     516                 :            : 
     517                 :     232750 :         tim->period = period;
     518                 :     232750 :         tim->expire = expire;
     519                 :     232750 :         tim->f = fct;
     520                 :     232750 :         tim->arg = arg;
     521                 :            : 
      522                 :            :         /* if the timer needs to be scheduled on another core, we need to
      523                 :            :          * lock the destination list; if it is on the local core, we only need
      524                 :            :          * to lock if we are not called from rte_timer_manage()
     525                 :            :          */
     526         [ +  + ]:     232750 :         if (tim_lcore != lcore_id || !local_is_locked)
     527                 :     232583 :                 rte_spinlock_lock(&priv_timer[tim_lcore].list_lock);
     528                 :            : 
     529                 :            :         __TIMER_STAT_ADD(priv_timer, pending, 1);
     530                 :     233035 :         timer_add(tim, tim_lcore, priv_timer);
     531                 :            : 
      532                 :            :         /* update state: as we are in CONFIG state, only we can modify
      533                 :            :          * the state, so we don't need to use cmpset() here */
     534                 :     232874 :         status.state = RTE_TIMER_PENDING;
     535                 :     232874 :         status.owner = (int16_t)tim_lcore;
     536                 :            :         /* The "RELEASE" ordering guarantees the memory operations above
     537                 :            :          * the status update are observed before the update by all threads
     538                 :            :          */
     539                 :     232874 :         rte_atomic_store_explicit(&tim->status.u32, status.u32, rte_memory_order_release);
     540                 :            : 
     541         [ +  + ]:     232874 :         if (tim_lcore != lcore_id || !local_is_locked)
     542                 :     232868 :                 rte_spinlock_unlock(&priv_timer[tim_lcore].list_lock);
     543                 :            : 
     544                 :            :         return 0;
     545                 :            : }
     546                 :            : 
     547                 :            : /* Reset and start the timer associated with the timer handle tim */
     548                 :            : RTE_EXPORT_SYMBOL(rte_timer_reset)
     549                 :            : int
     550                 :     233843 : rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
     551                 :            :                       enum rte_timer_type type, unsigned int tim_lcore,
     552                 :            :                       rte_timer_cb_t fct, void *arg)
     553                 :            : {
     554                 :     233843 :         return rte_timer_alt_reset(default_data_id, tim, ticks, type,
     555                 :            :                                    tim_lcore, fct, arg);
     556                 :            : }
     557                 :            : 
     558                 :            : RTE_EXPORT_SYMBOL(rte_timer_alt_reset)
     559                 :            : int
     560                 :     233836 : rte_timer_alt_reset(uint32_t timer_data_id, struct rte_timer *tim,
     561                 :            :                     uint64_t ticks, enum rte_timer_type type,
     562                 :            :                     unsigned int tim_lcore, rte_timer_cb_t fct, void *arg)
     563                 :            : {
     564                 :            :         uint64_t cur_time = rte_get_timer_cycles();
     565                 :            :         uint64_t period;
     566                 :            :         struct rte_timer_data *timer_data;
     567                 :            : 
     568         [ +  - ]:     233836 :         TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);
     569                 :            : 
     570         [ +  + ]:     233838 :         if (type == PERIODICAL)
     571                 :            :                 period = ticks;
     572                 :            :         else
     573                 :            :                 period = 0;
     574                 :            : 
     575                 :     233838 :         return __rte_timer_reset(tim,  cur_time + ticks, period, tim_lcore,
     576                 :            :                                  fct, arg, 0, timer_data);
     577                 :            : }
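/* Editor's note: a small usage sketch for the reset path above, not part of
 * the measured source. Ticks are TSC cycles, so wall-clock delays come from
 * rte_get_timer_hz(); passing LCORE_ID_ANY lets __rte_timer_reset() pick the
 * target lcore round-robin. 'my_cb' and 'my_arg' are placeholders. */
static int
example_arm_oneshot(struct rte_timer *tim, rte_timer_cb_t my_cb, void *my_arg)
{
        uint64_t ticks = rte_get_timer_hz() / 10;       /* ~100 ms from now */

        /* SINGLE: fire once; LCORE_ID_ANY: let the library choose the lcore */
        return rte_timer_reset(tim, ticks, SINGLE, LCORE_ID_ANY, my_cb, my_arg);
}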
     578                 :            : 
      579                 :            : /* loop until rte_timer_reset() succeeds */
     580                 :            : RTE_EXPORT_SYMBOL(rte_timer_reset_sync)
     581                 :            : void
     582                 :       1105 : rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks,
     583                 :            :                      enum rte_timer_type type, unsigned tim_lcore,
     584                 :            :                      rte_timer_cb_t fct, void *arg)
     585                 :            : {
     586                 :       1121 :         while (rte_timer_reset(tim, ticks, type, tim_lcore,
     587         [ +  + ]:       1122 :                                fct, arg) != 0)
     588                 :            :                 rte_pause();
     589                 :       1106 : }
     590                 :            : 
     591                 :            : static int
     592                 :     100977 : __rte_timer_stop(struct rte_timer *tim,
     593                 :            :                  struct rte_timer_data *timer_data)
     594                 :            : {
     595                 :            :         union rte_timer_status prev_status, status;
     596                 :            :         unsigned lcore_id = rte_lcore_id();
     597                 :            :         int ret;
     598                 :     100977 :         struct priv_timer *priv_timer = timer_data->priv_timer;
     599                 :            : 
      600                 :            :         /* wait until the timer is in a correct status before updating it,
      601                 :            :          * and mark it as being configured */
     602                 :     100977 :         ret = timer_set_config_state(tim, &prev_status, priv_timer);
     603         [ +  + ]:     100970 :         if (ret < 0)
     604                 :            :                 return -1;
     605                 :            : 
     606                 :            :         __TIMER_STAT_ADD(priv_timer, stop, 1);
     607   [ +  +  +  - ]:     100959 :         if (prev_status.state == RTE_TIMER_RUNNING &&
     608                 :            :             lcore_id < RTE_MAX_LCORE) {
     609                 :          3 :                 priv_timer[lcore_id].updated = 1;
     610                 :            :         }
     611                 :            : 
     612                 :            :         /* remove it from list */
     613         [ +  + ]:     100959 :         if (prev_status.state == RTE_TIMER_PENDING) {
     614                 :     100102 :                 timer_del(tim, prev_status, 0, priv_timer);
     615                 :            :                 __TIMER_STAT_ADD(priv_timer, pending, -1);
     616                 :            :         }
     617                 :            : 
     618                 :            :         /* mark timer as stopped */
     619                 :            :         status.state = RTE_TIMER_STOP;
     620                 :            :         status.owner = RTE_TIMER_NO_OWNER;
     621                 :            :         /* The "RELEASE" ordering guarantees the memory operations above
     622                 :            :          * the status update are observed before the update by all threads
     623                 :            :          */
     624                 :     100971 :         rte_atomic_store_explicit(&tim->status.u32, status.u32, rte_memory_order_release);
     625                 :            : 
     626                 :     100971 :         return 0;
     627                 :            : }
     628                 :            : 
     629                 :            : /* Stop the timer associated with the timer handle tim */
     630                 :            : RTE_EXPORT_SYMBOL(rte_timer_stop)
     631                 :            : int
     632                 :     100978 : rte_timer_stop(struct rte_timer *tim)
     633                 :            : {
     634                 :     100978 :         return rte_timer_alt_stop(default_data_id, tim);
     635                 :            : }
     636                 :            : 
     637                 :            : RTE_EXPORT_SYMBOL(rte_timer_alt_stop)
     638                 :            : int
     639                 :     100978 : rte_timer_alt_stop(uint32_t timer_data_id, struct rte_timer *tim)
     640                 :            : {
     641                 :            :         struct rte_timer_data *timer_data;
     642                 :            : 
     643         [ +  - ]:     100978 :         TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);
     644                 :            : 
     645                 :     100975 :         return __rte_timer_stop(tim, timer_data);
     646                 :            : }
     647                 :            : 
      648                 :            : /* loop until rte_timer_stop() succeeds */
     649                 :            : RTE_EXPORT_SYMBOL(rte_timer_stop_sync)
     650                 :            : void
     651                 :        978 : rte_timer_stop_sync(struct rte_timer *tim)
     652                 :            : {
     653         [ +  + ]:        979 :         while (rte_timer_stop(tim) != 0)
     654                 :            :                 rte_pause();
     655                 :        978 : }
     656                 :            : 
     657                 :            : /* Test the PENDING status of the timer handle tim */
     658                 :            : RTE_EXPORT_SYMBOL(rte_timer_pending)
     659                 :            : int
     660                 :        860 : rte_timer_pending(struct rte_timer *tim)
     661                 :            : {
     662                 :        860 :         return rte_atomic_load_explicit(&tim->status.state,
     663                 :        860 :                                 rte_memory_order_relaxed) == RTE_TIMER_PENDING;
     664                 :            : }
     665                 :            : 
      666                 :            : /* must be called periodically, run all timers that have expired */
     667                 :            : static void
     668         [ -  + ]:     485799 : __rte_timer_manage(struct rte_timer_data *timer_data)
     669                 :            : {
     670                 :            :         union rte_timer_status status;
     671                 :            :         struct rte_timer *tim, *next_tim;
     672                 :            :         struct rte_timer *run_first_tim, **pprev;
     673                 :            :         unsigned lcore_id = rte_lcore_id();
     674                 :            :         struct rte_timer *prev[MAX_SKIPLIST_DEPTH + 1];
     675                 :            :         uint64_t cur_time;
     676                 :            :         int i, ret;
     677                 :     485799 :         struct priv_timer *priv_timer = timer_data->priv_timer;
     678                 :            : 
     679                 :            :         /* timer manager only runs on EAL thread with valid lcore_id */
     680         [ -  + ]:     485799 :         assert(lcore_id < RTE_MAX_LCORE);
     681                 :            : 
     682                 :            :         __TIMER_STAT_ADD(priv_timer, manage, 1);
     683                 :            :         /* optimize for the case where per-cpu list is empty */
     684         [ +  + ]:     485799 :         if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL)
     685                 :     487047 :                 return;
     686                 :            :         cur_time = rte_get_timer_cycles();
     687                 :            : 
     688                 :            : #ifdef RTE_ARCH_64
      689                 :            :         /* on 64-bit the value cached in pending_head.expire will be
     690                 :            :          * updated atomically, so we can consult that for a quick check here
     691                 :            :          * outside the lock */
     692         [ +  + ]:     303414 :         if (likely(priv_timer[lcore_id].pending_head.expire > cur_time))
     693                 :            :                 return;
     694                 :            : #endif
     695                 :            : 
     696                 :            :         /* browse ordered list, add expired timers in 'expired' list */
     697                 :        865 :         rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
     698                 :            : 
     699                 :            :         /* if nothing to do just unlock and return */
     700         [ +  + ]:        867 :         if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL ||
     701         [ -  + ]:        863 :             priv_timer[lcore_id].pending_head.sl_next[0]->expire > cur_time) {
     702                 :            :                 rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
     703                 :          4 :                 return;
     704                 :            :         }
     705                 :            : 
     706                 :            :         /* save start of list of expired timers */
     707                 :            :         tim = priv_timer[lcore_id].pending_head.sl_next[0];
     708                 :            : 
     709                 :            :         /* break the existing list at current time point */
     710                 :        863 :         timer_get_prev_entries(cur_time, lcore_id, prev, priv_timer);
     711         [ +  + ]:       1742 :         for (i = priv_timer[lcore_id].curr_skiplist_depth -1; i >= 0; i--) {
     712         [ +  + ]:        879 :                 if (prev[i] == &priv_timer[lcore_id].pending_head)
     713                 :          2 :                         continue;
     714                 :        877 :                 priv_timer[lcore_id].pending_head.sl_next[i] =
     715                 :        877 :                     prev[i]->sl_next[i];
     716         [ +  + ]:        877 :                 if (prev[i]->sl_next[i] == NULL)
     717                 :        872 :                         priv_timer[lcore_id].curr_skiplist_depth--;
     718                 :        877 :                 prev[i] ->sl_next[i] = NULL;
     719                 :            :         }
     720                 :            : 
     721                 :            :         /* transition run-list from PENDING to RUNNING */
     722                 :        863 :         run_first_tim = tim;
     723                 :            :         pprev = &run_first_tim;
     724                 :            : 
     725         [ +  + ]:      18109 :         for ( ; tim != NULL; tim = next_tim) {
     726                 :      17246 :                 next_tim = tim->sl_next[0];
     727                 :            : 
     728                 :      17246 :                 ret = timer_set_running_state(tim);
     729         [ +  + ]:      17246 :                 if (likely(ret == 0)) {
     730                 :      17243 :                         pprev = &tim->sl_next[0];
     731                 :            :                 } else {
     732                 :            :                         /* another core is trying to re-config this one,
     733                 :            :                          * remove it from local expired list
     734                 :            :                          */
     735                 :          3 :                         *pprev = next_tim;
     736                 :            :                 }
     737                 :            :         }
     738                 :            : 
     739                 :            :         /* update the next to expire timer value */
     740                 :        863 :         priv_timer[lcore_id].pending_head.expire =
     741         [ +  + ]:        863 :             (priv_timer[lcore_id].pending_head.sl_next[0] == NULL) ? 0 :
     742                 :            :                 priv_timer[lcore_id].pending_head.sl_next[0]->expire;
     743                 :            : 
     744                 :            :         rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
     745                 :            : 
     746                 :            :         /* now scan expired list and call callbacks */
     747         [ +  + ]:      18107 :         for (tim = run_first_tim; tim != NULL; tim = next_tim) {
     748                 :      17244 :                 next_tim = tim->sl_next[0];
     749                 :      17244 :                 priv_timer[lcore_id].updated = 0;
     750                 :      17244 :                 priv_timer[lcore_id].running_tim = tim;
     751                 :            : 
     752                 :            :                 /* execute callback function with list unlocked */
     753                 :      17244 :                 tim->f(tim, tim->arg);
     754                 :            : 
     755                 :            :                 __TIMER_STAT_ADD(priv_timer, pending, -1);
     756                 :            :                 /* the timer was stopped or reloaded by the callback
      757                 :            :                  * function, so we have nothing to do here */
     758         [ +  + ]:      17244 :                 if (priv_timer[lcore_id].updated == 1)
     759                 :          9 :                         continue;
     760                 :            : 
     761         [ +  + ]:      17235 :                 if (tim->period == 0) {
     762                 :            :                         /* remove from done list and mark timer as stopped */
     763                 :      17229 :                         status.state = RTE_TIMER_STOP;
     764                 :      17229 :                         status.owner = RTE_TIMER_NO_OWNER;
     765                 :            :                         /* The "RELEASE" ordering guarantees the memory
     766                 :            :                          * operations above the status update are observed
     767                 :            :                          * before the update by all threads
     768                 :            :                          */
     769                 :      17229 :                         rte_atomic_store_explicit(&tim->status.u32, status.u32,
     770                 :            :                                 rte_memory_order_release);
     771                 :            :                 }
     772                 :            :                 else {
     773                 :            :                         /* keep it in list and mark timer as pending */
     774                 :            :                         rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
     775                 :          6 :                         status.state = RTE_TIMER_PENDING;
     776                 :            :                         __TIMER_STAT_ADD(priv_timer, pending, 1);
     777                 :          6 :                         status.owner = (int16_t)lcore_id;
     778                 :            :                         /* The "RELEASE" ordering guarantees the memory
     779                 :            :                          * operations above the status update are observed
     780                 :            :                          * before the update by all threads
     781                 :            :                          */
     782                 :          6 :                         rte_atomic_store_explicit(&tim->status.u32, status.u32,
     783                 :            :                                 rte_memory_order_release);
     784                 :          6 :                         __rte_timer_reset(tim, tim->expire + tim->period,
     785                 :            :                                 tim->period, lcore_id, tim->f, tim->arg, 1,
     786                 :            :                                 timer_data);
     787                 :            :                         rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
     788                 :            :                 }
     789                 :            :         }
     790                 :        863 :         priv_timer[lcore_id].running_tim = NULL;
     791                 :            : }
     792                 :            : 
     793                 :            : RTE_EXPORT_SYMBOL(rte_timer_manage)
     794                 :            : int
     795         [ +  - ]:     476694 : rte_timer_manage(void)
     796                 :            : {
     797                 :            :         struct rte_timer_data *timer_data;
     798                 :            : 
     799                 :            :         TIMER_DATA_VALID_GET_OR_ERR_RET(default_data_id, timer_data, -EINVAL);
     800                 :            : 
     801                 :     476887 :         __rte_timer_manage(timer_data);
     802                 :            : 
     803                 :     480956 :         return 0;
     804                 :            : }
     805                 :            : 
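Example (not part of the instrumented source): a minimal sketch of the usual way an application drives the code path above, assuming the EAL and the timer subsystem are already initialized on this lcore; the callback name and the one-second period are placeholders.

#include <rte_timer.h>
#include <rte_cycles.h>
#include <rte_lcore.h>

static void
example_cb(struct rte_timer *tim, void *arg)    /* hypothetical callback */
{
        (void)tim;
        (void)arg;
}

static void
example_poll_loop(void)
{
        struct rte_timer tim;

        rte_timer_init(&tim);
        /* single-shot timer, one second from now, handled by this lcore */
        rte_timer_reset(&tim, rte_get_timer_hz(), SINGLE,
                        rte_lcore_id(), example_cb, NULL);

        for (;;)
                rte_timer_manage();     /* runs expired callbacks on this lcore */
}
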
     806                 :            : RTE_EXPORT_SYMBOL(rte_timer_alt_manage)
     807                 :            : int
     808         [ #  # ]:          0 : rte_timer_alt_manage(uint32_t timer_data_id,
     809                 :            :                      unsigned int *poll_lcores,
     810                 :            :                      int nb_poll_lcores,
     811                 :            :                      rte_timer_alt_manage_cb_t f)
     812                 :            : {
     813                 :          0 :         unsigned int default_poll_lcores[] = {rte_lcore_id()};
     814                 :            :         union rte_timer_status status;
     815                 :            :         struct rte_timer *tim, *next_tim, **pprev;
     816                 :            :         struct rte_timer *run_first_tims[RTE_MAX_LCORE];
     817                 :            :         unsigned int this_lcore = rte_lcore_id();
     818                 :            :         struct rte_timer *prev[MAX_SKIPLIST_DEPTH + 1];
     819                 :            :         uint64_t cur_time;
     820                 :            :         int i, j, ret;
     821                 :            :         int nb_runlists = 0;
     822                 :            :         struct rte_timer_data *data;
     823                 :            :         struct priv_timer *privp;
     824                 :            :         uint32_t poll_lcore;
     825                 :            : 
     826         [ #  # ]:          0 :         TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, data, -EINVAL);
     827                 :            : 
     828                 :            :         /* timer manager only runs on EAL thread with valid lcore_id */
     829         [ #  # ]:          0 :         assert(this_lcore < RTE_MAX_LCORE);
     830                 :            : 
     831                 :            :         __TIMER_STAT_ADD(data->priv_timer, manage, 1);
     832                 :            : 
     833         [ #  # ]:          0 :         if (poll_lcores == NULL) {
     834                 :            :                 poll_lcores = default_poll_lcores;
     835                 :            :                 nb_poll_lcores = RTE_DIM(default_poll_lcores);
     836                 :            :         }
     837                 :            : 
     838         [ #  # ]:          0 :         for (i = 0; i < nb_poll_lcores; i++) {
     839                 :          0 :                 poll_lcore = poll_lcores[i];
     840                 :            :                 privp = &data->priv_timer[poll_lcore];
     841                 :            : 
     842                 :            :                 /* optimize for the case where per-cpu list is empty */
     843         [ #  # ]:          0 :                 if (privp->pending_head.sl_next[0] == NULL)
     844                 :          0 :                         continue;
     845                 :            :                 cur_time = rte_get_timer_cycles();
     846                 :            : 
     847                 :            : #ifdef RTE_ARCH_64
     848                 :            :                 /* on 64-bit the value cached in the pending_head.expired will
      849                 :            :                 /* on 64-bit the value cached in the pending_head.expire will
     850                 :            :                  * check here outside the lock
     851                 :            :                  */
     852         [ #  # ]:          0 :                 if (likely(privp->pending_head.expire > cur_time))
     853                 :          0 :                         continue;
     854                 :            : #endif
     855                 :            : 
     856                 :            :                 /* browse ordered list, add expired timers in 'expired' list */
     857                 :          0 :                 rte_spinlock_lock(&privp->list_lock);
     858                 :            : 
     859                 :            :                 /* if nothing to do just unlock and return */
     860         [ #  # ]:          0 :                 if (privp->pending_head.sl_next[0] == NULL ||
     861         [ #  # ]:          0 :                     privp->pending_head.sl_next[0]->expire > cur_time) {
     862                 :            :                         rte_spinlock_unlock(&privp->list_lock);
     863                 :          0 :                         continue;
     864                 :            :                 }
     865                 :            : 
     866                 :            :                 /* save start of list of expired timers */
     867                 :            :                 tim = privp->pending_head.sl_next[0];
     868                 :            : 
     869                 :            :                 /* break the existing list at current time point */
     870                 :          0 :                 timer_get_prev_entries(cur_time, poll_lcore, prev,
     871                 :          0 :                                        data->priv_timer);
     872         [ #  # ]:          0 :                 for (j = privp->curr_skiplist_depth - 1; j >= 0; j--) {
     873         [ #  # ]:          0 :                         if (prev[j] == &privp->pending_head)
     874                 :          0 :                                 continue;
     875                 :          0 :                         privp->pending_head.sl_next[j] =
     876                 :          0 :                                 prev[j]->sl_next[j];
     877         [ #  # ]:          0 :                         if (prev[j]->sl_next[j] == NULL)
     878                 :          0 :                                 privp->curr_skiplist_depth--;
     879                 :            : 
     880                 :          0 :                         prev[j]->sl_next[j] = NULL;
     881                 :            :                 }
     882                 :            : 
     883                 :            :                 /* transition run-list from PENDING to RUNNING */
     884                 :          0 :                 run_first_tims[nb_runlists] = tim;
     885                 :          0 :                 pprev = &run_first_tims[nb_runlists];
     886                 :          0 :                 nb_runlists++;
     887                 :            : 
     888         [ #  # ]:          0 :                 for ( ; tim != NULL; tim = next_tim) {
     889                 :          0 :                         next_tim = tim->sl_next[0];
     890                 :            : 
     891                 :          0 :                         ret = timer_set_running_state(tim);
     892         [ #  # ]:          0 :                         if (likely(ret == 0)) {
     893                 :          0 :                                 pprev = &tim->sl_next[0];
     894                 :            :                         } else {
     895                 :            :                                 /* another core is trying to re-config this one,
      896                 :            :                                  * so remove it from the local expired list
     897                 :            :                                  */
     898                 :          0 :                                 *pprev = next_tim;
     899                 :            :                         }
     900                 :            :                 }
     901                 :            : 
     902                 :            :                 /* update the next to expire timer value */
     903                 :          0 :                 privp->pending_head.expire =
     904         [ #  # ]:          0 :                     (privp->pending_head.sl_next[0] == NULL) ? 0 :
     905                 :            :                         privp->pending_head.sl_next[0]->expire;
     906                 :            : 
     907                 :            :                 rte_spinlock_unlock(&privp->list_lock);
     908                 :            :         }
     909                 :            : 
     910                 :            :         /* Now process the run lists */
     911                 :            :         while (1) {
     912                 :            :                 bool done = true;
     913                 :            :                 uint64_t min_expire = UINT64_MAX;
     914                 :            :                 int min_idx = 0;
     915                 :            : 
     916                 :            :                 /* Find the next oldest timer to process */
     917         [ #  # ]:          0 :                 for (i = 0; i < nb_runlists; i++) {
     918                 :          0 :                         tim = run_first_tims[i];
     919                 :            : 
     920   [ #  #  #  # ]:          0 :                         if (tim != NULL && tim->expire < min_expire) {
     921                 :            :                                 min_expire = tim->expire;
     922                 :            :                                 min_idx = i;
     923                 :            :                                 done = false;
     924                 :            :                         }
     925                 :            :                 }
     926                 :            : 
     927         [ #  # ]:          0 :                 if (done)
     928                 :            :                         break;
     929                 :            : 
     930                 :          0 :                 tim = run_first_tims[min_idx];
     931                 :            : 
     932                 :            :                 /* Move down the runlist from which we picked a timer to
     933                 :            :                  * execute
     934                 :            :                  */
     935                 :          0 :                 run_first_tims[min_idx] = run_first_tims[min_idx]->sl_next[0];
     936                 :            : 
     937                 :          0 :                 data->priv_timer[this_lcore].updated = 0;
     938                 :          0 :                 data->priv_timer[this_lcore].running_tim = tim;
     939                 :            : 
     940                 :            :                 /* Call the provided callback function */
     941                 :          0 :                 f(tim);
     942                 :            : 
     943                 :            :                 __TIMER_STAT_ADD(data->priv_timer, pending, -1);
     944                 :            : 
     945                 :            :                 /* the timer was stopped or reloaded by the callback
      946                 :            :                  * function, so we have nothing to do here
     947                 :            :                  */
     948         [ #  # ]:          0 :                 if (data->priv_timer[this_lcore].updated == 1)
     949                 :          0 :                         continue;
     950                 :            : 
     951         [ #  # ]:          0 :                 if (tim->period == 0) {
     952                 :            :                         /* remove from done list and mark timer as stopped */
     953                 :          0 :                         status.state = RTE_TIMER_STOP;
     954                 :          0 :                         status.owner = RTE_TIMER_NO_OWNER;
     955                 :            :                         /* The "RELEASE" ordering guarantees the memory
     956                 :            :                          * operations above the status update are observed
     957                 :            :                          * before the update by all threads
     958                 :            :                          */
     959                 :          0 :                         rte_atomic_store_explicit(&tim->status.u32, status.u32,
     960                 :            :                                 rte_memory_order_release);
     961                 :            :                 } else {
     962                 :            :                         /* keep it in list and mark timer as pending */
     963                 :          0 :                         rte_spinlock_lock(
     964                 :            :                                 &data->priv_timer[this_lcore].list_lock);
     965                 :          0 :                         status.state = RTE_TIMER_PENDING;
     966                 :            :                         __TIMER_STAT_ADD(data->priv_timer, pending, 1);
     967                 :          0 :                         status.owner = (int16_t)this_lcore;
     968                 :            :                         /* The "RELEASE" ordering guarantees the memory
     969                 :            :                          * operations above the status update are observed
     970                 :            :                          * before the update by all threads
     971                 :            :                          */
     972                 :          0 :                         rte_atomic_store_explicit(&tim->status.u32, status.u32,
     973                 :            :                                 rte_memory_order_release);
     974                 :          0 :                         __rte_timer_reset(tim, tim->expire + tim->period,
     975                 :            :                                 tim->period, this_lcore, tim->f, tim->arg, 1,
     976                 :            :                                 data);
     977                 :            :                         rte_spinlock_unlock(
     978                 :            :                                 &data->priv_timer[this_lcore].list_lock);
     979                 :            :                 }
     980                 :            : 
     981                 :          0 :                 data->priv_timer[this_lcore].running_tim = NULL;
     982                 :            :         }
     983                 :            : 
     984                 :            :         return 0;
     985                 :            : }
     986                 :            : 
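A sketch of how rte_timer_alt_manage() is typically used: one lcore drains the pending lists of several worker lcores through a dedicated timer-data instance. The worker lcore ids and the callback body are assumptions for the example; timer_data_id is expected to come from rte_timer_data_alloc().

#include <rte_timer.h>
#include <rte_common.h>

/* rte_timer_alt_manage_cb_t: invoked once per expired timer */
static void
alt_manage_cb(struct rte_timer *tim)
{
        /* run the timer's own callback, much as rte_timer_manage() would */
        tim->f(tim, tim->arg);
}

static int
example_alt_manage(uint32_t timer_data_id)
{
        unsigned int poll_lcores[] = { 1, 2, 3 };   /* assumed worker lcores */

        return rte_timer_alt_manage(timer_data_id, poll_lcores,
                                    RTE_DIM(poll_lcores), alt_manage_cb);
}
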
     987                 :            : /* Walk pending lists, stopping timers and calling user-specified function */
     988                 :            : RTE_EXPORT_SYMBOL(rte_timer_stop_all)
     989                 :            : int
     990                 :          0 : rte_timer_stop_all(uint32_t timer_data_id, unsigned int *walk_lcores,
     991                 :            :                    int nb_walk_lcores,
     992                 :            :                    rte_timer_stop_all_cb_t f, void *f_arg)
     993                 :            : {
     994                 :            :         int i;
     995                 :            :         struct priv_timer *priv_timer;
     996                 :            :         uint32_t walk_lcore;
     997                 :            :         struct rte_timer *tim, *next_tim;
     998                 :            :         struct rte_timer_data *timer_data;
     999                 :            : 
    1000         [ #  # ]:          0 :         TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);
    1001                 :            : 
    1002         [ #  # ]:          0 :         for (i = 0; i < nb_walk_lcores; i++) {
    1003                 :          0 :                 walk_lcore = walk_lcores[i];
    1004                 :            :                 priv_timer = &timer_data->priv_timer[walk_lcore];
    1005                 :            : 
    1006                 :          0 :                 for (tim = priv_timer->pending_head.sl_next[0];
    1007         [ #  # ]:          0 :                      tim != NULL;
    1008                 :            :                      tim = next_tim) {
    1009                 :          0 :                         next_tim = tim->sl_next[0];
    1010                 :            : 
    1011                 :          0 :                         __rte_timer_stop(tim, timer_data);
    1012                 :            : 
    1013         [ #  # ]:          0 :                         if (f)
    1014                 :          0 :                                 f(tim, f_arg);
    1015                 :            :                 }
    1016                 :            :         }
    1017                 :            : 
    1018                 :            :         return 0;
    1019                 :            : }
    1020                 :            : 
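A hedged teardown sketch for rte_timer_stop_all(): stop every timer still pending on the listed lcores and release each timer object from the per-timer callback. It assumes the timers were allocated with rte_malloc(); the lcore ids are placeholders.

#include <rte_timer.h>
#include <rte_malloc.h>
#include <rte_common.h>

static void
free_timer_cb(struct rte_timer *tim, void *arg)
{
        (void)arg;
        rte_free(tim);          /* assumes rte_malloc()'d timer objects */
}

static int
example_shutdown(uint32_t timer_data_id)
{
        unsigned int walk_lcores[] = { 1, 2, 3 };   /* assumed worker lcores */

        return rte_timer_stop_all(timer_data_id, walk_lcores,
                                  RTE_DIM(walk_lcores),
                                  free_timer_cb, NULL);
}
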
    1021                 :            : RTE_EXPORT_SYMBOL(rte_timer_next_ticks)
    1022                 :            : int64_t
    1023         [ #  # ]:          0 : rte_timer_next_ticks(void)
    1024                 :            : {
    1025                 :            :         unsigned int lcore_id = rte_lcore_id();
    1026                 :            :         struct rte_timer_data *timer_data;
    1027                 :            :         struct priv_timer *priv_timer;
    1028                 :            :         const struct rte_timer *tm;
    1029                 :            :         uint64_t cur_time;
    1030                 :            :         int64_t left = -ENOENT;
    1031                 :            : 
    1032                 :            :         TIMER_DATA_VALID_GET_OR_ERR_RET(default_data_id, timer_data, -EINVAL);
    1033                 :            : 
    1034                 :          0 :         priv_timer = timer_data->priv_timer;
    1035                 :            :         cur_time = rte_get_timer_cycles();
    1036                 :            : 
    1037                 :          0 :         rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
    1038                 :          0 :         tm = priv_timer[lcore_id].pending_head.sl_next[0];
    1039         [ #  # ]:          0 :         if (tm) {
    1040                 :          0 :                 left = tm->expire - cur_time;
    1041                 :            :                 if (left < 0)
    1042                 :            :                         left = 0;
    1043                 :            :         }
    1044                 :            :         rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
    1045                 :            : 
    1046                 :          0 :         return left;
    1047                 :            : }
    1048                 :            : 
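A sketch of using rte_timer_next_ticks() to avoid busy-polling: sleep until the next timer on this lcore is due, capped at an assumed 100 us back-off, then call rte_timer_manage(). A return of 0 means a timer is already due; a negative value (-ENOENT) means nothing is pending.

#include <rte_timer.h>
#include <rte_cycles.h>
#include <rte_common.h>

static void
example_adaptive_poll(void)
{
        for (;;) {
                int64_t ticks = rte_timer_next_ticks();

                if (ticks != 0) {               /* nothing due right now */
                        uint64_t us = 100;      /* assumed cap / back-off */

                        if (ticks > 0)
                                us = RTE_MIN(us,
                                        (uint64_t)ticks * 1000000 /
                                        rte_get_timer_hz());
                        rte_delay_us((unsigned int)us);
                }
                rte_timer_manage();
        }
}
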
    1049                 :            : /* dump statistics about timers */
    1050                 :            : static void
    1051                 :            : __rte_timer_dump_stats(struct rte_timer_data *timer_data __rte_unused, FILE *f)
    1052                 :            : {
    1053                 :            : #ifdef RTE_LIBRTE_TIMER_DEBUG
    1054                 :            :         struct rte_timer_debug_stats sum;
    1055                 :            :         unsigned lcore_id;
    1056                 :            :         struct priv_timer *priv_timer = timer_data->priv_timer;
    1057                 :            : 
    1058                 :            :         memset(&sum, 0, sizeof(sum));
    1059                 :            :         for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
    1060                 :            :                 sum.reset += priv_timer[lcore_id].stats.reset;
    1061                 :            :                 sum.stop += priv_timer[lcore_id].stats.stop;
    1062                 :            :                 sum.manage += priv_timer[lcore_id].stats.manage;
    1063                 :            :                 sum.pending += priv_timer[lcore_id].stats.pending;
    1064                 :            :         }
    1065                 :            :         fprintf(f, "Timer statistics:\n");
    1066                 :            :         fprintf(f, "  reset = %"PRIu64"\n", sum.reset);
    1067                 :            :         fprintf(f, "  stop = %"PRIu64"\n", sum.stop);
    1068                 :            :         fprintf(f, "  manage = %"PRIu64"\n", sum.manage);
    1069                 :            :         fprintf(f, "  pending = %"PRIu64"\n", sum.pending);
    1070                 :            : #else
    1071                 :            :         fprintf(f, "No timer statistics, RTE_LIBRTE_TIMER_DEBUG is disabled\n");
    1072                 :            : #endif
    1073                 :            : }
    1074                 :            : 
    1075                 :            : RTE_EXPORT_SYMBOL(rte_timer_dump_stats)
    1076                 :            : int
    1077                 :          1 : rte_timer_dump_stats(FILE *f)
    1078                 :            : {
    1079                 :          1 :         return rte_timer_alt_dump_stats(default_data_id, f);
    1080                 :            : }
    1081                 :            : 
    1082                 :            : RTE_EXPORT_SYMBOL(rte_timer_alt_dump_stats)
    1083                 :            : int
    1084                 :          1 : rte_timer_alt_dump_stats(uint32_t timer_data_id __rte_unused, FILE *f)
    1085                 :            : {
    1086                 :            :         struct rte_timer_data *timer_data;
    1087                 :            : 
    1088         [ +  - ]:          1 :         TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);
    1089                 :            : 
    1090                 :            :         __rte_timer_dump_stats(timer_data, f);
    1091                 :            : 
    1092                 :          1 :         return 0;
    1093                 :            : }
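A final sketch: printing the aggregated counters. With RTE_LIBRTE_TIMER_DEBUG disabled, as in this build, only the fallback message shown above is emitted.

#include <stdio.h>
#include <rte_timer.h>

static void
example_dump(void)
{
        if (rte_timer_dump_stats(stdout) != 0)
                fprintf(stderr, "timer library not initialized\n");
}
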

Generated by: LCOV version 1.14