Branch data Line data Source code
1 : : /* SPDX-License-Identifier: BSD-3-Clause
2 : : * Copyright 2022 Mellanox Technologies, Ltd
3 : : */
4 : :
5 : : #ifndef _MLX5_HWS_CNT_H_
6 : : #define _MLX5_HWS_CNT_H_
7 : :
8 : : #include <rte_ring.h>
9 : : #include "mlx5_utils.h"
10 : : #include "mlx5_flow.h"
11 : :
12 : : /*
13 : : * HWS COUNTER ID's layout
14 : : * 3 2 1 0
15 : : * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
16 : : * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
17 : : * | T | | D | |
18 : : * ~ Y | | C | IDX ~
19 : : * | P | | S | |
20 : : * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
21 : : *
22 : : * Bit 31:29 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10
23 : : * Bit 25:24 = DCS index
24 : : * Bit 23:00 = IDX within the DCS bulk this counter belongs to.
25 : : */
26 : :
27 : : #define MLX5_HWS_CNT_DCS_IDX_OFFSET 24
28 : : #define MLX5_HWS_CNT_DCS_IDX_MASK 0x3
29 : : #define MLX5_HWS_CNT_IDX_MASK ((1UL << MLX5_HWS_CNT_DCS_IDX_OFFSET) - 1)
30 : :
31 : : #define MLX5_HWS_AGE_IDX_MASK (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1)
32 : :
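/*
 * Illustrative example of the layout above (assumed value, not part of the
 * driver): a counter id of 0x4100000A decomposes with these masks as
 *
 *   type    = cnt_id >> MLX5_INDIRECT_ACTION_TYPE_OFFSET           = 0x2
 *   dcs_idx = (cnt_id >> MLX5_HWS_CNT_DCS_IDX_OFFSET)
 *             & MLX5_HWS_CNT_DCS_IDX_MASK                          = 1
 *   offset  = cnt_id & MLX5_HWS_CNT_IDX_MASK                       = 0xA
 *
 * i.e. an indirect COUNT action backed by DCS bulk 1, counter 10 in that bulk.
 */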
33 : : struct mlx5_hws_cnt_dcs {
34 : : struct mlx5dr_action *root_action; /* mlx5dr action used on root groups. */
35 : : struct mlx5dr_action *hws_action; /* mlx5dr action used on non-root groups. */
36 : : uint32_t batch_sz;
37 : : uint32_t iidx; /* internal index of first counter in this bulk. */
38 : : struct mlx5_devx_obj *obj;
39 : : };
40 : :
41 : : struct mlx5_hws_cnt_dcs_mng {
42 : : uint32_t batch_total;
43 : : struct mlx5_hws_cnt_dcs dcs[MLX5_HWS_CNT_DCS_NUM];
44 : : };
45 : :
46 : : union mlx5_hws_cnt_state {
47 : : RTE_ATOMIC(uint32_t) data;
48 : : struct {
49 : : uint32_t in_used:1;
50 : : /* Indicator whether this counter is in use or in the pool. */
51 : : uint32_t share:1;
52 : : /*
53 : : * share will be set to 1 when this counter is used as an
54 : : * indirect action.
55 : : */
56 : : uint32_t age_idx:24;
57 : : /*
58 : : * When this counter is used for aging, this field stores the
59 : : * index of the AGE parameter. Otherwise, it is zero.
60 : : */
61 : : };
62 : : };
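/*
 * Note: packing the three bit-fields into one 32-bit word lets the helpers
 * below read and write the whole counter state with a single atomic
 * load/store instead of racing on individual bit-fields.
 */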
63 : :
64 : : struct mlx5_hws_cnt {
65 : : struct flow_counter_stats reset;
66 : : union mlx5_hws_cnt_state cnt_state;
67 : : /* This field is only meaningful when the user owns this counter. */
68 : : RTE_ATOMIC(uint32_t) query_gen_when_free;
69 : : /*
70 : : * When the PMD owns this counter (i.e. the user has put the counter
71 : : * back into the PMD counter pool), this field records the value of the
72 : : * counter pool's query generation at the time the user released it.
73 : : */
74 : : };
75 : :
76 : : struct mlx5_hws_cnt_raw_data_mng {
77 : : struct flow_counter_stats *raw;
78 : : struct mlx5_pmd_mr mr;
79 : : };
80 : :
81 : : struct mlx5_hws_cache_param {
82 : : uint32_t size;
83 : : uint32_t q_num;
84 : : uint32_t fetch_sz;
85 : : uint32_t threshold;
86 : : uint32_t preload_sz;
87 : : };
88 : :
89 : : struct mlx5_hws_cnt_pool_cfg {
90 : : char *name;
91 : : uint32_t request_num;
92 : : uint32_t alloc_factor;
93 : : struct mlx5_hws_cnt_pool *host_cpool;
94 : : };
95 : :
96 : : struct mlx5_hws_cnt_pool_caches {
97 : : uint32_t fetch_sz;
98 : : uint32_t threshold;
99 : : uint32_t preload_sz;
100 : : uint32_t q_num;
101 : : struct rte_ring *qcache[];
102 : : };
103 : :
104 : : struct __rte_cache_aligned mlx5_hws_cnt_pool {
105 : : LIST_ENTRY(mlx5_hws_cnt_pool) next;
106 : : alignas(RTE_CACHE_LINE_SIZE) struct mlx5_hws_cnt_pool_cfg cfg;
107 : : alignas(RTE_CACHE_LINE_SIZE) struct mlx5_hws_cnt_dcs_mng dcs_mng;
108 : : alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) query_gen;
109 : : struct mlx5_hws_cnt *pool;
110 : : struct mlx5_hws_cnt_raw_data_mng *raw_mng;
111 : : struct rte_ring *reuse_list;
112 : : struct rte_ring *free_list;
113 : : struct rte_ring *wait_reset_list;
114 : : struct mlx5_hws_cnt_pool_caches *cache;
115 : : uint64_t time_of_last_age_check;
116 : : struct mlx5_priv *priv;
117 : : };
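/*
 * Rough counter lifecycle through the rings above, as can be inferred from
 * the helpers in this header: allocation takes ids from the per-queue cache,
 * which is refilled from reuse_list and then free_list; release returns ids
 * to the per-queue cache or, when no cache is used or it overflows, to
 * wait_reset_list, where they stay until the query service advances
 * query_gen and recycles them.
 */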
118 : :
119 : : /* HWS AGE status. */
120 : : enum {
121 : : HWS_AGE_FREE, /* Initialized state. */
122 : : HWS_AGE_CANDIDATE, /* AGE assigned to flows. */
123 : : HWS_AGE_CANDIDATE_INSIDE_RING,
124 : : /*
125 : : * AGE assigned to flows but still inside the aged-out ring. It was aged
126 : : * out but the timeout was changed, so it is in the ring yet still a candidate.
127 : : */
128 : : HWS_AGE_AGED_OUT_REPORTED,
129 : : /*
130 : : * Aged-out, reported by rte_flow_get_q_aged_flows and waiting for destroy.
131 : : */
132 : : HWS_AGE_AGED_OUT_NOT_REPORTED,
133 : : /*
134 : : * Aged-out, inside the aged-out ring.
135 : : * Waiting for rte_flow_get_q_aged_flows and destroy.
136 : : */
137 : : };
138 : :
139 : : /* HWS counter age parameter. */
140 : : struct __rte_cache_aligned __rte_packed_begin mlx5_hws_age_param {
141 : : RTE_ATOMIC(uint32_t) timeout; /* Aging timeout in seconds (atomically accessed). */
142 : : RTE_ATOMIC(uint32_t) sec_since_last_hit;
143 : : /* Time in seconds since last hit (atomically accessed). */
144 : : RTE_ATOMIC(uint16_t) state; /* AGE state (atomically accessed). */
145 : : uint64_t accumulator_last_hits;
146 : : /* Last total value of hits for comparing. */
147 : : uint64_t accumulator_hits;
148 : : /* Accumulator for hits coming from several counters. */
149 : : uint32_t accumulator_cnt;
150 : : /* Number of counters which have already updated the accumulator in this second. */
151 : : uint32_t nb_cnts; /* Number of counters used by this AGE. */
152 : : uint32_t queue_id; /* Queue id of the counter. */
153 : : cnt_id_t own_cnt_index;
154 : : /* Counter action created specifically for this AGE action. */
155 : : void *context; /* Flow AGE context. */
156 : : } __rte_packed_end;
157 : :
158 : :
159 : : /**
160 : : * Return the counter pool that should actually be used.
161 : : * In cross-vHCA sharing mode this is the host pool, otherwise the given pool.
162 : : *
163 : : * @param cpool
164 : : * A pointer to the counter pool.
165 : : * @return
166 : : * The host counter pool if one is configured, otherwise @p cpool itself.
167 : : */
168 : : static __rte_always_inline struct mlx5_hws_cnt_pool *
169 : : mlx5_hws_cnt_host_pool(struct mlx5_hws_cnt_pool *cpool)
170 : : {
171 [ # # ]: 0 : return cpool->cfg.host_cpool ? cpool->cfg.host_cpool : cpool;
172 : : }
173 : :
174 : : /**
175 : : * Translate counter id into internal index (starting from 0), which can be used
176 : : * as index of raw/cnt pool.
177 : : *
178 : : * @param cnt_id
179 : : * The external counter id
180 : : * @return
181 : : * Internal index
182 : : */
183 : : static __rte_always_inline uint32_t
184 : : mlx5_hws_cnt_iidx(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
185 : : {
186 : : struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
187 : 0 : uint8_t dcs_idx = cnt_id >> MLX5_HWS_CNT_DCS_IDX_OFFSET;
188 : 0 : uint32_t offset = cnt_id & MLX5_HWS_CNT_IDX_MASK;
189 : :
190 : 0 : dcs_idx &= MLX5_HWS_CNT_DCS_IDX_MASK;
191 : 0 : return (hpool->dcs_mng.dcs[dcs_idx].iidx + offset);
192 : : }
193 : :
194 : : /**
195 : : * Check if the given counter id is valid.
196 : : */
197 : : static __rte_always_inline bool
198 : : mlx5_hws_cnt_id_valid(cnt_id_t cnt_id)
199 : : {
200 [ # # ]: 0 : return (cnt_id >> MLX5_INDIRECT_ACTION_TYPE_OFFSET) ==
201 : : MLX5_INDIRECT_ACTION_TYPE_COUNT ? true : false;
202 : : }
203 : :
204 : : static __rte_always_inline void
205 : : mlx5_hws_cnt_set_age_idx(struct mlx5_hws_cnt *cnt, uint32_t value)
206 : : {
207 : : union mlx5_hws_cnt_state cnt_state;
208 : :
209 : 0 : cnt_state.data = rte_atomic_load_explicit(&cnt->cnt_state.data, rte_memory_order_acquire);
210 : 0 : cnt_state.age_idx = value;
211 : 0 : rte_atomic_store_explicit(&cnt->cnt_state.data, cnt_state.data, rte_memory_order_release);
212 : : }
213 : :
214 : : static __rte_always_inline void
215 : : mlx5_hws_cnt_set_all(struct mlx5_hws_cnt *cnt, uint32_t in_used, uint32_t share, uint32_t age_idx)
216 : : {
217 : : union mlx5_hws_cnt_state cnt_state;
218 : :
219 : 0 : cnt_state.in_used = !!in_used;
220 : 0 : cnt_state.share = !!share;
221 : 0 : cnt_state.age_idx = age_idx;
222 : 0 : rte_atomic_store_explicit(&cnt->cnt_state.data, cnt_state.data, rte_memory_order_relaxed);
223 : : }
224 : :
225 : : static __rte_always_inline void
226 : : mlx5_hws_cnt_get_all(struct mlx5_hws_cnt *cnt, uint32_t *in_used, uint32_t *share,
227 : : uint32_t *age_idx)
228 : : {
229 : : union mlx5_hws_cnt_state cnt_state;
230 : :
231 : 0 : cnt_state.data = rte_atomic_load_explicit(&cnt->cnt_state.data, rte_memory_order_acquire);
232 : : if (in_used != NULL)
233 : 0 : *in_used = cnt_state.in_used;
234 : : if (share != NULL)
235 : 0 : *share = cnt_state.share;
236 : : if (age_idx != NULL)
237 [ # # ]: 0 : *age_idx = cnt_state.age_idx;
238 : : }
239 : :
240 : : /**
241 : : * Generate Counter id from internal index.
242 : : *
243 : : * @param cpool
244 : : * The pointer to counter pool
245 : : * @param iidx
246 : : * The internal counter index.
247 : : *
248 : : * @return
249 : : * Counter id
250 : : */
251 : : static __rte_always_inline cnt_id_t
252 : : mlx5_hws_cnt_id_gen(struct mlx5_hws_cnt_pool *cpool, uint32_t iidx)
253 : : {
254 : : struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
255 : : struct mlx5_hws_cnt_dcs_mng *dcs_mng = &hpool->dcs_mng;
256 : : uint32_t idx;
257 : : uint32_t offset;
258 : : cnt_id_t cnt_id;
259 : :
260 [ # # ]: 0 : for (idx = 0, offset = iidx; idx < dcs_mng->batch_total; idx++) {
261 [ # # ]: 0 : if (dcs_mng->dcs[idx].batch_sz <= offset)
262 : 0 : offset -= dcs_mng->dcs[idx].batch_sz;
263 : : else
264 : : break;
265 : : }
266 : : cnt_id = offset;
267 : 0 : cnt_id |= (idx << MLX5_HWS_CNT_DCS_IDX_OFFSET);
268 : : return (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
269 : 0 : MLX5_INDIRECT_ACTION_TYPE_OFFSET) | cnt_id;
270 : : }
271 : :
272 : : static __rte_always_inline void
273 : : __hws_cnt_query_raw(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id,
274 : : uint64_t *raw_pkts, uint64_t *raw_bytes)
275 : : {
276 : : struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
277 : 0 : struct mlx5_hws_cnt_raw_data_mng *raw_mng = hpool->raw_mng;
278 : : struct flow_counter_stats s[2];
279 : : uint8_t i = 0x1;
280 : : size_t stat_sz = sizeof(s[0]);
281 : : uint32_t iidx = mlx5_hws_cnt_iidx(cpool, cnt_id);
282 : :
283 : 0 : memcpy(&s[0], &raw_mng->raw[iidx], stat_sz);
284 : : do {
285 [ # # ]: 0 : memcpy(&s[i & 1], &raw_mng->raw[iidx], stat_sz);
286 [ # # ]: 0 : if (memcmp(&s[0], &s[1], stat_sz) == 0) {
287 [ # # ]: 0 : *raw_pkts = rte_be_to_cpu_64(s[0].hits);
288 [ # # ]: 0 : *raw_bytes = rte_be_to_cpu_64(s[0].bytes);
289 : : break;
290 : : }
291 : 0 : i = ~i;
292 : : } while (1);
293 : : }
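/*
 * Note on the loop above: the raw counter record is updated asynchronously
 * (by the counter query service writing fresh statistics into raw_mng->raw),
 * so it is re-read until two consecutive snapshots compare equal, which
 * guarantees a torn 16-byte read is never returned (an optimistic, lock-free
 * consistency check).
 */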
294 : :
295 : : /**
296 : : * Copy elements from one zero-copy ring to another zero-copy ring in place.
297 : : *
298 : : * The input is an rte_ring zero-copy data struct, which has two pointers.
299 : : * In case a wrap-around happened, ptr2 is meaningful.
300 : : *
301 : : * So this routine needs to handle the situation where both the source and
302 : : * the destination addresses may wrap around.
303 : : * First, copy the number of elements that fits up to the first wrap-around
304 : : * address, which may be in the source or in the destination.
305 : : * Second, copy the elements up to the second wrap-around address. If the
306 : : * first wrap-around was in the source, this one must be in the destination,
307 : : * and vice-versa.
308 : : * Third, copy all remaining elements.
309 : : *
310 : : * In the worst case, three pieces of contiguous memory need to be copied.
311 : : *
312 : : * @param zcdd
313 : : * A pointer to zero-copy data of destination ring.
314 : : * @param zcds
315 : : * A pointer to zero-copy data of source ring.
316 : : * @param n
317 : : * Number of elements to copy.
318 : : */
319 : : static __rte_always_inline void
320 : : __hws_cnt_r2rcpy(struct rte_ring_zc_data *zcdd, struct rte_ring_zc_data *zcds,
321 : : unsigned int n)
322 : : {
323 : : unsigned int n1, n2, n3;
324 : : void *s1, *s2, *s3;
325 : : void *d1, *d2, *d3;
326 : :
327 : : s1 = zcds->ptr1;
328 : : d1 = zcdd->ptr1;
329 : 0 : n1 = RTE_MIN(zcdd->n1, zcds->n1);
330 [ # # ]: 0 : if (zcds->n1 > n1) {
331 : 0 : n2 = zcds->n1 - n1;
332 : 0 : s2 = RTE_PTR_ADD(zcds->ptr1, sizeof(cnt_id_t) * n1);
333 : : d2 = zcdd->ptr2;
334 : 0 : n3 = n - n1 - n2;
335 : : s3 = zcds->ptr2;
336 : 0 : d3 = RTE_PTR_ADD(zcdd->ptr2, sizeof(cnt_id_t) * n2);
337 : : } else {
338 : 0 : n2 = zcdd->n1 - n1;
339 : : s2 = zcds->ptr2;
340 : 0 : d2 = RTE_PTR_ADD(zcdd->ptr1, sizeof(cnt_id_t) * n1);
341 : 0 : n3 = n - n1 - n2;
342 : 0 : s3 = RTE_PTR_ADD(zcds->ptr2, sizeof(cnt_id_t) * n2);
343 : : d3 = zcdd->ptr2;
344 : : }
345 [ # # ]: 0 : memcpy(d1, s1, n1 * sizeof(cnt_id_t));
346 [ # # ]: 0 : if (n2 != 0)
347 : 0 : memcpy(d2, s2, n2 * sizeof(cnt_id_t));
348 [ # # ]: 0 : if (n3 != 0)
349 : 0 : memcpy(d3, s3, n3 * sizeof(cnt_id_t));
350 : : }
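/*
 * A worked example of the copy above (assumed sizes, for illustration only):
 * copying n = 10 ids with zcds = {ptr1, n1 = 6, ptr2} and
 * zcdd = {ptr1, n1 = 4, ptr2}. Then n1 = min(4, 6) = 4 elements go from
 * zcds->ptr1 to zcdd->ptr1, n2 = 6 - 4 = 2 elements from zcds->ptr1 + 4
 * elements to zcdd->ptr2, and n3 = 10 - 4 - 2 = 4 elements from zcds->ptr2
 * to zcdd->ptr2 + 2 elements: three contiguous memcpy() calls in total.
 */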
351 : :
352 : : static __rte_always_inline int
353 : : mlx5_hws_cnt_pool_cache_flush(struct mlx5_hws_cnt_pool *cpool,
354 : : uint32_t queue_id)
355 : : {
356 : : unsigned int ret __rte_unused;
357 : : struct rte_ring_zc_data zcdr = {0};
358 : : struct rte_ring_zc_data zcdc = {0};
359 : : struct rte_ring *reset_list = NULL;
360 : 0 : struct rte_ring *qcache = cpool->cache->qcache[queue_id];
361 : 0 : uint32_t ring_size = rte_ring_count(qcache);
362 : :
363 : : ret = rte_ring_dequeue_zc_burst_elem_start(qcache, sizeof(cnt_id_t),
364 : : ring_size, &zcdc, NULL);
365 : : MLX5_ASSERT(ret == ring_size);
366 [ # # ]: 0 : reset_list = cpool->wait_reset_list;
367 : : ret = rte_ring_enqueue_zc_burst_elem_start(reset_list, sizeof(cnt_id_t),
368 : : ring_size, &zcdr, NULL);
369 : : MLX5_ASSERT(ret == ring_size);
370 : : __hws_cnt_r2rcpy(&zcdr, &zcdc, ring_size);
371 : : rte_ring_enqueue_zc_elem_finish(reset_list, ring_size);
372 : : rte_ring_dequeue_zc_elem_finish(qcache, ring_size);
373 : : return 0;
374 : : }
375 : :
376 : : static __rte_always_inline int
377 : : mlx5_hws_cnt_pool_cache_fetch(struct mlx5_hws_cnt_pool *cpool,
378 : : uint32_t queue_id)
379 : : {
380 : 0 : struct rte_ring *qcache = cpool->cache->qcache[queue_id];
381 : : struct rte_ring *free_list = NULL;
382 : : struct rte_ring *reuse_list = NULL;
383 : : struct rte_ring *list = NULL;
384 : : struct rte_ring_zc_data zcdf = {0};
385 : : struct rte_ring_zc_data zcdc = {0};
386 : : struct rte_ring_zc_data zcdu = {0};
387 : : struct rte_ring_zc_data zcds = {0};
388 : : struct mlx5_hws_cnt_pool_caches *cache = cpool->cache;
389 : : unsigned int ret, actual_fetch_size __rte_unused;
390 : :
391 : 0 : reuse_list = cpool->reuse_list;
392 : 0 : ret = rte_ring_dequeue_zc_burst_elem_start(reuse_list,
393 : : sizeof(cnt_id_t), cache->fetch_sz, &zcdu, NULL);
394 : : zcds = zcdu;
395 : : list = reuse_list;
396 [ # # ]: 0 : if (unlikely(ret == 0)) { /* no reuse counter. */
397 : : rte_ring_dequeue_zc_elem_finish(reuse_list, 0);
398 : 0 : free_list = cpool->free_list;
399 [ # # ]: 0 : ret = rte_ring_dequeue_zc_burst_elem_start(free_list,
400 : : sizeof(cnt_id_t),
401 : : cache->fetch_sz,
402 : : &zcdf, NULL);
403 : : zcds = zcdf;
404 : : list = free_list;
405 [ # # ]: 0 : if (unlikely(ret == 0)) { /* no free counter. */
406 : : rte_ring_dequeue_zc_elem_finish(free_list, 0);
407 [ # # ]: 0 : if (rte_ring_count(cpool->wait_reset_list))
408 : : return -EAGAIN;
409 : 0 : return -ENOENT;
410 : : }
411 : : }
412 : : actual_fetch_size = ret;
413 : : ret = rte_ring_enqueue_zc_burst_elem_start(qcache, sizeof(cnt_id_t),
414 : : ret, &zcdc, NULL);
415 : : MLX5_ASSERT(ret == actual_fetch_size);
416 : : __hws_cnt_r2rcpy(&zcdc, &zcds, ret);
417 : : rte_ring_dequeue_zc_elem_finish(list, ret);
418 : : rte_ring_enqueue_zc_elem_finish(qcache, ret);
419 : : return 0;
420 : : }
421 : :
422 : : static __rte_always_inline int
423 : : __mlx5_hws_cnt_pool_enqueue_revert(struct rte_ring *r, unsigned int n,
424 : : struct rte_ring_zc_data *zcd)
425 : : {
426 : : uint32_t current_head = 0;
427 : : uint32_t revert2head = 0;
428 : :
429 : : MLX5_ASSERT(r->prod.sync_type == RTE_RING_SYNC_ST);
430 : : MLX5_ASSERT(r->cons.sync_type == RTE_RING_SYNC_ST);
431 : 0 : current_head = rte_atomic_load_explicit(&r->prod.head, rte_memory_order_relaxed);
432 : : MLX5_ASSERT(n <= r->capacity);
433 : : MLX5_ASSERT(n <= rte_ring_count(r));
434 : 0 : revert2head = current_head - n;
435 : 0 : r->prod.head = revert2head; /* This ring should be SP. */
436 : : __rte_ring_get_elem_addr(r, revert2head, sizeof(cnt_id_t), n,
437 : : &zcd->ptr1, &zcd->n1, &zcd->ptr2);
438 : : /* Update tail */
439 [ # # # ]: 0 : rte_atomic_store_explicit(&r->prod.tail, revert2head, rte_memory_order_release);
440 : : return n;
441 : : }
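/*
 * Note on the helper above: it rolls the single-producer head (and tail) back
 * by @p n, effectively un-enqueueing the n most recently added ids and
 * exposing their slots through @p zcd, so mlx5_hws_cnt_pool_put() can spill a
 * cache overflow straight into the wait-reset list.
 */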
442 : :
443 : : /**
444 : : * Put one counter back into the counter pool.
445 : : *
446 : : * @param cpool
447 : : * A pointer to the counter pool structure.
448 : : * @param queue
449 : : * A pointer to the HWS queue. If NULL, the counter is put into the common pool.
450 : : * @param cnt_id
451 : : * A counter id to be added.
452 : : */
453 : : static __rte_always_inline void
454 : : mlx5_hws_cnt_pool_put(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
455 : : cnt_id_t *cnt_id)
456 : : {
457 : : unsigned int ret = 0;
458 : : struct mlx5_hws_cnt_pool *hpool;
459 : : struct rte_ring_zc_data zcdc = {0};
460 : : struct rte_ring_zc_data zcdr = {0};
461 : : struct rte_ring *qcache = NULL;
462 : : unsigned int wb_num = 0; /* cache write-back number. */
463 : : uint32_t iidx;
464 : :
465 : : hpool = mlx5_hws_cnt_host_pool(cpool);
466 [ # # # # ]: 0 : iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
467 : 0 : mlx5_hws_cnt_set_all(&hpool->pool[iidx], 0, 0, 0);
468 : 0 : rte_atomic_store_explicit(&hpool->pool[iidx].query_gen_when_free,
469 : : rte_atomic_load_explicit(&hpool->query_gen, rte_memory_order_relaxed),
470 : : rte_memory_order_relaxed);
471 [ # # # # ]: 0 : if (likely(queue != NULL) && cpool->cfg.host_cpool == NULL)
472 : 0 : qcache = hpool->cache->qcache[*queue];
473 [ # # ]: 0 : if (unlikely(qcache == NULL)) {
474 [ # # ]: 0 : ret = rte_ring_enqueue_elem(hpool->wait_reset_list, cnt_id,
475 : : sizeof(cnt_id_t));
476 : : MLX5_ASSERT(ret == 0);
477 : : return;
478 : : }
479 : : ret = rte_ring_enqueue_burst_elem(qcache, cnt_id, sizeof(cnt_id_t), 1,
480 : : NULL);
481 [ # # ]: 0 : if (unlikely(ret == 0)) { /* cache is full. */
482 [ # # ]: 0 : struct rte_ring *reset_list = cpool->wait_reset_list;
483 : :
484 [ # # ]: 0 : wb_num = rte_ring_count(qcache) - cpool->cache->threshold;
485 : : MLX5_ASSERT(wb_num < rte_ring_count(qcache));
486 : : __mlx5_hws_cnt_pool_enqueue_revert(qcache, wb_num, &zcdc);
487 : : ret = rte_ring_enqueue_zc_burst_elem_start(reset_list,
488 : : sizeof(cnt_id_t),
489 : : wb_num, &zcdr, NULL);
490 : : MLX5_ASSERT(ret == wb_num);
491 : : __hws_cnt_r2rcpy(&zcdr, &zcdc, ret);
492 : : rte_ring_enqueue_zc_elem_finish(reset_list, ret);
493 : : /* write-back THIS counter too */
494 : : ret = rte_ring_enqueue_burst_elem(reset_list, cnt_id,
495 : : sizeof(cnt_id_t), 1, NULL);
496 : : }
497 : : MLX5_ASSERT(ret == 1);
498 : : }
499 : :
500 : : /**
501 : : * Get one counter from the pool.
502 : : *
503 : : * If @param queue is not NULL, objects are retrieved first from the queue's
504 : : * cache, and subsequently from the common pool. Note that it can return
505 : : * -ENOENT when the local cache and the common pool are empty, even if the
506 : : * caches of other queues are full.
507 : : *
508 : : * @param cntp
509 : : * A pointer to the counter pool structure.
510 : : * @param queue
511 : : * A pointer to HWS queue. If null, it means fetch from common pool.
512 : : * @param cnt_id
513 : : * A pointer to a cnt_id_t (counter id) that will be filled in.
514 : : * @param age_idx
515 : : * Index of AGE parameter using this counter, zero means there is no such AGE.
516 : : *
517 : : * @return
518 : : * - 0: Success; objects taken.
519 : : * - -ENOENT: Not enough entries in the pool; no object is retrieved.
520 : : * - -EAGAIN: counter is not ready; try again.
521 : : */
522 : : static __rte_always_inline int
523 : : mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
524 : : cnt_id_t *cnt_id, uint32_t age_idx, uint32_t shared)
525 : : {
526 : : unsigned int ret;
527 : : struct rte_ring_zc_data zcdc = {0};
528 : : struct rte_ring *qcache = NULL;
529 : : uint32_t iidx, query_gen = 0;
530 [ # # ]: 0 : cnt_id_t tmp_cid = 0;
531 : :
532 [ # # ]: 0 : if (likely(queue != NULL && cpool->cfg.host_cpool == NULL))
533 : 0 : qcache = cpool->cache->qcache[*queue];
534 [ # # ]: 0 : if (unlikely(qcache == NULL)) {
535 : : cpool = mlx5_hws_cnt_host_pool(cpool);
536 [ # # ]: 0 : ret = rte_ring_dequeue_elem(cpool->reuse_list, &tmp_cid,
537 : : sizeof(cnt_id_t));
538 [ # # ]: 0 : if (unlikely(ret != 0)) {
539 [ # # ]: 0 : ret = rte_ring_dequeue_elem(cpool->free_list, &tmp_cid,
540 : : sizeof(cnt_id_t));
541 [ # # ]: 0 : if (unlikely(ret != 0)) {
542 [ # # ]: 0 : if (rte_ring_count(cpool->wait_reset_list))
543 : : return -EAGAIN;
544 : : return -ENOENT;
545 : : }
546 : : }
547 [ # # ]: 0 : *cnt_id = tmp_cid;
548 : : iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
549 : : __hws_cnt_query_raw(cpool, *cnt_id,
550 : : &cpool->pool[iidx].reset.hits,
551 : 0 : &cpool->pool[iidx].reset.bytes);
552 : : mlx5_hws_cnt_set_all(&cpool->pool[iidx], 1, shared, age_idx);
553 : : return 0;
554 : : }
555 : : ret = rte_ring_dequeue_zc_burst_elem_start(qcache, sizeof(cnt_id_t), 1,
556 : : &zcdc, NULL);
557 [ # # ]: 0 : if (unlikely(ret == 0)) { /* local cache is empty. */
558 : : rte_ring_dequeue_zc_elem_finish(qcache, 0);
559 : : /* let's fetch from global free list. */
560 [ # # ]: 0 : ret = mlx5_hws_cnt_pool_cache_fetch(cpool, *queue);
561 [ # # ]: 0 : if (unlikely(ret != 0))
562 : : return ret;
563 : : ret = rte_ring_dequeue_zc_burst_elem_start(qcache,
564 : : sizeof(cnt_id_t), 1,
565 : : &zcdc, NULL);
566 : : MLX5_ASSERT(ret == 1);
567 : : }
568 : : /* get one from local cache. */
569 [ # # ]: 0 : *cnt_id = (*(cnt_id_t *)zcdc.ptr1);
570 : : iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
571 : 0 : query_gen = rte_atomic_load_explicit(&cpool->pool[iidx].query_gen_when_free,
572 : : rte_memory_order_relaxed);
573 : : /* counter is waiting to reset. */
574 [ # # ]: 0 : if (rte_atomic_load_explicit(&cpool->query_gen, rte_memory_order_relaxed) == query_gen) {
575 : : rte_ring_dequeue_zc_elem_finish(qcache, 0);
576 : : /* write-back counter to reset list. */
577 : 0 : mlx5_hws_cnt_pool_cache_flush(cpool, *queue);
578 : : /* let's fetch from global free list. */
579 [ # # ]: 0 : ret = mlx5_hws_cnt_pool_cache_fetch(cpool, *queue);
580 [ # # ]: 0 : if (unlikely(ret != 0))
581 : : return ret;
582 : : ret = rte_ring_dequeue_zc_burst_elem_start(qcache,
583 : : sizeof(cnt_id_t), 1,
584 : : &zcdc, NULL);
585 : : MLX5_ASSERT(ret == 1);
586 [ # # ]: 0 : *cnt_id = *(cnt_id_t *)zcdc.ptr1;
587 : : iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
588 : : }
589 : : __hws_cnt_query_raw(cpool, *cnt_id, &cpool->pool[iidx].reset.hits,
590 [ # # ]: 0 : &cpool->pool[iidx].reset.bytes);
591 : : rte_ring_dequeue_zc_elem_finish(qcache, 1);
592 : 0 : mlx5_hws_cnt_set_all(&cpool->pool[iidx], 1, shared, age_idx);
593 : : return 0;
594 : : }
595 : :
596 : : /**
597 : : * Decide if the given queue can be used to perform counter allocation/deallocation,
598 : : * based on the counter configuration.
599 : : *
600 : : * @param[in] priv
601 : : * Pointer to the port private data structure.
602 : : * @param[in] queue
603 : : * Pointer to the queue index.
604 : : *
605 : : * @return
606 : : * @p queue if the cache related to the queue can be used, NULL otherwise.
607 : : */
608 : : static __rte_always_inline uint32_t *
609 : : mlx5_hws_cnt_get_queue(struct mlx5_priv *priv, uint32_t *queue)
610 : : {
611 [ # # ]: 0 : if (priv && priv->hws_cpool) {
612 : : /* Do not use queue cache if counter pool is shared. */
613 [ # # ]: 0 : if (priv->shared_refcnt || priv->hws_cpool->cfg.host_cpool != NULL)
614 : : return NULL;
615 : : /* Do not use queue cache if counter cache is disabled. */
616 [ # # ]: 0 : if (priv->hws_cpool->cache == NULL)
617 : : return NULL;
618 : 0 : return queue;
619 : : }
620 : : /* This case should not be reached if counter pool was successfully configured. */
621 : : MLX5_ASSERT(false);
622 : : return NULL;
623 : : }
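/*
 * A minimal allocation sketch (hypothetical caller, assuming only the helpers
 * declared in this header): take a counter from the per-queue cache and treat
 * -EAGAIN as "counters are pending reset, retry later".
 */
static inline int
hws_cnt_alloc_example(struct mlx5_priv *priv, uint32_t queue_id, cnt_id_t *id)
{
	uint32_t *queue = mlx5_hws_cnt_get_queue(priv, &queue_id);
	/* Non-shared counter (shared == 0), no AGE parameter (age_idx == 0). */
	int ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, queue, id, 0, 0);

	if (ret == -EAGAIN)
		/* Pending reset only; a single retry here is illustrative. */
		ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, queue, id, 0, 0);
	/* Release later with: mlx5_hws_cnt_pool_put(priv->hws_cpool, queue, id). */
	return ret;
}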
624 : :
625 : : static __rte_always_inline unsigned int
626 : : mlx5_hws_cnt_pool_get_size(struct mlx5_hws_cnt_pool *cpool)
627 : : {
628 : : struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
629 : :
630 [ # # # # ]: 0 : return rte_ring_get_capacity(hpool->free_list);
631 : : }
632 : :
633 : : static __rte_always_inline int
634 : : mlx5_hws_cnt_pool_get_action_offset(struct mlx5_hws_cnt_pool *cpool,
635 : : cnt_id_t cnt_id, struct mlx5dr_action **action,
636 : : uint32_t *offset, bool is_root)
637 : : {
638 : 0 : uint8_t idx = cnt_id >> MLX5_HWS_CNT_DCS_IDX_OFFSET;
639 : :
640 : 0 : idx &= MLX5_HWS_CNT_DCS_IDX_MASK;
641 [ # # ]: 0 : if (likely(!is_root)) {
642 : 0 : *action = cpool->dcs_mng.dcs[idx].hws_action;
643 : : } else {
644 : : /*
645 : : * Any table using a counter on the root group should be rejected during
646 : : * validation when counters on the root group are not supported.
647 : : */
648 : : MLX5_ASSERT(cpool->dcs_mng.dcs[idx].root_action != NULL);
649 : 0 : *action = cpool->dcs_mng.dcs[idx].root_action;
650 : : }
651 : 0 : *offset = cnt_id & MLX5_HWS_CNT_IDX_MASK;
652 : : return 0;
653 : : }
654 : :
655 : : static __rte_always_inline int
656 : : mlx5_hws_cnt_shared_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id,
657 : : uint32_t age_idx)
658 : : {
659 : : struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
660 : :
661 : : return mlx5_hws_cnt_pool_get(hpool, NULL, cnt_id, age_idx, 1);
662 : : }
663 : :
664 : : static __rte_always_inline void
665 : : mlx5_hws_cnt_shared_put(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id)
666 : : {
667 : : struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
668 : :
669 : : mlx5_hws_cnt_pool_put(hpool, NULL, cnt_id);
670 : : }
671 : :
672 : : static __rte_always_inline bool
673 : : mlx5_hws_cnt_is_shared(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
674 : : {
675 : : struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
676 : : uint32_t iidx = mlx5_hws_cnt_iidx(hpool, cnt_id);
677 : : uint32_t share;
678 : :
679 [ # # ]: 0 : mlx5_hws_cnt_get_all(&hpool->pool[iidx], NULL, &share, NULL);
680 : : return !!share;
681 : : }
682 : :
683 : : static __rte_always_inline void
684 : : mlx5_hws_cnt_age_set(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id,
685 : : uint32_t age_idx)
686 : : {
687 : : struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
688 : : uint32_t iidx = mlx5_hws_cnt_iidx(hpool, cnt_id);
689 : :
690 : : MLX5_ASSERT(hpool->pool[iidx].cnt_state.share);
691 : 0 : mlx5_hws_cnt_set_age_idx(&hpool->pool[iidx], age_idx);
692 : 0 : }
693 : :
694 : : static __rte_always_inline uint32_t
695 : : mlx5_hws_cnt_age_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
696 : : {
697 : : struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
698 : : uint32_t iidx = mlx5_hws_cnt_iidx(hpool, cnt_id);
699 : : uint32_t age_idx, share;
700 : :
701 [ # # ]: 0 : mlx5_hws_cnt_get_all(&hpool->pool[iidx], NULL, &share, &age_idx);
702 : : MLX5_ASSERT(share);
703 : : return age_idx;
704 : : }
705 : :
706 : : static __rte_always_inline cnt_id_t
707 : : mlx5_hws_age_cnt_get(struct mlx5_priv *priv, struct mlx5_hws_age_param *param,
708 : : uint32_t age_idx)
709 : : {
710 [ # # ]: 0 : if (!param->own_cnt_index) {
711 : : /* Create one indirect counter for internal usage. */
712 [ # # ]: 0 : if (mlx5_hws_cnt_shared_get(priv->hws_cpool,
713 : : &param->own_cnt_index, age_idx) < 0)
714 : : return 0;
715 : 0 : param->nb_cnts++;
716 : : }
717 [ # # ]: 0 : return param->own_cnt_index;
718 : : }
719 : :
720 : : static __rte_always_inline void
721 : : mlx5_hws_age_nb_cnt_increase(struct mlx5_priv *priv, uint32_t age_idx)
722 : : {
723 : 0 : struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
724 : 0 : struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
725 : 0 : struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);
726 : :
727 : : MLX5_ASSERT(param != NULL);
728 : 0 : param->nb_cnts++;
729 : 0 : }
730 : :
731 : : static __rte_always_inline void
732 : : mlx5_hws_age_nb_cnt_decrease(struct mlx5_priv *priv, uint32_t age_idx)
733 : : {
734 : 0 : struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
735 : 0 : struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
736 : 0 : struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);
737 : :
738 [ # # # # ]: 0 : if (param != NULL)
739 : 0 : param->nb_cnts--;
740 : : }
741 : :
742 : : static __rte_always_inline bool
743 : : mlx5_hws_age_is_indirect(uint32_t age_idx)
744 : : {
745 [ # # # # ]: 0 : return (age_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET) ==
746 : : MLX5_INDIRECT_ACTION_TYPE_AGE ? true : false;
747 : : }
748 : :
749 : : /* init HWS counter pool. */
750 : : int
751 : : mlx5_hws_cnt_service_thread_create(struct mlx5_dev_ctx_shared *sh);
752 : :
753 : : void
754 : : mlx5_hws_cnt_service_thread_destroy(struct mlx5_dev_ctx_shared *sh);
755 : :
756 : : int
757 : : mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
758 : : uint32_t nb_counters, uint16_t nb_queue,
759 : : struct mlx5_hws_cnt_pool *chost, struct rte_flow_error *error);
760 : :
761 : : void
762 : : mlx5_hws_cnt_pool_destroy(struct mlx5_dev_ctx_shared *sh,
763 : : struct mlx5_hws_cnt_pool *cpool);
764 : :
765 : : int
766 : : mlx5_hws_cnt_svc_init(struct mlx5_dev_ctx_shared *sh,
767 : : struct rte_flow_error *error);
768 : :
769 : : void
770 : : mlx5_hws_cnt_svc_deinit(struct mlx5_dev_ctx_shared *sh);
771 : :
772 : : int
773 : : mlx5_hws_age_action_destroy(struct mlx5_priv *priv, uint32_t idx,
774 : : struct rte_flow_error *error);
775 : :
776 : : uint32_t
777 : : mlx5_hws_age_action_create(struct mlx5_priv *priv, uint32_t queue_id,
778 : : bool shared, const struct rte_flow_action_age *age,
779 : : uint32_t flow_idx, struct rte_flow_error *error);
780 : :
781 : : int
782 : : mlx5_hws_age_action_update(struct mlx5_priv *priv, uint32_t idx,
783 : : const void *update, struct rte_flow_error *error);
784 : :
785 : : void *
786 : : mlx5_hws_age_context_get(struct mlx5_priv *priv, uint32_t idx);
787 : :
788 : : int
789 : : mlx5_hws_age_pool_init(struct rte_eth_dev *dev,
790 : : uint32_t nb_aging_objects,
791 : : uint16_t nb_queues,
792 : : bool strict_queue);
793 : :
794 : : void
795 : : mlx5_hws_age_pool_destroy(struct mlx5_priv *priv);
796 : :
797 : : #endif /* _MLX5_HWS_CNT_H_ */