Branch data Line data Source code
1 : : /* SPDX-License-Identifier: BSD-3-Clause
2 : : * Copyright 2022 Mellanox Technologies, Ltd
3 : : */
4 : :
5 : : #ifndef _MLX5_HWS_CNT_H_
6 : : #define _MLX5_HWS_CNT_H_
7 : :
8 : : #include <rte_ring.h>
9 : : #include "mlx5_utils.h"
10 : : #include "mlx5_flow.h"
11 : :
12 : : /*
13 : : * HWS COUNTER ID's layout
14 : : * 3 2 1 0
15 : : * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
16 : : * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
17 : : * | T | | D | |
18 : : * ~ Y | | C | IDX ~
19 : : * | P | | S | |
20 : : * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
21 : : *
22 : : * Bit 31:29 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10
23 : : * Bit 25:24 = DCS index
24 : : * Bit 23:00 = IDX of the counter within the DCS bulk it belongs to.
25 : : */
26 : :
27 : : #define MLX5_HWS_CNT_DCS_IDX_OFFSET 24
28 : : #define MLX5_HWS_CNT_DCS_IDX_MASK 0x3
29 : : #define MLX5_HWS_CNT_IDX_MASK ((1UL << MLX5_HWS_CNT_DCS_IDX_OFFSET) - 1)
30 : :
31 : : #define MLX5_HWS_AGE_IDX_MASK (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1)
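Note (illustrative addition, not part of the driver source): the macros above split a counter id exactly as described in the layout comment. A minimal hypothetical helper decoding an id could look like the sketch below; mlx5_hws_cnt_iidx() further down performs the equivalent lookup against the pool.

static inline void
example_decode_cnt_id(uint32_t cnt_id, uint8_t *dcs_idx, uint32_t *bulk_offset)
{
	/* Bits 25:24 select the DCS bulk. */
	*dcs_idx = (cnt_id >> MLX5_HWS_CNT_DCS_IDX_OFFSET) &
		   MLX5_HWS_CNT_DCS_IDX_MASK;
	/* Bits 23:0 are the counter offset inside that bulk. */
	*bulk_offset = cnt_id & MLX5_HWS_CNT_IDX_MASK;
}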
32 : :
33 : : struct mlx5_hws_cnt_dcs {
34 : : void *dr_action;
35 : : uint32_t batch_sz;
36 : : uint32_t iidx; /* internal index of first counter in this bulk. */
37 : : struct mlx5_devx_obj *obj;
38 : : };
39 : :
40 : : struct mlx5_hws_cnt_dcs_mng {
41 : : uint32_t batch_total;
42 : : struct mlx5_hws_cnt_dcs dcs[MLX5_HWS_CNT_DCS_NUM];
43 : : };
44 : :
45 : : union mlx5_hws_cnt_state {
46 : : alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) data;
47 : : struct {
48 : : uint32_t in_used:1;
49 : : /* Indicates whether this counter is in use or in the pool. */
50 : : uint32_t share:1;
51 : : /*
52 : : * Set to 1 when this counter is used as an
53 : : * indirect action.
54 : : */
55 : : uint32_t age_idx:24;
56 : : /*
57 : : * When this counter is used for aging, it stores the index
58 : : * of the AGE parameter. Otherwise, this index is zero.
59 : : */
60 : : };
61 : : };
62 : :
63 : : struct mlx5_hws_cnt {
64 : : struct flow_counter_stats reset;
65 : : union mlx5_hws_cnt_state cnt_state;
66 : : /* This structure is only meaningful while the user owns this counter. */
67 : : alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) query_gen_when_free;
68 : : /*
69 : : * When the PMD owns this counter (i.e. the user has put the counter
70 : : * back into the PMD counter pool), this field records the counter
71 : : * pool's query generation at the time the user released the counter.
72 : : */
73 : : };
74 : :
75 : : struct mlx5_hws_cnt_raw_data_mng {
76 : : struct flow_counter_stats *raw;
77 : : struct mlx5_pmd_mr mr;
78 : : };
79 : :
80 : : struct mlx5_hws_cache_param {
81 : : uint32_t size;
82 : : uint32_t q_num;
83 : : uint32_t fetch_sz;
84 : : uint32_t threshold;
85 : : uint32_t preload_sz;
86 : : };
87 : :
88 : : struct mlx5_hws_cnt_pool_cfg {
89 : : char *name;
90 : : uint32_t request_num;
91 : : uint32_t alloc_factor;
92 : : struct mlx5_hws_cnt_pool *host_cpool;
93 : : };
94 : :
95 : : struct mlx5_hws_cnt_pool_caches {
96 : : uint32_t fetch_sz;
97 : : uint32_t threshold;
98 : : uint32_t preload_sz;
99 : : uint32_t q_num;
100 : : struct rte_ring *qcache[];
101 : : };
102 : :
103 : : struct __rte_cache_aligned mlx5_hws_cnt_pool {
104 : : LIST_ENTRY(mlx5_hws_cnt_pool) next;
105 : : alignas(RTE_CACHE_LINE_SIZE) struct mlx5_hws_cnt_pool_cfg cfg;
106 : : alignas(RTE_CACHE_LINE_SIZE) struct mlx5_hws_cnt_dcs_mng dcs_mng;
107 : : alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) query_gen;
108 : : struct mlx5_hws_cnt *pool;
109 : : struct mlx5_hws_cnt_raw_data_mng *raw_mng;
110 : : struct rte_ring *reuse_list;
111 : : struct rte_ring *free_list;
112 : : struct rte_ring *wait_reset_list;
113 : : struct mlx5_hws_cnt_pool_caches *cache;
114 : : uint64_t time_of_last_age_check;
115 : : struct mlx5_priv *priv;
116 : : };
117 : :
118 : : /* HWS AGE status. */
119 : : enum {
120 : : HWS_AGE_FREE, /* Initialized state. */
121 : : HWS_AGE_CANDIDATE, /* AGE assigned to flows. */
122 : : HWS_AGE_CANDIDATE_INSIDE_RING,
123 : : /*
124 : : * AGE assigned to flows but still inside the ring. It was aged-out, but
125 : : * the timeout was changed, so it stays in the ring while remaining a candidate.
126 : : */
127 : : HWS_AGE_AGED_OUT_REPORTED,
128 : : /*
129 : : * Aged-out, reported by rte_flow_get_q_aged_flows, waiting for destroy.
130 : : */
131 : : HWS_AGE_AGED_OUT_NOT_REPORTED,
132 : : /*
133 : : * Aged-out, inside the aged-out ring.
134 : : * Waiting for rte_flow_get_q_aged_flows and destroy.
135 : : */
136 : : };
137 : :
138 : : /* HWS counter age parameter. */
139 : : struct __rte_cache_aligned __rte_packed_begin mlx5_hws_age_param {
140 : : RTE_ATOMIC(uint32_t) timeout; /* Aging timeout in seconds (atomically accessed). */
141 : : RTE_ATOMIC(uint32_t) sec_since_last_hit;
142 : : /* Time in seconds since last hit (atomically accessed). */
143 : : RTE_ATOMIC(uint16_t) state; /* AGE state (atomically accessed). */
144 : : uint64_t accumulator_last_hits;
145 : : /* Last total value of hits for comparing. */
146 : : uint64_t accumulator_hits;
147 : : /* Accumulator for hits coming from several counters. */
148 : : uint32_t accumulator_cnt;
149 : : /* Number of counters that have already updated the accumulator in this second. */
150 : : uint32_t nb_cnts; /* Number of counters used by this AGE. */
151 : : uint32_t queue_id; /* Queue id of the counter. */
152 : : cnt_id_t own_cnt_index;
153 : : /* Counter action created specifically for this AGE action. */
154 : : void *context; /* Flow AGE context. */
155 : : } __rte_packed_end;
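Note (hypothetical sketch, inferred only from the field comments above; the real per-second aging scan lives in mlx5_hws_cnt.c): the accumulator fields could be combined roughly as follows once all counters attached to an AGE have reported their hits for the current second.

static inline void
example_age_accumulate(struct mlx5_hws_age_param *param, uint64_t cnt_hits)
{
	param->accumulator_hits += cnt_hits;
	if (++param->accumulator_cnt < param->nb_cnts)
		return; /* Not all counters of this AGE have reported yet. */
	if (param->accumulator_hits != param->accumulator_last_hits) {
		/* Traffic was seen in this interval: restart the idle time. */
		rte_atomic_store_explicit(&param->sec_since_last_hit, 0,
					  rte_memory_order_relaxed);
		param->accumulator_last_hits = param->accumulator_hits;
	}
	param->accumulator_hits = 0;
	param->accumulator_cnt = 0;
}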
156 : :
157 : :
158 : : /**
159 : : * Return the actual counter pool that should be used, taking the
160 : : * cross vHCA sharing (host pool) mode into account.
161 : : *
162 : : * @param cpool
163 : : * The counter pool provided by the caller.
164 : : * @return
165 : : * The host counter pool if configured, otherwise @p cpool itself.
166 : : */
167 : : static __rte_always_inline struct mlx5_hws_cnt_pool *
168 : : mlx5_hws_cnt_host_pool(struct mlx5_hws_cnt_pool *cpool)
169 : : {
170 [ # # ]: 0 : return cpool->cfg.host_cpool ? cpool->cfg.host_cpool : cpool;
171 : : }
172 : :
173 : : /**
174 : : * Translate a counter id into an internal index (starting from 0), which can
175 : : * be used as an index into the raw/cnt pool.
176 : : *
177 : : * @param cnt_id
178 : : * The external counter id
179 : : * @return
180 : : * Internal index
181 : : */
182 : : static __rte_always_inline uint32_t
183 : : mlx5_hws_cnt_iidx(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
184 : : {
185 : : struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
186 : 0 : uint8_t dcs_idx = cnt_id >> MLX5_HWS_CNT_DCS_IDX_OFFSET;
187 : 0 : uint32_t offset = cnt_id & MLX5_HWS_CNT_IDX_MASK;
188 : :
189 : 0 : dcs_idx &= MLX5_HWS_CNT_DCS_IDX_MASK;
190 : 0 : return (hpool->dcs_mng.dcs[dcs_idx].iidx + offset);
191 : : }
192 : :
193 : : /**
194 : : * Check whether the given counter id is valid.
195 : : */
196 : : static __rte_always_inline bool
197 : : mlx5_hws_cnt_id_valid(cnt_id_t cnt_id)
198 : : {
199 [ # # ]: 0 : return (cnt_id >> MLX5_INDIRECT_ACTION_TYPE_OFFSET) ==
200 : : MLX5_INDIRECT_ACTION_TYPE_COUNT ? true : false;
201 : : }
202 : :
203 : : static __rte_always_inline void
204 : : mlx5_hws_cnt_set_age_idx(struct mlx5_hws_cnt *cnt, uint32_t value)
205 : : {
206 : : union mlx5_hws_cnt_state cnt_state;
207 : :
208 : 0 : cnt_state.data = rte_atomic_load_explicit(&cnt->cnt_state.data, rte_memory_order_acquire);
209 : 0 : cnt_state.age_idx = value;
210 : 0 : rte_atomic_store_explicit(&cnt->cnt_state.data, cnt_state.data, rte_memory_order_release);
211 : : }
212 : :
213 : : static __rte_always_inline void
214 : : mlx5_hws_cnt_set_all(struct mlx5_hws_cnt *cnt, uint32_t in_used, uint32_t share, uint32_t age_idx)
215 : : {
216 : : union mlx5_hws_cnt_state cnt_state;
217 : :
218 : 0 : cnt_state.in_used = !!in_used;
219 : 0 : cnt_state.share = !!share;
220 : 0 : cnt_state.age_idx = age_idx;
221 : 0 : rte_atomic_store_explicit(&cnt->cnt_state.data, cnt_state.data, rte_memory_order_relaxed);
222 : : }
223 : :
224 : : static __rte_always_inline void
225 : : mlx5_hws_cnt_get_all(struct mlx5_hws_cnt *cnt, uint32_t *in_used, uint32_t *share,
226 : : uint32_t *age_idx)
227 : : {
228 : : union mlx5_hws_cnt_state cnt_state;
229 : :
230 : 0 : cnt_state.data = rte_atomic_load_explicit(&cnt->cnt_state.data, rte_memory_order_acquire);
231 : : if (in_used != NULL)
232 : 0 : *in_used = cnt_state.in_used;
233 : : if (share != NULL)
234 : 0 : *share = cnt_state.share;
235 : : if (age_idx != NULL)
236 [ # # ]: 0 : *age_idx = cnt_state.age_idx;
237 : : }
238 : :
239 : : /**
240 : : * Generate Counter id from internal index.
241 : : *
242 : : * @param cpool
243 : : * The pointer to counter pool
244 : : * @param iidx
245 : : * The internal counter index.
246 : : *
247 : : * @return
248 : : * Counter id
249 : : */
250 : : static __rte_always_inline cnt_id_t
251 : : mlx5_hws_cnt_id_gen(struct mlx5_hws_cnt_pool *cpool, uint32_t iidx)
252 : : {
253 : : struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
254 : : struct mlx5_hws_cnt_dcs_mng *dcs_mng = &hpool->dcs_mng;
255 : : uint32_t idx;
256 : : uint32_t offset;
257 : : cnt_id_t cnt_id;
258 : :
259 [ # # ]: 0 : for (idx = 0, offset = iidx; idx < dcs_mng->batch_total; idx++) {
260 [ # # ]: 0 : if (dcs_mng->dcs[idx].batch_sz <= offset)
261 : 0 : offset -= dcs_mng->dcs[idx].batch_sz;
262 : : else
263 : : break;
264 : : }
265 : : cnt_id = offset;
266 : 0 : cnt_id |= (idx << MLX5_HWS_CNT_DCS_IDX_OFFSET);
267 : : return (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
268 : 0 : MLX5_INDIRECT_ACTION_TYPE_OFFSET) | cnt_id;
269 : : }
270 : :
271 : : static __rte_always_inline void
272 : : __hws_cnt_query_raw(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id,
273 : : uint64_t *raw_pkts, uint64_t *raw_bytes)
274 : : {
275 : : struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
276 : 0 : struct mlx5_hws_cnt_raw_data_mng *raw_mng = hpool->raw_mng;
277 : : struct flow_counter_stats s[2];
278 : : uint8_t i = 0x1;
279 : : size_t stat_sz = sizeof(s[0]);
280 : : uint32_t iidx = mlx5_hws_cnt_iidx(cpool, cnt_id);
281 : :
282 : 0 : memcpy(&s[0], &raw_mng->raw[iidx], stat_sz);
283 : : do {
284 [ # # ]: 0 : memcpy(&s[i & 1], &raw_mng->raw[iidx], stat_sz);
285 [ # # ]: 0 : if (memcmp(&s[0], &s[1], stat_sz) == 0) {
286 [ # # ]: 0 : *raw_pkts = rte_be_to_cpu_64(s[0].hits);
287 [ # # ]: 0 : *raw_bytes = rte_be_to_cpu_64(s[0].bytes);
288 : : break;
289 : : }
290 : 0 : i = ~i;
291 : : } while (1);
292 : : }
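Note (illustrative caller sketch, not taken from the driver; the real query path is in mlx5_hws_cnt.c): the read-twice-and-compare loop above yields a torn-read-free snapshot of the raw statistics updated by the background service. A hypothetical reader would subtract the per-counter reset baseline captured at allocation time to report values relative to when the counter was taken.

static inline void
example_cnt_read(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id,
		 uint64_t *pkts, uint64_t *bytes)
{
	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
	uint32_t iidx = mlx5_hws_cnt_iidx(cpool, cnt_id);
	uint64_t raw_pkts, raw_bytes;

	__hws_cnt_query_raw(cpool, cnt_id, &raw_pkts, &raw_bytes);
	/* reset.hits/bytes are filled by mlx5_hws_cnt_pool_get() below. */
	*pkts = raw_pkts - hpool->pool[iidx].reset.hits;
	*bytes = raw_bytes - hpool->pool[iidx].reset.bytes;
}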
293 : :
294 : : /**
295 : : * Copy elements from one zero-copy ring to another zero-copy ring in place.
296 : : *
297 : : * The input is an rte_ring zero-copy data structure, which holds two pointers;
298 : : * when the ring wraps around, the second pointer (ptr2) becomes meaningful.
299 : : *
300 : : * This routine therefore has to handle the case where both the source and the
301 : : * destination addresses wrap.
302 : : * First, compute the number of elements that can be copied before the first
303 : : * wrap point, which may belong to either the source or the destination.
304 : : * Second, copy the remaining elements up to the second wrap point. If the first
305 : : * wrap point was in the source, this one must be in the destination,
306 : : * and vice-versa.
307 : : * Third, copy all remaining elements.
308 : : *
309 : : * In the worst case, three pieces of contiguous memory have to be copied.
310 : : *
311 : : * @param zcdd
312 : : * A pointer to zero-copy data of destination ring.
313 : : * @param zcds
314 : : * A pointer to zero-copy data of source ring.
315 : : * @param n
316 : : * Number of elements to copy.
317 : : */
318 : : static __rte_always_inline void
319 : : __hws_cnt_r2rcpy(struct rte_ring_zc_data *zcdd, struct rte_ring_zc_data *zcds,
320 : : unsigned int n)
321 : : {
322 : : unsigned int n1, n2, n3;
323 : : void *s1, *s2, *s3;
324 : : void *d1, *d2, *d3;
325 : :
326 : : s1 = zcds->ptr1;
327 : : d1 = zcdd->ptr1;
328 : 0 : n1 = RTE_MIN(zcdd->n1, zcds->n1);
329 [ # # ]: 0 : if (zcds->n1 > n1) {
330 : 0 : n2 = zcds->n1 - n1;
331 : 0 : s2 = RTE_PTR_ADD(zcds->ptr1, sizeof(cnt_id_t) * n1);
332 : : d2 = zcdd->ptr2;
333 : 0 : n3 = n - n1 - n2;
334 : : s3 = zcds->ptr2;
335 : 0 : d3 = RTE_PTR_ADD(zcdd->ptr2, sizeof(cnt_id_t) * n2);
336 : : } else {
337 : 0 : n2 = zcdd->n1 - n1;
338 : : s2 = zcds->ptr2;
339 : 0 : d2 = RTE_PTR_ADD(zcdd->ptr1, sizeof(cnt_id_t) * n1);
340 : 0 : n3 = n - n1 - n2;
341 : 0 : s3 = RTE_PTR_ADD(zcds->ptr2, sizeof(cnt_id_t) * n2);
342 : : d3 = zcdd->ptr2;
343 : : }
344 [ # # ]: 0 : memcpy(d1, s1, n1 * sizeof(cnt_id_t));
345 [ # # ]: 0 : if (n2 != 0)
346 : 0 : memcpy(d2, s2, n2 * sizeof(cnt_id_t));
347 [ # # ]: 0 : if (n3 != 0)
348 : 0 : memcpy(d3, s3, n3 * sizeof(cnt_id_t));
349 : : }
350 : :
351 : : static __rte_always_inline int
352 : : mlx5_hws_cnt_pool_cache_flush(struct mlx5_hws_cnt_pool *cpool,
353 : : uint32_t queue_id)
354 : : {
355 : : unsigned int ret __rte_unused;
356 : : struct rte_ring_zc_data zcdr = {0};
357 : : struct rte_ring_zc_data zcdc = {0};
358 : : struct rte_ring *reset_list = NULL;
359 : 0 : struct rte_ring *qcache = cpool->cache->qcache[queue_id];
360 : 0 : uint32_t ring_size = rte_ring_count(qcache);
361 : :
362 : : ret = rte_ring_dequeue_zc_burst_elem_start(qcache, sizeof(cnt_id_t),
363 : : ring_size, &zcdc, NULL);
364 : : MLX5_ASSERT(ret == ring_size);
365 [ # # ]: 0 : reset_list = cpool->wait_reset_list;
366 : : ret = rte_ring_enqueue_zc_burst_elem_start(reset_list, sizeof(cnt_id_t),
367 : : ring_size, &zcdr, NULL);
368 : : MLX5_ASSERT(ret == ring_size);
369 : : __hws_cnt_r2rcpy(&zcdr, &zcdc, ring_size);
370 : : rte_ring_enqueue_zc_elem_finish(reset_list, ring_size);
371 : : rte_ring_dequeue_zc_elem_finish(qcache, ring_size);
372 : : return 0;
373 : : }
374 : :
375 : : static __rte_always_inline int
376 : : mlx5_hws_cnt_pool_cache_fetch(struct mlx5_hws_cnt_pool *cpool,
377 : : uint32_t queue_id)
378 : : {
379 : 0 : struct rte_ring *qcache = cpool->cache->qcache[queue_id];
380 : : struct rte_ring *free_list = NULL;
381 : : struct rte_ring *reuse_list = NULL;
382 : : struct rte_ring *list = NULL;
383 : : struct rte_ring_zc_data zcdf = {0};
384 : : struct rte_ring_zc_data zcdc = {0};
385 : : struct rte_ring_zc_data zcdu = {0};
386 : : struct rte_ring_zc_data zcds = {0};
387 : : struct mlx5_hws_cnt_pool_caches *cache = cpool->cache;
388 : : unsigned int ret, actual_fetch_size __rte_unused;
389 : :
390 : 0 : reuse_list = cpool->reuse_list;
391 : 0 : ret = rte_ring_dequeue_zc_burst_elem_start(reuse_list,
392 : : sizeof(cnt_id_t), cache->fetch_sz, &zcdu, NULL);
393 : : zcds = zcdu;
394 : : list = reuse_list;
395 [ # # ]: 0 : if (unlikely(ret == 0)) { /* no reuse counter. */
396 : : rte_ring_dequeue_zc_elem_finish(reuse_list, 0);
397 : 0 : free_list = cpool->free_list;
398 [ # # ]: 0 : ret = rte_ring_dequeue_zc_burst_elem_start(free_list,
399 : : sizeof(cnt_id_t),
400 : : cache->fetch_sz,
401 : : &zcdf, NULL);
402 : : zcds = zcdf;
403 : : list = free_list;
404 [ # # ]: 0 : if (unlikely(ret == 0)) { /* no free counter. */
405 : : rte_ring_dequeue_zc_elem_finish(free_list, 0);
406 [ # # ]: 0 : if (rte_ring_count(cpool->wait_reset_list))
407 : : return -EAGAIN;
408 : 0 : return -ENOENT;
409 : : }
410 : : }
411 : : actual_fetch_size = ret;
412 : : ret = rte_ring_enqueue_zc_burst_elem_start(qcache, sizeof(cnt_id_t),
413 : : ret, &zcdc, NULL);
414 : : MLX5_ASSERT(ret == actual_fetch_size);
415 : : __hws_cnt_r2rcpy(&zcdc, &zcds, ret);
416 : : rte_ring_dequeue_zc_elem_finish(list, ret);
417 : : rte_ring_enqueue_zc_elem_finish(qcache, ret);
418 : : return 0;
419 : : }
420 : :
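Note (descriptive comment added for clarity; behavior inferred from the implementation below):
/**
 * Revert the last @p n elements enqueued on a single-producer (RTE_RING_SYNC_ST)
 * ring and expose their addresses through @p zcd, so the caller can move them
 * to another ring (see the cache write-back in mlx5_hws_cnt_pool_put()).
 */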
421 : : static __rte_always_inline int
422 : : __mlx5_hws_cnt_pool_enqueue_revert(struct rte_ring *r, unsigned int n,
423 : : struct rte_ring_zc_data *zcd)
424 : : {
425 : : uint32_t current_head = 0;
426 : : uint32_t revert2head = 0;
427 : :
428 : : MLX5_ASSERT(r->prod.sync_type == RTE_RING_SYNC_ST);
429 : : MLX5_ASSERT(r->cons.sync_type == RTE_RING_SYNC_ST);
430 : 0 : current_head = rte_atomic_load_explicit(&r->prod.head, rte_memory_order_relaxed);
431 : : MLX5_ASSERT(n <= r->capacity);
432 : : MLX5_ASSERT(n <= rte_ring_count(r));
433 : 0 : revert2head = current_head - n;
434 : 0 : r->prod.head = revert2head; /* This ring should be SP. */
435 : : __rte_ring_get_elem_addr(r, revert2head, sizeof(cnt_id_t), n,
436 : : &zcd->ptr1, &zcd->n1, &zcd->ptr2);
437 : : /* Update tail */
438 [ # # # ]: 0 : rte_atomic_store_explicit(&r->prod.tail, revert2head, rte_memory_order_release);
439 : : return n;
440 : : }
441 : :
442 : : /**
443 : : * Put one counter back into the counter pool.
444 : : *
445 : : * @param cpool
446 : : * A pointer to the counter pool structure.
447 : : * @param queue
448 : : * A pointer to the HWS queue index. If NULL, the counter is put back into the common pool.
449 : : * @param cnt_id
450 : : * A pointer to the counter id to be returned to the pool.
451 : : */
452 : : static __rte_always_inline void
453 : : mlx5_hws_cnt_pool_put(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
454 : : cnt_id_t *cnt_id)
455 : : {
456 : : unsigned int ret = 0;
457 : : struct mlx5_hws_cnt_pool *hpool;
458 : : struct rte_ring_zc_data zcdc = {0};
459 : : struct rte_ring_zc_data zcdr = {0};
460 : : struct rte_ring *qcache = NULL;
461 : : unsigned int wb_num = 0; /* cache write-back number. */
462 : : uint32_t iidx;
463 : :
464 : : hpool = mlx5_hws_cnt_host_pool(cpool);
465 [ # # # # ]: 0 : iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
466 : 0 : mlx5_hws_cnt_set_all(&hpool->pool[iidx], 0, 0, 0);
467 : 0 : rte_atomic_store_explicit(&hpool->pool[iidx].query_gen_when_free,
468 : : rte_atomic_load_explicit(&hpool->query_gen, rte_memory_order_relaxed),
469 : : rte_memory_order_relaxed);
470 [ # # # # ]: 0 : if (likely(queue != NULL) && cpool->cfg.host_cpool == NULL)
471 : 0 : qcache = hpool->cache->qcache[*queue];
472 [ # # ]: 0 : if (unlikely(qcache == NULL)) {
473 [ # # ]: 0 : ret = rte_ring_enqueue_elem(hpool->wait_reset_list, cnt_id,
474 : : sizeof(cnt_id_t));
475 : : MLX5_ASSERT(ret == 0);
476 : : return;
477 : : }
478 : : ret = rte_ring_enqueue_burst_elem(qcache, cnt_id, sizeof(cnt_id_t), 1,
479 : : NULL);
480 [ # # ]: 0 : if (unlikely(ret == 0)) { /* cache is full. */
481 [ # # ]: 0 : struct rte_ring *reset_list = cpool->wait_reset_list;
482 : :
483 [ # # ]: 0 : wb_num = rte_ring_count(qcache) - cpool->cache->threshold;
484 : : MLX5_ASSERT(wb_num < rte_ring_count(qcache));
485 : : __mlx5_hws_cnt_pool_enqueue_revert(qcache, wb_num, &zcdc);
486 : : ret = rte_ring_enqueue_zc_burst_elem_start(reset_list,
487 : : sizeof(cnt_id_t),
488 : : wb_num, &zcdr, NULL);
489 : : MLX5_ASSERT(ret == wb_num);
490 : : __hws_cnt_r2rcpy(&zcdr, &zcdc, ret);
491 : : rte_ring_enqueue_zc_elem_finish(reset_list, ret);
492 : : /* write-back THIS counter too */
493 : : ret = rte_ring_enqueue_burst_elem(reset_list, cnt_id,
494 : : sizeof(cnt_id_t), 1, NULL);
495 : : }
496 : : MLX5_ASSERT(ret == 1);
497 : : }
498 : :
499 : : /**
500 : : * Get one counter from the pool.
501 : : *
502 : : * If @p queue is not NULL, counters are retrieved first from the queue's
503 : : * cache and subsequently from the common pool. Note that it can return -ENOENT
504 : : * when the local cache and the common pool are empty, even if the caches of
505 : : * other queues are full.
506 : : *
507 : : * @param cpool
508 : : * A pointer to the counter pool structure.
509 : : * @param queue
510 : : * A pointer to the HWS queue index. If NULL, fetch from the common pool.
511 : : * @param cnt_id
512 : : * A pointer to a cnt_id_t (counter id) that will be filled.
513 : : * @param age_idx
514 : : * Index of the AGE parameter using this counter; zero means there is no such AGE.
515 : : *
516 : : * @return
517 : : * - 0: Success; a counter was taken.
518 : : * - -ENOENT: Not enough entries in the pool; no counter was retrieved.
519 : : * - -EAGAIN: The counter is not ready yet; try again.
520 : : */
521 : : static __rte_always_inline int
522 : : mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
523 : : cnt_id_t *cnt_id, uint32_t age_idx, uint32_t shared)
524 : : {
525 : : unsigned int ret;
526 : : struct rte_ring_zc_data zcdc = {0};
527 : : struct rte_ring *qcache = NULL;
528 : : uint32_t iidx, query_gen = 0;
529 [ # # ]: 0 : cnt_id_t tmp_cid = 0;
530 : :
531 [ # # ]: 0 : if (likely(queue != NULL && cpool->cfg.host_cpool == NULL))
532 : 0 : qcache = cpool->cache->qcache[*queue];
533 [ # # ]: 0 : if (unlikely(qcache == NULL)) {
534 : : cpool = mlx5_hws_cnt_host_pool(cpool);
535 [ # # ]: 0 : ret = rte_ring_dequeue_elem(cpool->reuse_list, &tmp_cid,
536 : : sizeof(cnt_id_t));
537 [ # # ]: 0 : if (unlikely(ret != 0)) {
538 [ # # ]: 0 : ret = rte_ring_dequeue_elem(cpool->free_list, &tmp_cid,
539 : : sizeof(cnt_id_t));
540 [ # # ]: 0 : if (unlikely(ret != 0)) {
541 [ # # ]: 0 : if (rte_ring_count(cpool->wait_reset_list))
542 : : return -EAGAIN;
543 : : return -ENOENT;
544 : : }
545 : : }
546 [ # # ]: 0 : *cnt_id = tmp_cid;
547 : : iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
548 : : __hws_cnt_query_raw(cpool, *cnt_id,
549 : : &cpool->pool[iidx].reset.hits,
550 : 0 : &cpool->pool[iidx].reset.bytes);
551 : : mlx5_hws_cnt_set_all(&cpool->pool[iidx], 1, shared, age_idx);
552 : : return 0;
553 : : }
554 : : ret = rte_ring_dequeue_zc_burst_elem_start(qcache, sizeof(cnt_id_t), 1,
555 : : &zcdc, NULL);
556 [ # # ]: 0 : if (unlikely(ret == 0)) { /* local cache is empty. */
557 : : rte_ring_dequeue_zc_elem_finish(qcache, 0);
558 : : /* let's fetch from global free list. */
559 [ # # ]: 0 : ret = mlx5_hws_cnt_pool_cache_fetch(cpool, *queue);
560 [ # # ]: 0 : if (unlikely(ret != 0))
561 : : return ret;
562 : : ret = rte_ring_dequeue_zc_burst_elem_start(qcache,
563 : : sizeof(cnt_id_t), 1,
564 : : &zcdc, NULL);
565 : : MLX5_ASSERT(ret == 1);
566 : : }
567 : : /* get one from local cache. */
568 [ # # ]: 0 : *cnt_id = (*(cnt_id_t *)zcdc.ptr1);
569 : : iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
570 : 0 : query_gen = rte_atomic_load_explicit(&cpool->pool[iidx].query_gen_when_free,
571 : : rte_memory_order_relaxed);
572 : : /* counter is waiting to reset. */
573 [ # # ]: 0 : if (rte_atomic_load_explicit(&cpool->query_gen, rte_memory_order_relaxed) == query_gen) {
574 : : rte_ring_dequeue_zc_elem_finish(qcache, 0);
575 : : /* write-back counter to reset list. */
576 : 0 : mlx5_hws_cnt_pool_cache_flush(cpool, *queue);
577 : : /* let's fetch from global free list. */
578 [ # # ]: 0 : ret = mlx5_hws_cnt_pool_cache_fetch(cpool, *queue);
579 [ # # ]: 0 : if (unlikely(ret != 0))
580 : : return ret;
581 : : ret = rte_ring_dequeue_zc_burst_elem_start(qcache,
582 : : sizeof(cnt_id_t), 1,
583 : : &zcdc, NULL);
584 : : MLX5_ASSERT(ret == 1);
585 [ # # ]: 0 : *cnt_id = *(cnt_id_t *)zcdc.ptr1;
586 : : iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
587 : : }
588 : : __hws_cnt_query_raw(cpool, *cnt_id, &cpool->pool[iidx].reset.hits,
589 [ # # ]: 0 : &cpool->pool[iidx].reset.bytes);
590 : : rte_ring_dequeue_zc_elem_finish(qcache, 1);
591 : 0 : mlx5_hws_cnt_set_all(&cpool->pool[iidx], 1, shared, age_idx);
592 : : return 0;
593 : : }
594 : :
595 : : /**
596 : : * Decide whether the given queue can be used to perform counter allocation/deallocation,
597 : : * based on the counter configuration.
598 : : *
599 : : * @param[in] priv
600 : : * Pointer to the port private data structure.
601 : : * @param[in] queue
602 : : * Pointer to the queue index.
603 : : *
604 : : * @return
605 : : * @p queue if the cache related to the queue can be used, NULL otherwise.
606 : : */
607 : : static __rte_always_inline uint32_t *
608 : : mlx5_hws_cnt_get_queue(struct mlx5_priv *priv, uint32_t *queue)
609 : : {
610 [ # # ]: 0 : if (priv && priv->hws_cpool) {
611 : : /* Do not use queue cache if counter pool is shared. */
612 [ # # ]: 0 : if (priv->shared_refcnt || priv->hws_cpool->cfg.host_cpool != NULL)
613 : : return NULL;
614 : : /* Do not use queue cache if counter cache is disabled. */
615 [ # # ]: 0 : if (priv->hws_cpool->cache == NULL)
616 : : return NULL;
617 : 0 : return queue;
618 : : }
619 : : /* This case should not be reached if counter pool was successfully configured. */
620 : : MLX5_ASSERT(false);
621 : : return NULL;
622 : : }
623 : :
624 : : static __rte_always_inline unsigned int
625 : : mlx5_hws_cnt_pool_get_size(struct mlx5_hws_cnt_pool *cpool)
626 : : {
627 : : struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
628 : :
629 [ # # # # ]: 0 : return rte_ring_get_capacity(hpool->free_list);
630 : : }
631 : :
632 : : static __rte_always_inline int
633 : : mlx5_hws_cnt_pool_get_action_offset(struct mlx5_hws_cnt_pool *cpool,
634 : : cnt_id_t cnt_id, struct mlx5dr_action **action,
635 : : uint32_t *offset)
636 : : {
637 : 0 : uint8_t idx = cnt_id >> MLX5_HWS_CNT_DCS_IDX_OFFSET;
638 : :
639 : 0 : idx &= MLX5_HWS_CNT_DCS_IDX_MASK;
640 : 0 : *action = cpool->dcs_mng.dcs[idx].dr_action;
641 : 0 : *offset = cnt_id & MLX5_HWS_CNT_IDX_MASK;
642 : : return 0;
643 : : }
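Note (illustrative usage sketch, not from the driver source; helper name is hypothetical): a typical allocation path in the flow-create datapath would pick the per-queue cache, take a counter, resolve it to the DR action and offset used when building the rule, and eventually return the counter to the pool.

static inline int
example_cnt_alloc_and_release(struct mlx5_priv *priv, uint32_t queue_idx)
{
	struct mlx5_hws_cnt_pool *cpool = priv->hws_cpool;
	uint32_t *queue = mlx5_hws_cnt_get_queue(priv, &queue_idx);
	struct mlx5dr_action *action;
	uint32_t offset;
	cnt_id_t cnt_id;
	int ret;

	/* Non-shared counter with no AGE attached (age_idx == 0). */
	ret = mlx5_hws_cnt_pool_get(cpool, queue, &cnt_id, 0, 0);
	if (ret != 0)
		return ret; /* -EAGAIN or -ENOENT, see the doc comment above. */
	mlx5_hws_cnt_pool_get_action_offset(cpool, cnt_id, &action, &offset);
	/* ... use action/offset while constructing the flow rule ... */
	mlx5_hws_cnt_pool_put(cpool, queue, &cnt_id);
	return 0;
}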
644 : :
645 : : static __rte_always_inline int
646 : : mlx5_hws_cnt_shared_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id,
647 : : uint32_t age_idx)
648 : : {
649 : : struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
650 : :
651 : : return mlx5_hws_cnt_pool_get(hpool, NULL, cnt_id, age_idx, 1);
652 : : }
653 : :
654 : : static __rte_always_inline void
655 : : mlx5_hws_cnt_shared_put(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id)
656 : : {
657 : : struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
658 : :
659 : : mlx5_hws_cnt_pool_put(hpool, NULL, cnt_id);
660 : : }
661 : :
662 : : static __rte_always_inline bool
663 : : mlx5_hws_cnt_is_shared(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
664 : : {
665 : : struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
666 : : uint32_t iidx = mlx5_hws_cnt_iidx(hpool, cnt_id);
667 : : uint32_t share;
668 : :
669 [ # # ]: 0 : mlx5_hws_cnt_get_all(&hpool->pool[iidx], NULL, &share, NULL);
670 : : return !!share;
671 : : }
672 : :
673 : : static __rte_always_inline void
674 : : mlx5_hws_cnt_age_set(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id,
675 : : uint32_t age_idx)
676 : : {
677 : : struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
678 : : uint32_t iidx = mlx5_hws_cnt_iidx(hpool, cnt_id);
679 : :
680 : : MLX5_ASSERT(hpool->pool[iidx].cnt_state.share);
681 : 0 : mlx5_hws_cnt_set_age_idx(&hpool->pool[iidx], age_idx);
682 : 0 : }
683 : :
684 : : static __rte_always_inline uint32_t
685 : : mlx5_hws_cnt_age_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
686 : : {
687 : : struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
688 : : uint32_t iidx = mlx5_hws_cnt_iidx(hpool, cnt_id);
689 : : uint32_t age_idx, share;
690 : :
691 [ # # ]: 0 : mlx5_hws_cnt_get_all(&hpool->pool[iidx], NULL, &share, &age_idx);
692 : : MLX5_ASSERT(share);
693 : : return age_idx;
694 : : }
695 : :
696 : : static __rte_always_inline cnt_id_t
697 : : mlx5_hws_age_cnt_get(struct mlx5_priv *priv, struct mlx5_hws_age_param *param,
698 : : uint32_t age_idx)
699 : : {
700 [ # # ]: 0 : if (!param->own_cnt_index) {
701 : : /* Create one indirect counter for internal usage. */
702 [ # # ]: 0 : if (mlx5_hws_cnt_shared_get(priv->hws_cpool,
703 : : &param->own_cnt_index, age_idx) < 0)
704 : : return 0;
705 : 0 : param->nb_cnts++;
706 : : }
707 [ # # ]: 0 : return param->own_cnt_index;
708 : : }
709 : :
710 : : static __rte_always_inline void
711 : : mlx5_hws_age_nb_cnt_increase(struct mlx5_priv *priv, uint32_t age_idx)
712 : : {
713 : 0 : struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
714 : 0 : struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
715 : 0 : struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);
716 : :
717 : : MLX5_ASSERT(param != NULL);
718 : 0 : param->nb_cnts++;
719 : 0 : }
720 : :
721 : : static __rte_always_inline void
722 : : mlx5_hws_age_nb_cnt_decrease(struct mlx5_priv *priv, uint32_t age_idx)
723 : : {
724 : 0 : struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
725 : 0 : struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
726 : 0 : struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);
727 : :
728 [ # # # # ]: 0 : if (param != NULL)
729 : 0 : param->nb_cnts--;
730 : : }
731 : :
732 : : static __rte_always_inline bool
733 : : mlx5_hws_age_is_indirect(uint32_t age_idx)
734 : : {
735 [ # # # # ]: 0 : return (age_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET) ==
736 : : MLX5_INDIRECT_ACTION_TYPE_AGE ? true : false;
737 : : }
738 : :
739 : : /* init HWS counter pool. */
740 : : int
741 : : mlx5_hws_cnt_service_thread_create(struct mlx5_dev_ctx_shared *sh);
742 : :
743 : : void
744 : : mlx5_hws_cnt_service_thread_destroy(struct mlx5_dev_ctx_shared *sh);
745 : :
746 : : int
747 : : mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
748 : : uint32_t nb_counters, uint16_t nb_queue,
749 : : struct mlx5_hws_cnt_pool *chost, struct rte_flow_error *error);
750 : :
751 : : void
752 : : mlx5_hws_cnt_pool_destroy(struct mlx5_dev_ctx_shared *sh,
753 : : struct mlx5_hws_cnt_pool *cpool);
754 : :
755 : : int
756 : : mlx5_hws_cnt_svc_init(struct mlx5_dev_ctx_shared *sh,
757 : : struct rte_flow_error *error);
758 : :
759 : : void
760 : : mlx5_hws_cnt_svc_deinit(struct mlx5_dev_ctx_shared *sh);
761 : :
762 : : int
763 : : mlx5_hws_age_action_destroy(struct mlx5_priv *priv, uint32_t idx,
764 : : struct rte_flow_error *error);
765 : :
766 : : uint32_t
767 : : mlx5_hws_age_action_create(struct mlx5_priv *priv, uint32_t queue_id,
768 : : bool shared, const struct rte_flow_action_age *age,
769 : : uint32_t flow_idx, struct rte_flow_error *error);
770 : :
771 : : int
772 : : mlx5_hws_age_action_update(struct mlx5_priv *priv, uint32_t idx,
773 : : const void *update, struct rte_flow_error *error);
774 : :
775 : : void *
776 : : mlx5_hws_age_context_get(struct mlx5_priv *priv, uint32_t idx);
777 : :
778 : : int
779 : : mlx5_hws_age_pool_init(struct rte_eth_dev *dev,
780 : : uint32_t nb_aging_objects,
781 : : uint16_t nb_queues,
782 : : bool strict_queue);
783 : :
784 : : void
785 : : mlx5_hws_age_pool_destroy(struct mlx5_priv *priv);
786 : :
787 : : #endif /* _MLX5_HWS_CNT_H_ */