/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2022 Mellanox Technologies, Ltd
 */

#ifndef _MLX5_HWS_CNT_H_
#define _MLX5_HWS_CNT_H_

#include <rte_ring.h>
#include "mlx5_utils.h"
#include "mlx5_flow.h"

/*
 * HWS counter ID layout:
 *
 *   3                   2                   1                   0
 *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * | T |     | D |                                                 |
 * ~ Y |     | C |                     IDX                         ~
 * | P |     | S |                                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Bit 31:29 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10
 * Bit 25:24 = DCS index
 * Bit 23:00 = index of the counter within its DCS bulk
 */

#define MLX5_HWS_CNT_DCS_IDX_OFFSET 24
#define MLX5_HWS_CNT_DCS_IDX_MASK 0x3
#define MLX5_HWS_CNT_IDX_MASK ((1UL << MLX5_HWS_CNT_DCS_IDX_OFFSET) - 1)

#define MLX5_HWS_AGE_IDX_MASK (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1)

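/*
 * Illustrative sketch (not part of the API): extracting the fields of a
 * counter ID with the masks above. The variable names are hypothetical.
 *
 *   uint8_t dcs_idx = (cnt_id >> MLX5_HWS_CNT_DCS_IDX_OFFSET) &
 *                     MLX5_HWS_CNT_DCS_IDX_MASK;
 *   uint32_t bulk_off = cnt_id & MLX5_HWS_CNT_IDX_MASK;
 */
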
struct mlx5_hws_cnt_dcs {
        void *dr_action;
        uint32_t batch_sz;
        uint32_t iidx; /* Internal index of the first counter in this bulk. */
        struct mlx5_devx_obj *obj;
};

struct mlx5_hws_cnt_dcs_mng {
        uint32_t batch_total;
        struct mlx5_hws_cnt_dcs dcs[MLX5_HWS_CNT_DCS_NUM];
};

struct mlx5_hws_cnt {
        struct flow_counter_stats reset;
        bool in_used; /* Indicates whether this counter is in use or in the pool. */
        union {
                struct {
                        uint32_t share:1;
                        /*
                         * Set to 1 when this counter is used as an
                         * indirect action.
                         */
                        uint32_t age_idx:24;
                        /*
                         * When this counter is used for aging, this field
                         * saves the index of the AGE parameter. For a pure
                         * counter (without aging) this index is zero.
                         */
                };
                /* This struct is only meaningful when the user owns this counter. */
                uint32_t query_gen_when_free;
                /*
                 * When the PMD owns this counter (i.e. the user has put the
                 * counter back into the PMD counter pool), this field records
                 * the value of the pool's query generation at the time the
                 * user released the counter.
                 */
        };
};

struct mlx5_hws_cnt_raw_data_mng {
        struct flow_counter_stats *raw;
        struct mlx5_pmd_mr mr;
};

struct mlx5_hws_cache_param {
        uint32_t size;
        uint32_t q_num;
        uint32_t fetch_sz;
        uint32_t threshold;
        uint32_t preload_sz;
};

struct mlx5_hws_cnt_pool_cfg {
        char *name;
        uint32_t request_num;
        uint32_t alloc_factor;
        struct mlx5_hws_cnt_pool *host_cpool;
};

struct mlx5_hws_cnt_pool_caches {
        uint32_t fetch_sz;
        uint32_t threshold;
        uint32_t preload_sz;
        uint32_t q_num;
        struct rte_ring *qcache[];
};

struct __rte_cache_aligned mlx5_hws_cnt_pool {
        LIST_ENTRY(mlx5_hws_cnt_pool) next;
        alignas(RTE_CACHE_LINE_SIZE) struct mlx5_hws_cnt_pool_cfg cfg;
        alignas(RTE_CACHE_LINE_SIZE) struct mlx5_hws_cnt_dcs_mng dcs_mng;
        alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) query_gen;
        struct mlx5_hws_cnt *pool;
        struct mlx5_hws_cnt_raw_data_mng *raw_mng;
        struct rte_ring *reuse_list;
        struct rte_ring *free_list;
        struct rte_ring *wait_reset_list;
        struct mlx5_hws_cnt_pool_caches *cache;
        uint64_t time_of_last_age_check;
        struct mlx5_priv *priv;
};

/* HWS AGE status. */
enum {
        HWS_AGE_FREE, /* Initialized state. */
        HWS_AGE_CANDIDATE, /* AGE assigned to flows. */
        HWS_AGE_CANDIDATE_INSIDE_RING,
        /*
         * AGE assigned to flows but still inside the aged-out ring. It was
         * aged-out, but the timeout was changed afterwards, so it stays in
         * the ring while being a candidate again.
         */
        HWS_AGE_AGED_OUT_REPORTED,
        /*
         * Aged-out, reported by rte_flow_get_q_aged_flows, and waiting for
         * destroy.
         */
        HWS_AGE_AGED_OUT_NOT_REPORTED,
        /*
         * Aged-out and inside the aged-out ring, waiting for
         * rte_flow_get_q_aged_flows and destroy.
         */
};

/* HWS counter age parameter. */
struct __rte_cache_aligned __rte_packed_begin mlx5_hws_age_param {
        RTE_ATOMIC(uint32_t) timeout; /* Aging timeout in seconds (atomically accessed). */
        RTE_ATOMIC(uint32_t) sec_since_last_hit;
        /* Time in seconds since the last hit (atomically accessed). */
        RTE_ATOMIC(uint16_t) state; /* AGE state (atomically accessed). */
        uint64_t accumulator_last_hits;
        /* Last total value of hits, for comparison. */
        uint64_t accumulator_hits;
        /* Accumulator for hits coming from several counters. */
        uint32_t accumulator_cnt;
        /* Number of counters which already updated the accumulator in this second. */
        uint32_t nb_cnts; /* Number of counters used by this AGE. */
        uint32_t queue_id; /* Queue ID of the counter. */
        cnt_id_t own_cnt_index;
        /* Counter action created specifically for this AGE action. */
        void *context; /* Flow AGE context. */
} __rte_packed_end;

/**
 * Return the actual counter pool to be used in cross-vHCA sharing mode.
 *
 * @param cpool
 *   The counter pool to query.
 * @return
 *   The host counter pool if one is configured, otherwise @p cpool itself.
 */
static __rte_always_inline struct mlx5_hws_cnt_pool *
mlx5_hws_cnt_host_pool(struct mlx5_hws_cnt_pool *cpool)
{
        return cpool->cfg.host_cpool ? cpool->cfg.host_cpool : cpool;
}

/**
 * Translate a counter ID into an internal index (starting from 0), which can
 * be used as an index into the raw/cnt pool.
 *
 * @param cpool
 *   The counter pool.
 * @param cnt_id
 *   The external counter ID.
 * @return
 *   Internal index.
 */
static __rte_always_inline uint32_t
mlx5_hws_cnt_iidx(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
{
        struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
        uint8_t dcs_idx = cnt_id >> MLX5_HWS_CNT_DCS_IDX_OFFSET;
        uint32_t offset = cnt_id & MLX5_HWS_CNT_IDX_MASK;

        dcs_idx &= MLX5_HWS_CNT_DCS_IDX_MASK;
        return (hpool->dcs_mng.dcs[dcs_idx].iidx + offset);
}

/**
 * Check whether a counter ID is valid.
 */
static __rte_always_inline bool
mlx5_hws_cnt_id_valid(cnt_id_t cnt_id)
{
        return (cnt_id >> MLX5_INDIRECT_ACTION_TYPE_OFFSET) ==
               MLX5_INDIRECT_ACTION_TYPE_COUNT ? true : false;
}

/**
 * Generate a counter ID from an internal index.
 *
 * @param cpool
 *   The pointer to the counter pool.
 * @param iidx
 *   The internal counter index.
 *
 * @return
 *   Counter ID.
 */
static __rte_always_inline cnt_id_t
mlx5_hws_cnt_id_gen(struct mlx5_hws_cnt_pool *cpool, uint32_t iidx)
{
        struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
        struct mlx5_hws_cnt_dcs_mng *dcs_mng = &hpool->dcs_mng;
        uint32_t idx;
        uint32_t offset;
        cnt_id_t cnt_id;

        for (idx = 0, offset = iidx; idx < dcs_mng->batch_total; idx++) {
                if (dcs_mng->dcs[idx].batch_sz <= offset)
                        offset -= dcs_mng->dcs[idx].batch_sz;
                else
                        break;
        }
        cnt_id = offset;
        cnt_id |= (idx << MLX5_HWS_CNT_DCS_IDX_OFFSET);
        return (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
                MLX5_INDIRECT_ACTION_TYPE_OFFSET) | cnt_id;
}
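
/*
 * Illustrative sketch (not part of the API): the ID helpers above form a
 * round trip for any valid internal index of a configured pool:
 *
 *   cnt_id_t id = mlx5_hws_cnt_id_gen(cpool, iidx);
 *
 *   MLX5_ASSERT(mlx5_hws_cnt_id_valid(id));
 *   MLX5_ASSERT(mlx5_hws_cnt_iidx(cpool, id) == iidx);
 */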

/**
 * Read a consistent snapshot of a counter's raw statistics.
 *
 * The raw data is updated asynchronously by the counter service thread, so
 * the statistics are re-read until two consecutive reads return the same
 * value.
 */
static __rte_always_inline void
__hws_cnt_query_raw(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id,
                    uint64_t *raw_pkts, uint64_t *raw_bytes)
{
        struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
        struct mlx5_hws_cnt_raw_data_mng *raw_mng = hpool->raw_mng;
        struct flow_counter_stats s[2];
        uint8_t i = 0x1;
        size_t stat_sz = sizeof(s[0]);
        uint32_t iidx = mlx5_hws_cnt_iidx(cpool, cnt_id);

        memcpy(&s[0], &raw_mng->raw[iidx], stat_sz);
        do {
                memcpy(&s[i & 1], &raw_mng->raw[iidx], stat_sz);
                if (memcmp(&s[0], &s[1], stat_sz) == 0) {
                        *raw_pkts = rte_be_to_cpu_64(s[0].hits);
                        *raw_bytes = rte_be_to_cpu_64(s[0].bytes);
                        break;
                }
                i = ~i; /* Toggle between the two snapshot slots. */
        } while (1);
}

/**
 * Copy elements from one zero-copy ring to another zero-copy ring in place.
 *
 * The input is an rte_ring zero-copy data structure, which holds two
 * pointers; when a wrap-around occurs, ptr2 becomes meaningful.
 *
 * This routine therefore has to handle the case where both the source and
 * the destination addresses are wrapped.
 * First, compute the number of elements to copy up to the first wrap-around
 * address, which may be in the source or in the destination.
 * Second, copy the remaining elements up to the second wrap-around address;
 * if the first wrap-around was in the source, this one must be in the
 * destination, and vice versa.
 * Third, copy all remaining elements.
 *
 * In the worst case, three pieces of contiguous memory have to be copied.
 *
 * @param zcdd
 *   A pointer to the zero-copy data of the destination ring.
 * @param zcds
 *   A pointer to the zero-copy data of the source ring.
 * @param n
 *   Number of elements to copy.
 */
static __rte_always_inline void
__hws_cnt_r2rcpy(struct rte_ring_zc_data *zcdd, struct rte_ring_zc_data *zcds,
                 unsigned int n)
{
        unsigned int n1, n2, n3;
        void *s1, *s2, *s3;
        void *d1, *d2, *d3;

        s1 = zcds->ptr1;
        d1 = zcdd->ptr1;
        n1 = RTE_MIN(zcdd->n1, zcds->n1);
        if (zcds->n1 > n1) {
                n2 = zcds->n1 - n1;
                s2 = RTE_PTR_ADD(zcds->ptr1, sizeof(cnt_id_t) * n1);
                d2 = zcdd->ptr2;
                n3 = n - n1 - n2;
                s3 = zcds->ptr2;
                d3 = RTE_PTR_ADD(zcdd->ptr2, sizeof(cnt_id_t) * n2);
        } else {
                n2 = zcdd->n1 - n1;
                s2 = zcds->ptr2;
                d2 = RTE_PTR_ADD(zcdd->ptr1, sizeof(cnt_id_t) * n1);
                n3 = n - n1 - n2;
                s3 = RTE_PTR_ADD(zcds->ptr2, sizeof(cnt_id_t) * n2);
                d3 = zcdd->ptr2;
        }
        memcpy(d1, s1, n1 * sizeof(cnt_id_t));
        if (n2 != 0)
                memcpy(d2, s2, n2 * sizeof(cnt_id_t));
        if (n3 != 0)
                memcpy(d3, s3, n3 * sizeof(cnt_id_t));
}
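
/*
 * Worked example of the three-piece copy above (the numbers are illustrative
 * assumptions, not taken from the driver): copying n = 10 IDs where the
 * source region wraps after 7 elements (zcds->n1 = 7) and the destination
 * wraps after 4 (zcdd->n1 = 4); offsets below are in elements:
 *
 *   piece 1: 4 elements, source ptr1     -> destination ptr1
 *   piece 2: 3 elements, source ptr1 + 4 -> destination ptr2
 *   piece 3: 3 elements, source ptr2     -> destination ptr2 + 3
 */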

/**
 * Flush the entire queue cache of @p queue_id into the pool's wait-reset
 * list, so that the service thread can reset those counters.
 */
static __rte_always_inline int
mlx5_hws_cnt_pool_cache_flush(struct mlx5_hws_cnt_pool *cpool,
                              uint32_t queue_id)
{
        unsigned int ret __rte_unused;
        struct rte_ring_zc_data zcdr = {0};
        struct rte_ring_zc_data zcdc = {0};
        struct rte_ring *reset_list = NULL;
        struct rte_ring *qcache = cpool->cache->qcache[queue_id];
        uint32_t ring_size = rte_ring_count(qcache);

        ret = rte_ring_dequeue_zc_burst_elem_start(qcache, sizeof(cnt_id_t),
                                                   ring_size, &zcdc, NULL);
        MLX5_ASSERT(ret == ring_size);
        reset_list = cpool->wait_reset_list;
        ret = rte_ring_enqueue_zc_burst_elem_start(reset_list, sizeof(cnt_id_t),
                                                   ring_size, &zcdr, NULL);
        MLX5_ASSERT(ret == ring_size);
        __hws_cnt_r2rcpy(&zcdr, &zcdc, ring_size);
        rte_ring_enqueue_zc_elem_finish(reset_list, ring_size);
        rte_ring_dequeue_zc_elem_finish(qcache, ring_size);
        return 0;
}

/**
 * Refill the queue cache of @p queue_id from the pool's global lists:
 * first from the reuse list, then from the free list.
 *
 * @return
 *   0 on success, -EAGAIN if counters are still pending reset,
 *   -ENOENT if both lists are empty.
 */
static __rte_always_inline int
mlx5_hws_cnt_pool_cache_fetch(struct mlx5_hws_cnt_pool *cpool,
                              uint32_t queue_id)
{
        struct rte_ring *qcache = cpool->cache->qcache[queue_id];
        struct rte_ring *free_list = NULL;
        struct rte_ring *reuse_list = NULL;
        struct rte_ring *list = NULL;
        struct rte_ring_zc_data zcdf = {0};
        struct rte_ring_zc_data zcdc = {0};
        struct rte_ring_zc_data zcdu = {0};
        struct rte_ring_zc_data zcds = {0};
        struct mlx5_hws_cnt_pool_caches *cache = cpool->cache;
        unsigned int ret, actual_fetch_size __rte_unused;

        reuse_list = cpool->reuse_list;
        ret = rte_ring_dequeue_zc_burst_elem_start(reuse_list,
                        sizeof(cnt_id_t), cache->fetch_sz, &zcdu, NULL);
        zcds = zcdu;
        list = reuse_list;
        if (unlikely(ret == 0)) { /* No reusable counter. */
                rte_ring_dequeue_zc_elem_finish(reuse_list, 0);
                free_list = cpool->free_list;
                ret = rte_ring_dequeue_zc_burst_elem_start(free_list,
                                sizeof(cnt_id_t), cache->fetch_sz,
                                &zcdf, NULL);
                zcds = zcdf;
                list = free_list;
                if (unlikely(ret == 0)) { /* No free counter. */
                        rte_ring_dequeue_zc_elem_finish(free_list, 0);
                        if (rte_ring_count(cpool->wait_reset_list))
                                return -EAGAIN;
                        return -ENOENT;
                }
        }
        actual_fetch_size = ret;
        ret = rte_ring_enqueue_zc_burst_elem_start(qcache, sizeof(cnt_id_t),
                                                   ret, &zcdc, NULL);
        MLX5_ASSERT(ret == actual_fetch_size);
        __hws_cnt_r2rcpy(&zcdc, &zcds, ret);
        rte_ring_dequeue_zc_elem_finish(list, ret);
        rte_ring_enqueue_zc_elem_finish(qcache, ret);
        return 0;
}

/**
 * Revert the last @p n elements enqueued into the single-producer ring @p r
 * and expose them through the zero-copy data @p zcd, so that they can be
 * moved to another ring.
 */
static __rte_always_inline int
__mlx5_hws_cnt_pool_enqueue_revert(struct rte_ring *r, unsigned int n,
                                   struct rte_ring_zc_data *zcd)
{
        uint32_t current_head = 0;
        uint32_t revert2head = 0;

        MLX5_ASSERT(r->prod.sync_type == RTE_RING_SYNC_ST);
        MLX5_ASSERT(r->cons.sync_type == RTE_RING_SYNC_ST);
        current_head = rte_atomic_load_explicit(&r->prod.head,
                                                rte_memory_order_relaxed);
        MLX5_ASSERT(n <= r->capacity);
        MLX5_ASSERT(n <= rte_ring_count(r));
        revert2head = current_head - n;
        r->prod.head = revert2head; /* This ring should be SP. */
        __rte_ring_get_elem_addr(r, revert2head, sizeof(cnt_id_t), n,
                                 &zcd->ptr1, &zcd->n1, &zcd->ptr2);
        /* Update tail. */
        rte_atomic_store_explicit(&r->prod.tail, revert2head,
                                  rte_memory_order_release);
        return n;
}

/**
 * Put one counter back into the pool.
 *
 * @param cpool
 *   A pointer to the counter pool structure.
 * @param queue
 *   A pointer to the HWS queue. If NULL, the counter is put back into the
 *   common pool.
 * @param cnt_id
 *   The counter ID to be added.
 */
static __rte_always_inline void
mlx5_hws_cnt_pool_put(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
                      cnt_id_t *cnt_id)
{
        unsigned int ret = 0;
        struct mlx5_hws_cnt_pool *hpool;
        struct rte_ring_zc_data zcdc = {0};
        struct rte_ring_zc_data zcdr = {0};
        struct rte_ring *qcache = NULL;
        unsigned int wb_num = 0; /* Cache write-back number. */
        uint32_t iidx;

        hpool = mlx5_hws_cnt_host_pool(cpool);
        iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
        hpool->pool[iidx].in_used = false;
        hpool->pool[iidx].query_gen_when_free =
                rte_atomic_load_explicit(&hpool->query_gen,
                                         rte_memory_order_relaxed);
        if (likely(queue != NULL) && cpool->cfg.host_cpool == NULL)
                qcache = hpool->cache->qcache[*queue];
        if (unlikely(qcache == NULL)) {
                ret = rte_ring_enqueue_elem(hpool->wait_reset_list, cnt_id,
                                            sizeof(cnt_id_t));
                MLX5_ASSERT(ret == 0);
                return;
        }
        ret = rte_ring_enqueue_burst_elem(qcache, cnt_id, sizeof(cnt_id_t), 1,
                                          NULL);
        if (unlikely(ret == 0)) { /* Cache is full. */
                struct rte_ring *reset_list = cpool->wait_reset_list;

                wb_num = rte_ring_count(qcache) - cpool->cache->threshold;
                MLX5_ASSERT(wb_num < rte_ring_count(qcache));
                __mlx5_hws_cnt_pool_enqueue_revert(qcache, wb_num, &zcdc);
                ret = rte_ring_enqueue_zc_burst_elem_start(reset_list,
                                                           sizeof(cnt_id_t),
                                                           wb_num, &zcdr, NULL);
                MLX5_ASSERT(ret == wb_num);
                __hws_cnt_r2rcpy(&zcdr, &zcdc, ret);
                rte_ring_enqueue_zc_elem_finish(reset_list, ret);
                /* Write back THIS counter too. */
                ret = rte_ring_enqueue_burst_elem(reset_list, cnt_id,
                                                  sizeof(cnt_id_t), 1, NULL);
        }
        MLX5_ASSERT(ret == 1);
}

/**
 * Get one counter from the pool.
 *
 * If @p queue is not NULL, objects are retrieved first from the queue's
 * cache, then from the common pool. Note that this can return -ENOENT when
 * the local cache and the common pool are empty, even if the caches of
 * other queues are full.
 *
 * @param cpool
 *   A pointer to the counter pool structure.
 * @param queue
 *   A pointer to the HWS queue. If NULL, the counter is fetched from the
 *   common pool.
 * @param cnt_id
 *   A pointer to a cnt_id_t (counter ID) that will be filled.
 * @param age_idx
 *   Index of the AGE parameter using this counter; zero means there is no
 *   such AGE.
 *
 * @return
 *   - 0: Success; object taken.
 *   - -ENOENT: Not enough entries in the pool; no object is retrieved.
 *   - -EAGAIN: Counter is not ready; try again.
 */
static __rte_always_inline int
mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
                      cnt_id_t *cnt_id, uint32_t age_idx)
{
        unsigned int ret;
        struct rte_ring_zc_data zcdc = {0};
        struct rte_ring *qcache = NULL;
        uint32_t iidx, query_gen = 0;
        cnt_id_t tmp_cid = 0;

        if (likely(queue != NULL && cpool->cfg.host_cpool == NULL))
                qcache = cpool->cache->qcache[*queue];
        if (unlikely(qcache == NULL)) {
                cpool = mlx5_hws_cnt_host_pool(cpool);
                ret = rte_ring_dequeue_elem(cpool->reuse_list, &tmp_cid,
                                            sizeof(cnt_id_t));
                if (unlikely(ret != 0)) {
                        ret = rte_ring_dequeue_elem(cpool->free_list, &tmp_cid,
                                                    sizeof(cnt_id_t));
                        if (unlikely(ret != 0)) {
                                if (rte_ring_count(cpool->wait_reset_list))
                                        return -EAGAIN;
                                return -ENOENT;
                        }
                }
                *cnt_id = tmp_cid;
                iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
                __hws_cnt_query_raw(cpool, *cnt_id,
                                    &cpool->pool[iidx].reset.hits,
                                    &cpool->pool[iidx].reset.bytes);
                cpool->pool[iidx].share = 0;
                MLX5_ASSERT(!cpool->pool[iidx].in_used);
                cpool->pool[iidx].in_used = true;
                cpool->pool[iidx].age_idx = age_idx;
                return 0;
        }
        ret = rte_ring_dequeue_zc_burst_elem_start(qcache, sizeof(cnt_id_t), 1,
                                                   &zcdc, NULL);
        if (unlikely(ret == 0)) { /* Local cache is empty. */
                rte_ring_dequeue_zc_elem_finish(qcache, 0);
                /* Fetch from the global free list. */
                ret = mlx5_hws_cnt_pool_cache_fetch(cpool, *queue);
                if (unlikely(ret != 0))
                        return ret;
                ret = rte_ring_dequeue_zc_burst_elem_start(qcache,
                                                           sizeof(cnt_id_t), 1,
                                                           &zcdc, NULL);
                MLX5_ASSERT(ret == 1);
        }
        /* Get one from the local cache. */
        *cnt_id = (*(cnt_id_t *)zcdc.ptr1);
        iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
        query_gen = cpool->pool[iidx].query_gen_when_free;
        if (cpool->query_gen == query_gen) { /* Counter is waiting for reset. */
                rte_ring_dequeue_zc_elem_finish(qcache, 0);
                /* Write the counters back to the reset list. */
                mlx5_hws_cnt_pool_cache_flush(cpool, *queue);
                /* Fetch from the global free list. */
                ret = mlx5_hws_cnt_pool_cache_fetch(cpool, *queue);
                if (unlikely(ret != 0))
                        return ret;
                ret = rte_ring_dequeue_zc_burst_elem_start(qcache,
                                                           sizeof(cnt_id_t), 1,
                                                           &zcdc, NULL);
                MLX5_ASSERT(ret == 1);
                *cnt_id = *(cnt_id_t *)zcdc.ptr1;
                iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
        }
        __hws_cnt_query_raw(cpool, *cnt_id, &cpool->pool[iidx].reset.hits,
                            &cpool->pool[iidx].reset.bytes);
        rte_ring_dequeue_zc_elem_finish(qcache, 1);
        cpool->pool[iidx].share = 0;
        MLX5_ASSERT(!cpool->pool[iidx].in_used);
        cpool->pool[iidx].in_used = true;
        cpool->pool[iidx].age_idx = age_idx;
        return 0;
}
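
/*
 * Illustrative allocation/release flow (a sketch, not driver code; the
 * variables `priv` and `queue` are assumed to exist in the caller):
 *
 *   cnt_id_t cnt_id;
 *   uint32_t *qptr = mlx5_hws_cnt_get_queue(priv, &queue);
 *   int ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, qptr, &cnt_id, 0);
 *
 *   if (ret == -EAGAIN) {
 *           ... counters are pending reset; retry later ...
 *   } else if (ret == 0) {
 *           ... attach cnt_id to a flow rule ...
 *           mlx5_hws_cnt_pool_put(priv->hws_cpool, qptr, &cnt_id);
 *   }
 */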

/**
 * Decide whether the given queue can be used to perform counter
 * allocation/deallocation, based on the counter configuration.
 *
 * @param[in] priv
 *   Pointer to the port private data structure.
 * @param[in] queue
 *   Pointer to the queue index.
 *
 * @return
 *   @p queue if the cache related to the queue can be used, NULL otherwise.
 */
static __rte_always_inline uint32_t *
mlx5_hws_cnt_get_queue(struct mlx5_priv *priv, uint32_t *queue)
{
        if (priv && priv->hws_cpool) {
                /* Do not use the queue cache if the counter pool is shared. */
                if (priv->shared_refcnt ||
                    priv->hws_cpool->cfg.host_cpool != NULL)
                        return NULL;
                /* Do not use the queue cache if the counter cache is disabled. */
                if (priv->hws_cpool->cache == NULL)
                        return NULL;
                return queue;
        }
        /* This case should not be reached if the counter pool was successfully configured. */
        MLX5_ASSERT(false);
        return NULL;
}

static __rte_always_inline unsigned int
mlx5_hws_cnt_pool_get_size(struct mlx5_hws_cnt_pool *cpool)
{
        struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);

        return rte_ring_get_capacity(hpool->free_list);
}

static __rte_always_inline int
mlx5_hws_cnt_pool_get_action_offset(struct mlx5_hws_cnt_pool *cpool,
                                    cnt_id_t cnt_id,
                                    struct mlx5dr_action **action,
                                    uint32_t *offset)
{
        uint8_t idx = cnt_id >> MLX5_HWS_CNT_DCS_IDX_OFFSET;

        idx &= MLX5_HWS_CNT_DCS_IDX_MASK;
        *action = cpool->dcs_mng.dcs[idx].dr_action;
        *offset = cnt_id & MLX5_HWS_CNT_IDX_MASK;
        return 0;
}
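
/*
 * Illustrative use (sketch only): resolving the DR action and the offset
 * that a flow rule needs in order to reference a counter:
 *
 *   struct mlx5dr_action *act;
 *   uint32_t off;
 *
 *   mlx5_hws_cnt_pool_get_action_offset(cpool, cnt_id, &act, &off);
 *   ... pass act/off to the rule's COUNT action arguments ...
 */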

static __rte_always_inline int
mlx5_hws_cnt_shared_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id,
                        uint32_t age_idx)
{
        struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
        uint32_t iidx;
        int ret;

        ret = mlx5_hws_cnt_pool_get(hpool, NULL, cnt_id, age_idx);
        if (ret != 0)
                return ret;
        iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
        hpool->pool[iidx].share = 1;
        return 0;
}

static __rte_always_inline void
mlx5_hws_cnt_shared_put(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id)
{
        struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
        uint32_t iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);

        hpool->pool[iidx].share = 0;
        mlx5_hws_cnt_pool_put(hpool, NULL, cnt_id);
}
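
/*
 * Illustrative lifecycle (sketch) of a counter shared through an indirect
 * action; `cpool` is assumed to be a configured counter pool:
 *
 *   cnt_id_t id;
 *
 *   if (mlx5_hws_cnt_shared_get(cpool, &id, 0) == 0) {
 *           ... reference the counter from several flow rules ...
 *           mlx5_hws_cnt_shared_put(cpool, &id);
 *   }
 */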

static __rte_always_inline bool
mlx5_hws_cnt_is_shared(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
{
        struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
        uint32_t iidx = mlx5_hws_cnt_iidx(hpool, cnt_id);

        return hpool->pool[iidx].share ? true : false;
}

static __rte_always_inline void
mlx5_hws_cnt_age_set(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id,
                     uint32_t age_idx)
{
        struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
        uint32_t iidx = mlx5_hws_cnt_iidx(hpool, cnt_id);

        MLX5_ASSERT(hpool->pool[iidx].share);
        hpool->pool[iidx].age_idx = age_idx;
}

static __rte_always_inline uint32_t
mlx5_hws_cnt_age_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
{
        struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
        uint32_t iidx = mlx5_hws_cnt_iidx(hpool, cnt_id);

        MLX5_ASSERT(hpool->pool[iidx].share);
        return hpool->pool[iidx].age_idx;
}

static __rte_always_inline cnt_id_t
mlx5_hws_age_cnt_get(struct mlx5_priv *priv, struct mlx5_hws_age_param *param,
                     uint32_t age_idx)
{
        if (!param->own_cnt_index) {
                /* Create an indirect counter for internal usage. */
                if (mlx5_hws_cnt_shared_get(priv->hws_cpool,
                                            &param->own_cnt_index,
                                            age_idx) < 0)
                        return 0;
                param->nb_cnts++;
        }
        return param->own_cnt_index;
}

static __rte_always_inline void
mlx5_hws_age_nb_cnt_increase(struct mlx5_priv *priv, uint32_t age_idx)
{
        struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
        struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
        struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);

        MLX5_ASSERT(param != NULL);
        param->nb_cnts++;
}

static __rte_always_inline void
mlx5_hws_age_nb_cnt_decrease(struct mlx5_priv *priv, uint32_t age_idx)
{
        struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
        struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
        struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);

        if (param != NULL)
                param->nb_cnts--;
}

static __rte_always_inline bool
mlx5_hws_age_is_indirect(uint32_t age_idx)
{
        return (age_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET) ==
               MLX5_INDIRECT_ACTION_TYPE_AGE ? true : false;
}

/* Init HWS counter pool. */
int
mlx5_hws_cnt_service_thread_create(struct mlx5_dev_ctx_shared *sh);

void
mlx5_hws_cnt_service_thread_destroy(struct mlx5_dev_ctx_shared *sh);

int
mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
                         uint32_t nb_counters, uint16_t nb_queue,
                         struct mlx5_hws_cnt_pool *chost,
                         struct rte_flow_error *error);

void
mlx5_hws_cnt_pool_destroy(struct mlx5_dev_ctx_shared *sh,
                          struct mlx5_hws_cnt_pool *cpool);

int
mlx5_hws_cnt_svc_init(struct mlx5_dev_ctx_shared *sh,
                      struct rte_flow_error *error);

void
mlx5_hws_cnt_svc_deinit(struct mlx5_dev_ctx_shared *sh);

int
mlx5_hws_age_action_destroy(struct mlx5_priv *priv, uint32_t idx,
                            struct rte_flow_error *error);

uint32_t
mlx5_hws_age_action_create(struct mlx5_priv *priv, uint32_t queue_id,
                           bool shared, const struct rte_flow_action_age *age,
                           uint32_t flow_idx, struct rte_flow_error *error);

int
mlx5_hws_age_action_update(struct mlx5_priv *priv, uint32_t idx,
                           const void *update, struct rte_flow_error *error);

void *
mlx5_hws_age_context_get(struct mlx5_priv *priv, uint32_t idx);

int
mlx5_hws_age_pool_init(struct rte_eth_dev *dev,
                       uint32_t nb_aging_objects,
                       uint16_t nb_queues,
                       bool strict_queue);

void
mlx5_hws_age_pool_destroy(struct mlx5_priv *priv);

#endif /* _MLX5_HWS_CNT_H_ */